# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/[options, sequtils, tables, sets],
  stew/[assign2, byteutils, results],
  metrics, snappy, chronicles,
  ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
    state_transition, validator],
  ../spec/datatypes/[phase0, altair],
  ".."/beacon_chain_db,
  "."/[block_pools_types, block_quarantine]

export
  eth2_merkleization, eth2_ssz_serialization,
  block_pools_types, results, beacon_chain_db

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_head_root, "Root of the head block of the beacon chain"
declareGauge beacon_head_slot, "Slot of the head block of the beacon chain"

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition
declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition
declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition
declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition
declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition
declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition

declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter)
declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy

declareCounter beacon_state_data_cache_hits, "EpochRef hits"
declareCounter beacon_state_data_cache_misses, "EpochRef misses"

declareCounter beacon_state_rewinds, "State database rewinds"

declareGauge beacon_active_validators, "Number of validators in the active validator set"
declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy

declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block

logScope: topics = "chaindag"

const
  # When finality happens, we prune historical states from the database except
  # for a snapshot every 32 epochs from which replays can happen - there's a
  # balance here between making long replays and saving on disk space
  EPOCHS_PER_STATE_SNAPSHOT = 32

proc putBlock*(
    dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) =
  dag.db.putBlock(signedBlock)

proc updateStateData*(
    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
    cache: var StateCache): bool {.gcsafe.}

template withStateVars*(
    stateDataInternal: var StateData, body: untyped): untyped =
  ## Inject a few more descriptive names for the members of `stateData` -
  ## the stateData instance may get mutated through these names as well
  template stateData(): StateData {.inject, used.} = stateDataInternal
  template stateRoot(): Eth2Digest {.inject, used.} =
    getStateRoot(stateDataInternal.data)
  template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
  template root(): Eth2Digest {.inject, used.} = stateDataInternal.data.root

  body

template withUpdatedState*(
    dag: ChainDAGRef, stateData: var StateData, blockSlot: BlockSlot,
    okBody: untyped, failureBody: untyped): untyped =
  ## Helper template that updates stateData to a particular BlockSlot - usage of
  ## stateData is unsafe outside of block, or across `await` boundaries

  block:
    var cache {.inject.} = StateCache()
    if updateStateData(dag, stateData, blockSlot, false, cache):
      withStateVars(stateData):
        okBody
    else:
      failureBody
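
# A minimal caller sketch for `withUpdatedState` (not part of this module;
# `someDag`, `someState` and `someSlot` are hypothetical names standing in for
# whatever the caller has at hand). The two trailing blocks bind to `okBody`
# and `failureBody` via Nim's `do` notation:
#
#   someDag.withUpdatedState(someState, someDag.head.atSlot(someSlot)) do:
#     echo "state now at ", stateRoot  # `stateRoot` is injected by withStateVars
#   do:
#     echo "could not rewind/advance to the requested BlockSlot"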

func get_effective_balances(validators: openArray[Validator], epoch: Epoch):
    seq[Gwei] =
  ## Get the balances from a state as counted for fork choice
  result.newSeq(validators.len) # zero-init

  for i in 0 ..< result.len:
    # All non-active validators have a 0 balance
    let validator = unsafeAddr validators[i]
    if validator[].is_active_validator(epoch):
      result[i] = validator[].effective_balance

proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
  # Update validator key cache - must be called every time a valid block is
  # applied to the state - this is important to ensure that when we sync blocks
  # without storing a state (non-epoch blocks essentially), the deposits from
  # those blocks are persisted to the in-database cache of immutable validator
  # data (but no earlier than that the whole block has been validated)
  dag.db.updateImmutableValidators(validators)

proc updateFinalizedBlocks*(dag: ChainDAGRef) =
  template update(s: Slot) =
    if s < dag.tail.slot:
      if not dag.backfillBlocks[s.int].isZero:
        dag.db.finalizedBlocks.insert(s, dag.backfillBlocks[s.int])
    else:
      let dagIndex = int(s - dag.tail.slot)
      if not isNil(dag.finalizedBlocks[dagIndex]):
        dag.db.finalizedBlocks.insert(s, dag.finalizedBlocks[dagIndex].root)

  if not dag.db.db.readOnly: # TODO abstraction leak - where to put this?
    dag.db.withManyWrites:
      if dag.db.finalizedBlocks.low.isNone():
        for s in dag.backfill.slot .. dag.finalizedHead.slot:
          update(s)
      else:
        for s in dag.backfill.slot ..< dag.db.finalizedBlocks.low.get():
          update(s)

        for s in dag.db.finalizedBlocks.high.get() + 1 .. dag.finalizedHead.slot:
          update(s)
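
# Gap-filling sketch (assumed numbers): if the database already holds a
# finalized range for slots 100..200 and `finalizedHead` is at slot 232 with
# `backfill.slot == 0`, the loops above only insert the missing slots 0..99
# (taken from `backfillBlocks`) and 201..232 (taken from `finalizedBlocks`),
# leaving the already-persisted range untouched.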

func validatorKey*(
    dag: ChainDAGRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indices that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  dag.db.immutableValidators.load(index)

func validatorKey*(
    epochRef: EpochRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indices that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  validatorKey(epochRef.dag, index)

func init*(
    T: type EpochRef, dag: ChainDAGRef, state: StateData,
    cache: var StateCache): T =
  let
    epoch = state.data.get_current_epoch()
    epochRef = EpochRef(
      dag: dag, # This gives access to the validator pubkeys through an EpochRef
      key: state.blck.epochAncestor(epoch),
      eth1_data: getStateField(state.data, eth1_data),
      eth1_deposit_index: getStateField(state.data, eth1_deposit_index),
      current_justified_checkpoint:
        getStateField(state.data, current_justified_checkpoint),
      finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
      shuffled_active_validator_indices:
        cache.get_shuffled_active_validator_indices(state.data, epoch),
      merge_transition_complete:
        case state.data.kind:
        of BeaconStateFork.Phase0: false
        of BeaconStateFork.Altair: false
        of BeaconStateFork.Bellatrix:
          # https://github.com/ethereum/consensus-specs/blob/v1.1.7/specs/merge/beacon-chain.md#is_merge_transition_complete
          state.data.bellatrixData.data.latest_execution_payload_header !=
            ExecutionPayloadHeader()
    )
    epochStart = epoch.start_slot()

  doAssert epochRef.key.blck != nil, "epochAncestor should not fail for state block"

  for i in 0'u64 ..< SLOTS_PER_EPOCH:
    epochRef.beacon_proposers[i] = get_beacon_proposer_index(
      state.data, cache, epochStart + i)

  # When fork choice runs, it will need the effective balance of the justified
  # checkpoint - we pre-load the balances here to avoid rewinding the justified
  # state later and compress them because not all checkpoints end up being used
  # for fork choice - especially during long periods of non-finalization
  proc snappyEncode(inp: openArray[byte]): seq[byte] =
    try:
      snappy.encode(inp)
    except CatchableError as err:
      raiseAssert err.msg

  epochRef.effective_balances_bytes =
    snappyEncode(SSZ.encode(
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(
        getStateField(state.data, validators).asSeq,
        epoch))))

  epochRef

func effective_balances*(epochRef: EpochRef): seq[Gwei] =
  try:
    SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high),
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq()
  except CatchableError as exc:
    raiseAssert exc.msg
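
# The effective balances round-trip through SSZ + snappy: `EpochRef.init`
# compresses them and this accessor reconstructs the seq on demand, trading a
# little CPU per access for a smaller resident EpochRef. A rough sketch of the
# equivalent round-trip, with `balances` standing in for an arbitrary seq[Gwei]:
#
#   let compressed = snappy.encode(SSZ.encode(
#     List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](balances)))
#   let decoded = SSZ.decode(
#     snappy.decode(compressed, uint32.high),
#     List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq()
#   doAssert decoded == balances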

func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] =
  ## Retrieve a resolved block reference, if available - this function does
  ## not return historical finalized blocks, see `getBlockAtSlot` for a function
  ## that covers the entire known history
  let key = KeyedBlockRef.asLookupKey(root)
  # HashSet lacks the api to do check-and-get in one lookup - `[]` will return
  # the copy of the instance in the set which has more fields than `root` set!
  if key in dag.forkBlocks:
    try: ok(dag.forkBlocks[key].blockRef())
    except KeyError: raiseAssert "contains"
  else:
    err()
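
# Caller sketch (hypothetical `someRoot`): the `Opt` return forces the
# "unknown or already-finalized block" case to be handled explicitly rather
# than propagating a nil `BlockRef`:
#
#   let blck = dag.getBlockRef(someRoot)
#   if blck.isSome():
#     debug "block is part of the non-finalized DAG", slot = blck.get().slot
#   else:
#     debug "block unknown or finalized", root = shortLog(someRoot)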

func getBlockAtSlot*(dag: ChainDAGRef, slot: Slot): BlockSlot =
  ## Retrieve the canonical block at the given slot, or the last block that
  ## comes before - similar to atSlot, but without the linear scan - see
  ## getBlockIdAtSlot for a version that covers backfill blocks as well
  ## May return an empty BlockSlot (where blck is nil!)
  if slot == dag.genesis.slot:
    # There may be gaps in the
    return dag.genesis.atSlot(slot)

  if slot > dag.finalizedHead.slot:
    return dag.head.atSlot(slot) # Linear iteration is the fastest we have

  doAssert dag.finalizedHead.slot >= dag.tail.slot
  doAssert dag.tail.slot >= dag.backfill.slot
  doAssert dag.finalizedBlocks.len ==
    (dag.finalizedHead.slot - dag.tail.slot).int + 1, "see updateHead"

  if slot >= dag.tail.slot:
    var pos = int(slot - dag.tail.slot)
    while true:
      if dag.finalizedBlocks[pos] != nil:
        return dag.finalizedBlocks[pos].atSlot(slot)

      if pos == 0:
        break

      pos -= 1

  if dag.tail.slot == 0:
    raiseAssert "Genesis missing"

  BlockSlot() # nil blck!

func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): BlockSlotId =
  ## Retrieve the canonical block at the given slot, or the last block that
  ## comes before - similar to atSlot, but without the linear scan
  if slot == dag.genesis.slot:
    return dag.genesis.bid.atSlot(slot)

  if slot >= dag.tail.slot:
    return dag.getBlockAtSlot(slot).toBlockSlotId()

  var pos = slot.int
  while pos >= dag.backfill.slot.int:
    if not dag.backfillBlocks[pos].isZero:
      return BlockId(root: dag.backfillBlocks[pos], slot: Slot(pos)).atSlot(slot)

    pos -= 1

  BlockSlotId() # not backfilled yet, and not genesis

func getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] =
  let blck = ? dag.getBlockRef(root)
  ok(blck.bid)

func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool =
  dag.getBlockIdAtSlot(bid.slot).bid == bid
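
# Relationship sketch between the two lookups above (hypothetical `someBid`):
# a BlockId is canonical exactly when the slot-indexed lookup resolves back to
# the same id, i.e.
#
#   doAssert dag.isCanonical(someBid) ==
#     (dag.getBlockIdAtSlot(someBid.slot).bid == someBid)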

func epochAncestor*(blck: BlockRef, epoch: Epoch): EpochKey =
  ## The state transition works by storing information from blocks in a
  ## "working" area until the epoch transition, then batching work collected
  ## during the epoch. Thus, the last block in the ancestor epochs is the block
  ## that has an impact on the epoch currently being considered.
  ##
  ## This function returns an epoch key pointing to that epoch boundary, i.e. the
  ## boundary where the last block has been applied to the state and epoch
  ## processing has been done.
  var blck = blck
  while blck.slot.epoch >= epoch and not blck.parent.isNil:
    blck = blck.parent

  if blck.slot.epoch > epoch:
    EpochKey() # The searched-for epoch predates our tail block
  else:
    EpochKey(epoch: epoch, blck: blck)
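
# Worked example (assumed numbers, SLOTS_PER_EPOCH = 32): for a branch with
# blocks at slots 62, 64 and 66, `epochAncestor(blockAt66, Epoch(2))` walks
# back past slots 66 and 64 to the block at slot 62 - the last block applied
# before the epoch-2 boundary - and returns EpochKey(epoch: 2, blck: blockAt62).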

func findEpochRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[EpochRef] =
  # Look for an existing EpochRef in the cache
  let ancestor = epochAncestor(blck, epoch)
  if isNil(ancestor.blck):
    # We can't compute EpochRef instances for states before the tail because
    # we do not have them!
    return err()

  for i in 0 ..< dag.epochRefs.len:
    if dag.epochRefs[i] != nil and dag.epochRefs[i].key == ancestor:
      return ok(dag.epochRefs[i])

  err()

func loadStateCache(
    dag: ChainDAGRef, cache: var StateCache, blck: BlockRef, epoch: Epoch) =
  # When creating a state cache, we want the current and the previous epoch
  # information to be preloaded as both of these are used in state transition
  # functions

  template load(e: Epoch) =
    block:
      let epoch = e
      if epoch notin cache.shuffled_active_validator_indices:
        let epochRef = dag.findEpochRef(blck, epoch)
        if epochRef.isSome():
          cache.shuffled_active_validator_indices[epoch] =
            epochRef[].shuffled_active_validator_indices

          let start_slot = epoch.start_slot()
          for i, idx in epochRef[].beacon_proposers:
            cache.beacon_proposer_indices[start_slot + i] = idx

  load(epoch)

  if epoch > 0:
    load(epoch - 1)

func containsForkBlock*(dag: ChainDAGRef, root: Eth2Digest): bool =
  ## Checks for blocks at the finalized checkpoint or newer
  KeyedBlockRef.asLookupKey(root) in dag.forkBlocks

proc containsBlock(
    cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool =
  case cfg.blockForkAtEpoch(slot.epoch)
  of BeaconBlockFork.Phase0: db.containsBlockPhase0(root)
  of BeaconBlockFork.Altair: db.containsBlockAltair(root)
  of BeaconBlockFork.Bellatrix: db.containsBlockMerge(root)

func isStateCheckpoint(bs: BlockSlot): bool =
  ## State checkpoints are the points in time for which we store full state
  ## snapshots, which later serve as rewind starting points when replaying state
  ## transitions from database, for example during reorgs.
  ##
  # As a policy, we only store epoch boundary states without the epoch block
  # (if it exists) applied - the rest can be reconstructed by loading an epoch
  # boundary state and applying the missing blocks.
  # We also avoid states that were produced with empty slots only - as such,
  # there is only a checkpoint for the first epoch after a block.
  # The tail block also counts as a state checkpoint!
  (bs.slot == bs.blck.slot and bs.blck.parent == nil) or
  (bs.slot.is_epoch and bs.slot.epoch == (bs.blck.slot.epoch + 1))
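
# Worked example (assumed numbers, SLOTS_PER_EPOCH = 32): for a block at
# slot 60 whose next descendant lies several epochs later, the BlockSlot
# (blck@60, slot 64) is a state checkpoint - slot 64 is an epoch boundary and
# epoch(64) == epoch(60) + 1 - whereas (blck@60, slot 96) is not, because only
# the first epoch boundary after the block gets a stored state.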

proc getStateData(
    db: BeaconChainDB, cfg: RuntimeConfig, state: var StateData, bs: BlockSlot,
    rollback: RollbackProc): bool =
  if not bs.isStateCheckpoint():
    return false

  let root = db.getStateRoot(bs.blck.root, bs.slot)
  if not root.isSome():
    return false

  let expectedFork = cfg.stateForkAtEpoch(bs.slot.epoch)
  if state.data.kind != expectedFork:
    state.data = (ref ForkedHashedBeaconState)(kind: expectedFork)[]

  case expectedFork
  of BeaconStateFork.Bellatrix:
    if not db.getState(root.get(), state.data.bellatrixData.data, rollback):
      return false
  of BeaconStateFork.Altair:
    if not db.getState(root.get(), state.data.altairData.data, rollback):
      return false
  of BeaconStateFork.Phase0:
    if not db.getState(root.get(), state.data.phase0Data.data, rollback):
      return false

  state.blck = bs.blck
  setStateRoot(state.data, root.get())

  true

proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
    Opt[ForkedTrustedSignedBeaconBlock] =
  # When we only have a digest, we don't know which fork it's from so we try
  # them one by one - this should be used sparingly
  if (let blck = db.getMergeBlock(root); blck.isSome()):
    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
  elif (let blck = db.getAltairBlock(root); blck.isSome()):
    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
  elif (let blck = db.getPhase0Block(root); blck.isSome()):
    ok(ForkedTrustedSignedBeaconBlock.init(blck.get()))
  else:
    err()

proc getForkedBlock*(
    dag: ChainDAGRef, root: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] =
  dag.db.getForkedBlock(root)

proc getForkedBlock*(
    dag: ChainDAGRef, id: BlockId): Opt[ForkedTrustedSignedBeaconBlock] =
  case dag.cfg.blockForkAtEpoch(id.slot.epoch)
  of BeaconBlockFork.Phase0:
    let data = dag.db.getPhase0Block(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)
  of BeaconBlockFork.Altair:
    let data = dag.db.getAltairBlock(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)
  of BeaconBlockFork.Bellatrix:
    let data = dag.db.getMergeBlock(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)

proc getForkedBlock*(
    dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaconBlock =
  dag.getForkedBlock(blck.bid).expect(
    "BlockRef block should always load, database corrupt?")

proc updateBeaconMetrics(state: StateData, cache: var StateCache) =
  # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
  # both non-negative, so difference can't overflow or underflow int64
  beacon_head_root.set(state.blck.root.toGaugeValue)
  beacon_head_slot.set(state.blck.slot.toGaugeValue)

  withState(state.data):
    beacon_pending_deposits.set(
      (state.data.eth1_data.deposit_count -
        state.data.eth1_deposit_index).toGaugeValue)
    beacon_processed_deposits_total.set(
      state.data.eth1_deposit_index.toGaugeValue)

    beacon_current_justified_epoch.set(
      state.data.current_justified_checkpoint.epoch.toGaugeValue)
    beacon_current_justified_root.set(
      state.data.current_justified_checkpoint.root.toGaugeValue)
    beacon_previous_justified_epoch.set(
      state.data.previous_justified_checkpoint.epoch.toGaugeValue)
    beacon_previous_justified_root.set(
      state.data.previous_justified_checkpoint.root.toGaugeValue)
    beacon_finalized_epoch.set(
      state.data.finalized_checkpoint.epoch.toGaugeValue)
    beacon_finalized_root.set(
      state.data.finalized_checkpoint.root.toGaugeValue)

    let active_validators = count_active_validators(
      state.data, state.data.slot.epoch, cache).toGaugeValue
    beacon_active_validators.set(active_validators)
    beacon_current_active_validators.set(active_validators)

proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
           validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
           onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
           onReorgCb: OnReorgCallback = nil,
           onFinCb: OnFinalizedCallback = nil): ChainDAGRef =
  # TODO we require that the db contains both a head and a tail block -
  #      asserting here doesn't seem like the right way to go about it however..
  let
    tailBlockRoot = db.getTailBlock()
    headBlockRoot = db.getHeadBlock()

  doAssert tailBlockRoot.isSome(), "Missing tail block, database corrupt?"
  doAssert headBlockRoot.isSome(), "Missing head block, database corrupt?"

  let
    tailRoot = tailBlockRoot.get()
    tailBlock = db.getForkedBlock(tailRoot).get()
    tailRef = withBlck(tailBlock): BlockRef.init(tailRoot, blck.message)
    headRoot = headBlockRoot.get()

  let genesisRef = if tailBlock.slot == GENESIS_SLOT:
    tailRef
  else:
    let
      genesisBlockRoot = db.getGenesisBlock().expect(
        "preInit should have initialized the database with a genesis block root")
      genesisBlock = db.getForkedBlock(genesisBlockRoot).expect(
        "preInit should have initialized the database with a genesis block")
    withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)

  var
    backfillBlocks = newSeq[Eth2Digest](tailRef.slot.int)
    backfill = withBlck(tailBlock): blck.message.toBeaconBlockSummary()
    midRef: BlockRef
    backRoot: Option[Eth2Digest]
    startTick = Moment.now()

  # Loads blocks in the forward direction - these may or may not be available
  # in the database
  for slot, root in db.finalizedBlocks:
    if slot < tailRef.slot:
      backfillBlocks[slot.int] = root
      if backRoot.isNone():
        backRoot = some(root)
    elif slot == tailRef.slot:
      midRef = tailRef
    elif slot > tailRef.slot:
      let next = BlockRef.init(root, slot)
      link(midRef, next)
      midRef = next

  let finalizedTick = Moment.now()

  var
    headRef: BlockRef
curRef : BlockRef
# Now load the part from head to finalized in the other direction - these
# should meet at the midpoint if we loaded any finalized blocks
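# Roughly, each summary either matches up with the finalized block table (in
# which case we can stop early), falls below the tail and only extends the
# backfill table, anchors at the tail itself, or becomes a new `BlockRef`
# linked towards the head.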
for blck in db . getAncestorSummaries ( headRoot ) :
2022-01-30 16:51:04 +00:00
if midRef ! = nil and blck . summary . slot = = midRef . slot :
if midRef . root ! = blck . root :
fatal " Finalized block table does not match ancestor summaries, database corrupt? " ,
head = shortLog ( headRoot ) , cur = shortLog ( curRef ) ,
midref = shortLog ( midRef ) , blck = shortLog ( blck . root )
quit 1
if curRef = = nil :
# When starting from checkpoint, head == tail and there won't be any
# blocks in between
headRef = tailRef
else :
link ( midRef , curRef )
# The finalized blocks form a linear history by definition - we can skip
# straight to the tail
curRef = tailRef
break
if blck . summary . slot < tailRef . slot :
backfillBlocks [ blck . summary . slot . int ] = blck . root
if backRoot . isNone ( ) :
backfill = blck . summary
elif blck . summary . slot = = tailRef . slot :
if curRef = = nil :
curRef = tailRef
headRef = tailRef
else :
link ( tailRef , curRef )
curRef = curRef . parent
else :
let newRef = BlockRef . init ( blck . root , blck . summary . slot )
if curRef = = nil :
curRef = newRef
headRef = newRef
else :
link ( newRef , curRef )
curRef = curRef . parent
trace " Populating block dag " , key = curRef . root , val = curRef
if backRoot . isSome ( ) :
backfill = db . getBeaconBlockSummary ( backRoot . get ( ) ) . expect (
" Backfill block must have a summary " )
let summariesTick = Moment . now ( )
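# At this point `backfill` holds the summary of the earliest block we have
# stored for this chain - roughly, its `parent_root` marks where backfilling
# would continue.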
if curRef ! = tailRef :
fatal " Head block does not lead to tail - database corrupt? " ,
genesisRef , tailRef , headRef , curRef , tailRoot , headRoot
quit 1
while not containsBlock ( cfg , db , headRef . slot , headRef . root ) :
# When the database has been written with a pre-fork version of the
# software, it may happen that blocks produced using an "unforked"
# chain get written to the database - we need to skip such blocks
# when loading the database with a fork-compatible version
if isNil ( headRef . parent ) :
fatal " Cannot find block for head root - database corrupt? " ,
headRef = shortLog ( headRef )
quit 1
headRef = headRef . parent
# Because of an incorrect hardfork check, there might be no head block, in
# which case it's equivalent to the tail block
if headRef = = nil :
headRef = tailRef
let dag = ChainDAGRef (
db : db ,
validatorMonitor : validatorMonitor ,
genesis : genesisRef ,
tail : tailRef ,
backfill : backfill ,
finalizedHead : tailRef . atSlot ( ) ,
lastPrunePoint : tailRef . atSlot ( ) ,
# Tail is implicitly finalized - we'll adjust it below when computing the
# head state
heads : @ [ headRef ] ,
# The only allowed flag right now is verifyFinalization, as the others all
# allow skipping some validation.
updateFlags : { verifyFinalization } * updateFlags ,
cfg : cfg ,
onBlockAdded : onBlockCb ,
onHeadChanged : onHeadCb ,
onReorgHappened : onReorgCb ,
onFinHappened : onFinCb
)
block : # Initialize dag states
var
cur = headRef . atSlot ( )
# Now that we have a head block, we need to find the most recent state that
# we have saved in the database
while cur . blck ! = nil and
not getStateData ( db , cfg , dag . headState , cur , noRollback ) :
cur = cur . parentOrSlot ( )
if dag . headState . blck = = nil :
fatal " No state found in head history, database corrupt? " ,
genesisRef , tailRef , headRef , tailRoot , headRoot
# TODO Potentially we could recover from here instead of crashing - what
# would be a good recovery model?
quit 1
let
configFork = case dag . headState . data . kind
of BeaconStateFork . Phase0 : genesisFork ( cfg )
of BeaconStateFork . Altair : altairFork ( cfg )
of BeaconStateFork . Bellatrix : bellatrixFork ( cfg )
stateFork = getStateField ( dag . headState . data , fork )
if stateFork ! = configFork :
error " State from database does not match network, check --network parameter " ,
genesisRef , tailRef , headRef , tailRoot , headRoot , stateFork , configFork
quit 1
# db state is likely an epoch boundary state which is what we want for epochs
assign ( dag . epochRefState , dag . headState )
dag . forkDigests = newClone ForkDigests . init (
cfg ,
getStateField ( dag . headState . data , genesis_validators_root ) )
swap ( dag . backfillBlocks , backfillBlocks ) # avoid allocating a full copy
let forkVersions =
[ cfg . GENESIS_FORK_VERSION , cfg . ALTAIR_FORK_VERSION ,
cfg . BELLATRIX_FORK_VERSION , cfg . SHARDING_FORK_VERSION ]
for i in 0 .. < forkVersions . len :
for j in i + 1 .. < forkVersions . len :
doAssert forkVersions [ i ] ! = forkVersions [ j ]
doAssert cfg . ALTAIR_FORK_EPOCH < = cfg . BELLATRIX_FORK_EPOCH
doAssert cfg . BELLATRIX_FORK_EPOCH < = cfg . SHARDING_FORK_EPOCH
doAssert dag . updateFlags in [ { } , { verifyFinalization } ]
var cache : StateCache
if not dag . updateStateData ( dag . headState , headRef . atSlot ( ) , false , cache ) :
fatal " Unable to load head state, database corrupt? " ,
head = shortLog ( headRef )
quit 1
# Clearance most likely happens from head - assign it after rewinding head
assign ( dag . clearanceState , dag . headState )
withState ( dag . headState . data ) :
dag . validatorMonitor [ ] . registerState ( state . data )
updateBeaconMetrics ( dag . headState , cache )
# The tail block is "implicitly" finalized as it was given either as a
# checkpoint block, or is the genesis, thus we use it as a lower bound when
# computing the finalized head
let
finalized_checkpoint =
getStateField ( dag . headState . data , finalized_checkpoint )
finalizedSlot = max ( finalized_checkpoint . epoch . start_slot ( ) , tailRef . slot )
block : # Set up finalizedHead -> head
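# Walk from the head back to the finalized slot, registering each `BlockRef`
# (including the finalized head itself) in `forkBlocks`, the by-root lookup
# index for non-finalized blocks.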
var tmp = dag . head
while tmp . slot > finalizedSlot :
dag . forkBlocks . incl ( KeyedBlockRef . init ( tmp ) )
tmp = tmp . parent
dag . forkBlocks . incl ( KeyedBlockRef . init ( tmp ) )
dag . finalizedHead = tmp . atSlot ( finalizedSlot )
block : # Set up tail -> finalizedHead
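# `finalizedBlocks` is a slot-indexed sequence covering tail..finalizedHead,
# filled by walking parent links back from the finalized head.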
dag . finalizedBlocks . setLen ( ( dag . finalizedHead . slot - dag . tail . slot ) . int + 1 )
var tmp = dag . finalizedHead . blck
while not isNil ( tmp ) :
dag . finalizedBlocks [ ( tmp . slot - dag . tail . slot ) . int ] = tmp
tmp = tmp . parent
let stateTick = Moment . now ( )
# Pruning metadata
dag . lastPrunePoint = dag . finalizedHead
# Fill validator key cache in case we're loading an old database that doesn't
# have a cache
dag . updateValidatorKeys ( getStateField ( dag . headState . data , validators ) . asSeq ( ) )
dag . updateFinalizedBlocks ( )
withState ( dag . headState . data ) :
when stateFork > = BeaconStateFork . Altair :
dag . headSyncCommittees = state . data . get_sync_committee_cache ( cache )
info " Block DAG initialized " ,
head = shortLog ( dag . head ) ,
finalizedHead = shortLog ( dag . finalizedHead ) ,
tail = shortLog ( dag . tail ) ,
backfill = ( dag . backfill . slot , shortLog ( dag . backfill . parent_root ) ) ,
finalizedDur = finalizedTick - startTick ,
summariesDur = summariesTick - finalizedTick ,
stateDur = stateTick - summariesTick ,
indexDur = Moment . now ( ) - stateTick
dag
template genesisValidatorsRoot * ( dag : ChainDAGRef ) : Eth2Digest =
getStateField ( dag . headState . data , genesis_validators_root )
func getEpochRef * (
dag : ChainDAGRef , state : StateData , cache : var StateCache ) : EpochRef =
## Get a cached `EpochRef` or construct one based on the given state - always
## returns an EpochRef instance
let
blck = state . blck
epoch = state . data . get_current_epoch ( )
var epochRef = dag . findEpochRef ( blck , epoch )
if epochRef . isErr :
let res = EpochRef . init ( dag , state , cache )
if epoch > = dag . finalizedHead . slot . epoch ( ) :
# Only cache epoch information for unfinalized blocks - earlier states
# are seldom used (eg RPC), so no need to cache
# Because we put a cap on the number of epochRefs we store, we want to
# prune the least useful state - for now, we'll assume that to be the
# oldest epochRef we know about.
var
oldest = 0
for x in 0 .. < dag . epochRefs . len :
let candidate = dag . epochRefs [ x ]
if candidate = = nil :
oldest = x
break
if candidate . key . epoch < dag . epochRefs [ oldest ] . epoch :
oldest = x
dag . epochRefs [ oldest ] = res
res
else :
epochRef . get ( )
proc getEpochRef * (
dag : ChainDAGRef , blck : BlockRef , epoch : Epoch ,
preFinalized : bool ) : Opt [ EpochRef ] =
## Return a cached EpochRef or construct one from the database, if possible -
## returns `none` on failure.
##
## When `preFinalized` is true, include epochs from before the finalized
## checkpoint in the search - this potentially can result in long processing
## times due to state replays.
##
## Requests for epochs >= dag.finalizedHead.slot.epoch always return an
## instance. One must be careful to avoid race conditions in `async` code
## where the finalized head might change during an `await`.
##
## Requests for epochs < dag.finalizedHead.slot.epoch may fail, either because
## the search was limited by the `preFinalized` flag or because state history
## has been pruned - none will be returned in this case.
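## For example, `dag.getEpochRef(dag.head, dag.head.slot.epoch, false)` is
## expected to succeed, since the head epoch is never before the finalized
## epoch.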
if not preFinalized and epoch < dag . finalizedHead . slot . epoch :
return err ( )
let epochRef = dag . findEpochRef ( blck , epoch )
if epochRef . isOk ( ) :
beacon_state_data_cache_hits . inc
return epochRef
beacon_state_data_cache_misses . inc
let
ancestor = epochAncestor ( blck , epoch )
if isNil ( ancestor . blck ) : # past the tail
return err ( )
dag . withUpdatedState (
dag . epochRefState , ancestor . blck . atEpochStart ( ancestor . epoch ) ) do :
ok ( dag . getEpochRef ( stateData , cache ) )
do :
err ( )
proc getFinalizedEpochRef * ( dag : ChainDAGRef ) : EpochRef =
dag . getEpochRef (
dag . finalizedHead . blck , dag . finalizedHead . slot . epoch , false ) . expect (
" getEpochRef for finalized head should always succeed " )
func stateCheckpoint * ( bs : BlockSlot ) : BlockSlot =
## The first ancestor BlockSlot that is a state checkpoint
var bs = bs
while not isStateCheckPoint ( bs ) :
bs = bs . parentOrSlot
bs
template forkAtEpoch * ( dag : ChainDAGRef , epoch : Epoch ) : Fork =
forkAtEpoch ( dag . cfg , epoch )
proc forkDigestAtEpoch * ( dag : ChainDAGRef , epoch : Epoch ) : ForkDigest =
case dag . cfg . stateForkAtEpoch ( epoch )
of BeaconStateFork . Bellatrix : dag . forkDigests . bellatrix
of BeaconStateFork . Altair : dag . forkDigests . altair
of BeaconStateFork . Phase0 : dag . forkDigests . phase0
proc getState ( dag : ChainDAGRef , state : var StateData , bs : BlockSlot ) : bool =
## Load a state from the database given a block and a slot - this will first
## look up the state root in the state root table, then load the corresponding
## state, if it exists
if not bs . isStateCheckpoint ( ) :
return false # Only state checkpoints are stored - no need to hit DB
let stateRoot = dag . db . getStateRoot ( bs . blck . root , bs . slot )
if stateRoot . isNone ( ) : return false
let restoreAddr =
# Any restore point will do as long as it's not the object being updated
if unsafeAddr ( state ) = = unsafeAddr ( dag . headState ) :
unsafeAddr dag . clearanceState
else :
unsafeAddr dag . headState
let v = addr state . data
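# `restore` acts as a rollback: if loading fails partway through, the state
# being updated is overwritten with a known-good copy so callers never see a
# partially assigned state.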
func restore ( ) =
assign ( v [ ] , restoreAddr [ ] . data )
getStateData ( dag . db , dag . cfg , state , bs , restore )
proc putState ( dag : ChainDAGRef , state : StateData ) =
# Store a state and its root
logScope :
blck = shortLog ( state . blck )
stateSlot = shortLog ( getStateField ( state . data , slot ) )
stateRoot = shortLog ( getStateRoot ( state . data ) )
if not isStateCheckpoint ( state . blck . atSlot ( getStateField ( state . data , slot ) ) ) :
return
# Don't consider legacy tables here, they are slow to read so we'll want to
# rewrite things in the new database anyway.
if dag . db . containsState ( getStateRoot ( state . data ) , legacy = false ) :
return
let startTick = Moment . now ( )
# Ideally we would save the state and the root lookup cache in a single
# transaction to prevent database inconsistencies, but the state loading code
# is resilient against one or the other going missing
withState ( state . data ) :
dag . db . putState ( state )
debug " Stored state " , putStateDur = Moment . now ( ) - startTick
proc getBlockRange * (
dag : ChainDAGRef , startSlot : Slot , skipStep : uint64 ,
output : var openArray [ BlockId ] ) : Natural =
## This function populates an `output` buffer of blocks from slots ranging
## from `startSlot` up to, but not including,
## `startSlot + skipStep * output.len`, skipping any slots that don't have
## a block.
##
## Blocks will be written to `output` from the end without gaps, even if
## a block is missing in a particular slot. The return value shows how
## many slots were missing blocks - to iterate over the result, start
## at this index.
##
## If there were no blocks in the range, `output.len` will be returned.
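## As a hypothetical example, assuming a block exists at every slot, slot 100
## is not before `dag.backfill.slot`, and `dag.head.slot >= 104`, then
## `dag.getBlockRange(Slot(100), 2, buf)` with `buf.len == 3` would fill `buf`
## with the block ids at slots 100, 102 and 104 and return 0.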
let
requestedCount = output . lenu64
headSlot = dag . head . slot
trace " getBlockRange entered " ,
head = shortLog ( dag . head . root ) , requestedCount , startSlot , skipStep , headSlot
if startSlot < dag . backfill . slot :
notice " Got request for pre-backfill slot " ,
startSlot , backfillSlot = dag . backfill . slot
return output . len
if headSlot < = startSlot or requestedCount = = 0 :
return output . len # Identical to returning an empty set of blocks as indicated above
let
runway = uint64 ( headSlot - startSlot )
# This is the number of blocks that will follow the start block
extraSlots = min ( runway div skipStep , requestedCount - 1 )
# If `skipStep` is very large, `extraSlots` should be 0 from
# the previous line, so `endSlot` will be equal to `startSlot`:
endSlot = startSlot + extraSlots * skipStep
var
curSlot = endSlot
o = output . len
# Process all blocks that follow the start block (may be zero blocks)
while curSlot > startSlot :
let bs = dag . getBlockIdAtSlot ( curSlot )
if bs . isProposed ( ) :
o - = 1
output [ o ] = bs . bid
curSlot - = skipStep
# Handle start slot separately (to avoid underflow when computing curSlot)
let bs = dag . getBlockIdAtSlot ( startSlot )
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
if bs . isProposed ( ) :
o - = 1
output [ o ] = bs . bid
2020-05-19 14:18:07 +00:00
o # Return the index of the first non-nil item in the output
2020-08-13 09:50:05 +00:00
proc advanceSlots(
    dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
    cache: var StateCache, info: var ForkedEpochInfo) =
  # Given a state, advance it zero or more slots by applying empty slot
  # processing - the state must be positioned at a slot before or equal to the
  # target
  doAssert getStateField(state.data, slot) <= slot

  while getStateField(state.data, slot) < slot:
    let preEpoch = getStateField(state.data, slot).epoch
    loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

    process_slots(
      dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
      dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct")

    if save:
      dag.putState(state)

    # The reward information in the state transition is computed for epoch
    # transitions - when transitioning into epoch N, the activities in epoch
    # N-2 are translated into balance updates, and this is what we capture
    # in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
    # which is an acceptable tradeoff for monitoring.
    withState(state.data):
      let postEpoch = state.data.slot.epoch
      if preEpoch != postEpoch:
        dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data)

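# A minimal usage sketch: a caller that already holds a state on the right
# branch can fill in empty slots up to some target slot (here a hypothetical
# `targetSlot`, which per the assertion above must not be below the state's
# current slot):
#
#   var
#     cache: StateCache
#     info: ForkedEpochInfo
#   dag.advanceSlots(state, targetSlot, save = false, cache, info)
#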
proc applyBlock(
    dag: ChainDAGRef,
    state: var StateData, blck: BlockRef, flags: UpdateFlags,
    cache: var StateCache, info: var ForkedEpochInfo) =
  # Apply a single block to the state - the state must be positioned at the
  # parent of the block with a slot lower than the one of the block being
  # applied
  doAssert state.blck == blck.parent

  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

  case dag.cfg.blockForkAtEpoch(blck.slot.epoch)
  of BeaconBlockFork.Phase0:
    let data = dag.db.getPhase0Block(blck.root).expect("block loaded")
    state_transition(
      dag.cfg, state.data, data, cache, info,
      flags + dag.updateFlags + {slotProcessed}, noRollback).expect(
      "Blocks from database must not fail to apply")
  of BeaconBlockFork.Altair:
    let data = dag.db.getAltairBlock(blck.root).expect("block loaded")
    state_transition(
      dag.cfg, state.data, data, cache, info,
      flags + dag.updateFlags + {slotProcessed}, noRollback).expect(
      "Blocks from database must not fail to apply")
  of BeaconBlockFork.Bellatrix:
    let data = dag.db.getMergeBlock(blck.root).expect("block loaded")
    state_transition(
      dag.cfg, state.data, data, cache, info,
      flags + dag.updateFlags + {slotProcessed}, noRollback).expect(
      "Blocks from database must not fail to apply")

  state.blck = blck

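# A minimal usage sketch: `applyBlock` is meant for replaying blocks that are
# already in the database, with the state positioned at each block's parent
# (`blocksFromParentToTip` is hypothetical):
#
#   for blck in blocksFromParentToTip:
#     dag.applyBlock(state, blck, {}, cache, info)
#
# Each call advances `state.blck`, so the parent invariant holds for the next
# iteration - `updateStateData` below does exactly this with `ancestors`.
#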
proc updateStateData*(
    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
    cache: var StateCache): bool =
  ## Rewind or advance state such that it matches the given block and slot -
  ## this may include replaying from an earlier snapshot if blck is on a
  ## different branch or has advanced to a higher slot number than slot
  ## If `bs.slot` is higher than `bs.blck.slot`, `updateStateData` will fill in
  ## with empty/non-block slots

  # First, see if we're already at the requested block. If we are, also check
  # that the state has not been advanced past the desired block - if it has,
  # an earlier state must be loaded since there's no way to undo the slot
  # transitions

  if isNil(bs.blck):
    info "Requesting state for unknown block, historical data not available?",
      head = shortLog(dag.head), tail = shortLog(dag.tail)
    return false

  let
    startTick = Moment.now()
    current {.used.} = state.blck.atSlot(getStateField(state.data, slot))

  var
    ancestors: seq[BlockRef]
    found = false

  template exactMatch(state: StateData, bs: BlockSlot): bool =
    # Both block and slot match exactly - the state can be used as-is to
    # serve the desired blockslot
    state.blck == bs.blck and getStateField(state.data, slot) == bs.slot

  template canAdvance(state: StateData, bs: BlockSlot): bool =
    # The block is the same and we're at an early enough slot - the state can
    # be used to arrive at the desired blockslot
    state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot

  # Fast path: check all caches for an exact match - this is faster than
  # advancing a state where there's epoch processing to do, by a wide margin -
  # it also avoids `hash_tree_root` for slot processing
  if exactMatch(state, bs):
    found = true
  elif not save:
    # When required to save states, we cannot rely on the caches because that
    # would skip the extra processing that save does - not all information that
    # goes into the database is cached
    if exactMatch(dag.headState, bs):
      assign(state, dag.headState)
      found = true
    elif exactMatch(dag.clearanceState, bs):
      assign(state, dag.clearanceState)
      found = true
    elif exactMatch(dag.epochRefState, bs):
      assign(state, dag.epochRefState)
      found = true

  const RewindBlockThreshold = 64
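  # The threshold trades block replay against a database load: in the
  # benchmarks accompanying these changes, applying a block took on the order
  # of milliseconds while loading a full state took tens of milliseconds or
  # more, so advancing an in-memory state by up to ~64 blocks is usually the
  # cheaper option (indicative numbers, not guarantees).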
  if not found:
    # No exact match found - see if any in-memory state can be used as a base
    # onto which we can apply a few blocks - there's a tradeoff here between
    # loading the state from disk and performing the block applications
    var cur = bs
    while ancestors.len < RewindBlockThreshold:
      if isNil(cur.blck): # tail reached
        break

      if canAdvance(state, cur): # Typical case / fast path when there's no reorg
        found = true
        break

      if not save: # see above
        if canAdvance(dag.headState, cur):
          assign(state, dag.headState)
          found = true
          break

        if canAdvance(dag.clearanceState, cur):
          assign(state, dag.clearanceState)
          found = true
          break

        if canAdvance(dag.epochRefState, cur):
          assign(state, dag.epochRefState)
          found = true
          break

      if cur.isProposed():
        # This is not an empty slot, so the block will need to be applied to
        # eventually reach bs
        ancestors.add(cur.blck)

      # Move slot by slot to capture epoch boundary states
      cur = cur.parentOrSlot()

  if not found:
    debug "UpdateStateData cache miss",
      current = shortLog(current), target = shortLog(bs)

    # Either the state is too new or was created by applying a different block.
    # We'll now resort to loading the state from the database then reapplying
    # blocks until we reach the desired point in time.
    var cur = bs
    ancestors.setLen(0)

    # Look for a state in the database and load it - as long as it cannot be
    # found, keep track of the blocks that are needed to reach it from the
    # state that eventually will be found.
    # If we hit the tail, it means that we've reached a point for which we can
    # no longer recreate history - this happens for example when starting from
    # a checkpoint block
    let startEpoch = bs.slot.epoch
    while not canAdvance(state, cur) and not dag.getState(state, cur):
      # There's no state saved for this particular BlockSlot combination, and
      # the state we have can't trivially be advanced (in case it was older than
      # RewindBlockThreshold), keep looking..
      if cur.isProposed():
        # This is not an empty slot, so the block will need to be applied to
        # eventually reach bs
        ancestors.add(cur.blck)

      if cur.slot == dag.tail.slot or
          (cur.slot.epoch + EPOCHS_PER_STATE_SNAPSHOT * 2 < startEpoch):
        # We've either walked two full state snapshot lengths or hit the tail
        # and still can't find a matching state: this can happen when
        # starting the node from an arbitrary finalized checkpoint and not
        # backfilling the states
        notice "Request for pruned historical state",
          request = shortLog(bs), tail = shortLog(dag.tail), cur = shortLog(cur)
        return false

      # Move slot by slot to capture epoch boundary states
      cur = cur.parentOrSlot()

    beacon_state_rewinds.inc()

  # Starting state has been assigned, either from memory or database
  let
    assignTick = Moment.now()
    ancestor {.used.} = state.blck.atSlot(getStateField(state.data, slot))
    ancestorRoot {.used.} = getStateRoot(state.data)

  var info: ForkedEpochInfo
  # Time to replay all the blocks between then and now
  for i in countdown(ancestors.len - 1, 0):
    # Because the ancestors are in the database, there's no need to persist them
    # again. Also, because we're applying blocks that were loaded from the
    # database, we can skip certain checks that have already been performed
    # before adding the block to the database.
    dag.applyBlock(state, ancestors[i], {}, cache, info)

  # ...and make sure to process empty slots as requested
  dag.advanceSlots(state, bs.slot, save, cache, info)

  # ...and make sure to load the state cache, if it exists
  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

  let
    assignDur = assignTick - startTick
    replayDur = Moment.now() - assignTick

  # TODO https://github.com/status-im/nim-chronicles/issues/108
  if (assignDur + replayDur) >= 250.millis:
    # This might indicate there's a cache that's not in order or a disk that is
    # too slow - for now, it's here for investigative purposes and the cutoff
    # time might need tuning
    info "State replayed",
      blocks = ancestors.len,
      slots = getStateField(state.data, slot) - ancestor.slot,
      current = shortLog(current),
      ancestor = shortLog(ancestor),
      target = shortLog(bs),
      ancestorStateRoot = shortLog(ancestorRoot),
      targetStateRoot = shortLog(getStateRoot(state.data)),
      found,
      assignDur,
      replayDur
  elif ancestors.len > 0:
    debug "State replayed",
      blocks = ancestors.len,
      slots = getStateField(state.data, slot) - ancestor.slot,
      current = shortLog(current),
      ancestor = shortLog(ancestor),
      target = shortLog(bs),
      ancestorStateRoot = shortLog(ancestorRoot),
      targetStateRoot = shortLog(getStateRoot(state.data)),
      found,
      assignDur,
      replayDur
  else: # Normal case!
    trace "State advanced",
      blocks = ancestors.len,
      slots = getStateField(state.data, slot) - ancestor.slot,
      current = shortLog(current),
      ancestor = shortLog(ancestor),
      target = shortLog(bs),
      ancestorStateRoot = shortLog(ancestorRoot),
      targetStateRoot = shortLog(getStateRoot(state.data)),
      found,
      assignDur,
      replayDur

  true

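# A minimal usage sketch: callers typically rewind a scratch copy of a state
# rather than the canonical head state (`tmpState`, `blck` and `slot` are
# hypothetical names owned by the caller):
#
#   var cache: StateCache
#   if not dag.updateStateData(tmpState, blck.atSlot(slot), false, cache):
#     discard # unknown block, or its history has been pruned
#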
proc delState(dag: ChainDAGRef, bs: BlockSlot) =
  # Delete the state and its root mapping for a particular block+slot
  if not isStateCheckpoint(bs):
    return # We only ever save epoch states

  if (let root = dag.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()):
    dag.db.delState(root.get())
    dag.db.delStateRoot(bs.blck.root, bs.slot)

proc pruneBlocksDAG(dag: ChainDAGRef) =
  ## This prunes the block DAG
  ## This does NOT prune the cached state checkpoints and EpochRef
  ## This must be done after a new finalization point is reached
  ## to invalidate pending blocks or attestations referring
  ## to a now invalid fork.
  ##
  ## This does NOT update the `dag.lastPrunePoint` field,
  ## as the caches and fork choice can be pruned at a later time.

  # Clean up block refs, walking block by block
  let startTick = Moment.now()

  # Finalization means that we choose a single chain as the canonical one -
  # it also means we're no longer interested in any branches from that chain
  # up to the finalization point
  let hlen = dag.heads.len
  for i in 0 ..< hlen:
    let n = hlen - i - 1
    let head = dag.heads[n]
    if dag.finalizedHead.blck.isAncestorOf(head):
      continue

    var cur = head.atSlot()
    while not cur.blck.isAncestorOf(dag.finalizedHead.blck):
      dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`

      if cur.isProposed():
        dag.forkBlocks.excl(KeyedBlockRef.init(cur.blck))
        dag.db.delBlock(cur.blck.root)

      if cur.blck.parent.isNil:
        break
      cur = cur.parentOrSlot

    dag.heads.del(n)

  debug "Pruned the blockchain DAG",
    currentCandidateHeads = dag.heads.len,
    prunedHeads = hlen - dag.heads.len,
    dagPruneDur = Moment.now() - startTick

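# Note that `pruneBlocksDAG` only drops non-canonical branches below the new
# finalized head - the state-checkpoint and EpochRef caches are pruned
# separately by `pruneStateCachesDAG` below, which may run at a later time.
#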
iterator syncSubcommittee*(
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex): ValidatorIndex =
  var i = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
  let onePastEndIdx = min(syncCommittee.len, i + SYNC_SUBCOMMITTEE_SIZE)

  while i < onePastEndIdx:
    yield syncCommittee[i]
    inc i

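# Subcommittee `k` is the contiguous slice
# [k * SYNC_SUBCOMMITTEE_SIZE, min(len, (k + 1) * SYNC_SUBCOMMITTEE_SIZE))
# of the flat committee - with the mainnet values SYNC_COMMITTEE_SIZE = 512
# and SYNC_SUBCOMMITTEE_SIZE = 128, subcommittee 1 yields elements 128..255.
# A minimal usage sketch:
#
#   for valIdx in syncCommittee.syncSubcommittee(SyncSubcommitteeIndex(1)):
#     echo valIdx
#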
iterator syncSubcommitteePairs*(
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex): tuple[validatorIdx: ValidatorIndex,
                                                   subcommitteeIdx: int] =
  var i = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
  let onePastEndIdx = min(syncCommittee.len, i + SYNC_SUBCOMMITTEE_SIZE)

  while i < onePastEndIdx:
    yield (syncCommittee[i], i)
    inc i

func syncCommitteeParticipants*(dag: ChainDAGRef,
                                slot: Slot): seq[ValidatorIndex] =
  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      let
        period = sync_committee_period(slot)
        curPeriod = sync_committee_period(state.data.slot)

      if period == curPeriod:
        @(dag.headSyncCommittees.current_sync_committee)
      elif period == curPeriod + 1:
        @(dag.headSyncCommittees.next_sync_committee)
      else: @[]
    else:
      @[]

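# The head state only carries the current and next sync committees, so any
# other period falls through to the empty list above - callers that need
# older periods must replay a historical state instead of relying on this
# shortcut.
#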
func getSubcommitteePositionsAux(
    dag: ChainDAGRef,
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex,
    validatorIdx: uint64): seq[uint64] =
  var pos = 0'u64
  for valIdx in syncCommittee.syncSubcommittee(subcommitteeIdx):
    if validatorIdx == uint64(valIdx):
      result.add pos
    inc pos

func getSubcommitteePositions*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex,
    validatorIdx: uint64): seq[uint64] =
  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      let
        period = sync_committee_period(slot)
        curPeriod = sync_committee_period(state.data.slot)

      template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] =
        dag.getSubcommitteePositionsAux(
          syncCommittee, subcommitteeIdx, validatorIdx)

      if period == curPeriod:
        search(dag.headSyncCommittees.current_sync_committee)
      elif period == curPeriod + 1:
        search(dag.headSyncCommittees.next_sync_committee)
      else: @[]
    else:
      @[]

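# A minimal usage sketch: looking up where a validator sits within a
# subcommittee for a given slot (the concrete index values are hypothetical):
#
#   let positions = dag.getSubcommitteePositions(
#     slot, SyncSubcommitteeIndex(0), 1234'u64)
#
# An empty result means the validator is not in that subcommittee, or the
# requested period is not covered by the head state.
#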
template syncCommitteeParticipants*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex): seq[ValidatorIndex] =
  toSeq(syncSubcommittee(dag.syncCommitteeParticipants(slot), subcommitteeIdx))

iterator syncCommitteeParticipants*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex,
    aggregationBits: SyncCommitteeAggregationBits): ValidatorIndex =
  for pos, valIdx in dag.syncCommitteeParticipants(slot, subcommitteeIdx):
    if pos < aggregationBits.bits and aggregationBits[pos]:
      yield valIdx

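# A minimal usage sketch: combining the iterator with a contribution's
# aggregation bits yields only the members that actually signed
# (`contribution` is a hypothetical SyncCommitteeContribution-like value):
#
#   for valIdx in dag.syncCommitteeParticipants(
#       contribution.slot, subcommitteeIdx, contribution.aggregation_bits):
#     echo valIdx
#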
func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool =
  dag.lastPrunePoint != dag.finalizedHead

proc pruneStateCachesDAG*(dag: ChainDAGRef) =
  ## This prunes the cached state checkpoints and EpochRef
  ## This does NOT prune the state associated with invalidated blocks on a fork
  ## They are pruned via `pruneBlocksDAG`
  ##
  ## This updates the `dag.lastPrunePoint` variable
  doAssert dag.needStateCachesAndForkChoicePruning()

  let startTick = Moment.now()
  block: # Remove states, walking slot by slot
    # We remove all state checkpoints that come _before_ the current finalized
    # head, as we might frequently be asked to replay states from the
    # finalized checkpoint and onwards (for example when validating blocks and
    # attestations)
    var
      cur = dag.finalizedHead.stateCheckpoint.parentOrSlot
      prev = dag.lastPrunePoint.stateCheckpoint.parentOrSlot
    while cur.blck != nil and cur != prev:
      if cur.slot.epoch mod EPOCHS_PER_STATE_SNAPSHOT != 0 and
          cur.slot != dag.tail.slot:
        dag.delState(cur)
      cur = cur.parentOrSlot

  let statePruneTick = Moment.now()

  block: # Clean up old EpochRef instances
    # After finalization, we can clear up the epoch cache and save memory -
    # it will be recomputed if needed
    for i in 0 ..< dag.epochRefs.len:
      if dag.epochRefs[i] != nil and
          dag.epochRefs[i].epoch < dag.finalizedHead.slot.epoch:
        dag.epochRefs[i] = nil

  let epochRefPruneTick = Moment.now()

  dag.lastPrunePoint = dag.finalizedHead

  debug "Pruned the state checkpoints and DAG caches.",
    statePruneDur = statePruneTick - startTick,
    epochRefPruneDur = epochRefPruneTick - statePruneTick

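# A typical finalization flow calls `pruneBlocksDAG` as part of `updateHead`
# below and runs `pruneStateCachesDAG` later, once
# `needStateCachesAndForkChoicePruning` reports that `dag.lastPrunePoint` has
# fallen behind the new finalized head.
#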
proc updateHead*(
    dag: ChainDAGRef,
    newHead: BlockRef,
    quarantine: var Quarantine) =
  ## Update what we consider to be the current head, as given by the fork
  ## choice.
  ##
  ## The choice of head affects the choice of finalization point - the order
  ## of operations naturally becomes important here - after updating the head,
  ## blocks that were once considered potential candidates for a tree will
  ## now fall from grace, or no longer be considered resolved.
  doAssert not newHead.isNil()
  doAssert not newHead.parent.isNil() or newHead.slot <= dag.tail.slot

  logScope:
    newHead = shortLog(newHead)

  if dag.head == newHead:
    trace "No head block update"
    return

  let
    lastHead = dag.head
    lastHeadStateRoot = getStateRoot(dag.headState.data)

  # Start off by making sure we have the right state - updateStateData will try
  # to use existing in-memory states to make this smooth
  var cache: StateCache
  if not updateStateData(
      dag, dag.headState, newHead.atSlot(), false, cache):
    # Advancing the head state should never fail, given that the tail is
    # implicitly finalised, the head is a descendant of the tail and we always
    # store the tail state in the database, as well as every epoch slot state in
    # between
    fatal "Unable to load head state during head update, database corrupt?",
      lastHead = shortLog(lastHead)
    quit 1

  dag.db.putHeadBlock(newHead.root)

  updateBeaconMetrics(dag.headState, cache)

  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)

  let
    finalized_checkpoint =
      getStateField(dag.headState.data, finalized_checkpoint)
    finalizedSlot = max(finalized_checkpoint.epoch.start_slot(), dag.tail.slot)
    finalizedHead = newHead.atSlot(finalizedSlot)

  doAssert (not finalizedHead.blck.isNil),
    "Block graph should always lead to a finalized block"

  let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
  if not(isAncestor):
    notice "Updated head block with chain reorg",
      lastHead = shortLog(lastHead),
      headParent = shortLog(newHead.parent),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      headBlock = shortLog(dag.headState.blck),
      stateSlot = shortLog(getStateField(dag.headState.data, slot)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

    if not(isNil(dag.onReorgHappened)):
      let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
                                      lastHead.root, newHead.root,
                                      lastHeadStateRoot,
                                      getStateRoot(dag.headState.data))
      dag.onReorgHappened(data)

    # A reasonable criterion for "reorganizations of the chain"
    quarantine.clearAfterReorg()

    beacon_reorgs_total_total.inc()
    beacon_reorgs_total.inc()
  else:
    debug "Updated head block",
      head = shortLog(dag.headState.blck),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

  if not(isNil(dag.onHeadChanged)):
    let
      currentEpoch = epoch(newHead.slot)
      depBlock = dag.head.dependentBlock(dag.tail, currentEpoch)
      prevDepBlock = dag.head.prevDependentBlock(dag.tail, currentEpoch)
      epochTransition = (finalizedHead != dag.finalizedHead)
    let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
                                         getStateRoot(dag.headState.data),
                                         epochTransition, depBlock.root,
                                         prevDepBlock.root)
    dag.onHeadChanged(data)

  withState(dag.headState.data):
    # Every time the head changes, the "canonical" view of balances and other
    # state-related metrics change - notify the validator monitor.
    # Doing this update during head update ensures there's a reasonable number
    # of such updates happening - at most once per valid block.
    dag.validatorMonitor[].registerState(state.data)

  if finalizedHead != dag.finalizedHead:
    debug "Reached new finalization checkpoint",
      head = shortLog(dag.headState.blck),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

block :
# Update `dag.finalizedBlocks` with all newly finalized blocks (those
# newer than the previous finalized head), then update `dag.finalizedHead`
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
dag . finalizedBlocks . setLen ( finalizedHead . slot - dag . tail . slot + 1 )
2021-12-06 18:52:35 +00:00
var tmp = finalizedHead . blck
while not isNil ( tmp ) and tmp . slot > = dag . finalizedHead . slot :
2021-12-13 13:36:06 +00:00
        dag.finalizedBlocks[(tmp.slot - dag.tail.slot).int] = tmp
2022-01-21 11:33:16 +00:00
        if tmp != finalizedHead.blck:
          # The newly finalized block itself should remain in here so that fork
          # choice still can find it via root
          dag.forkBlocks.excl(KeyedBlockRef.init(tmp))
2021-12-06 18:52:35 +00:00
        tmp = tmp.parent

      dag.finalizedHead = finalizedHead
2020-07-25 19:41:12 +00:00
2022-01-30 16:51:04 +00:00
    dag.updateFinalizedBlocks()
2021-04-01 11:26:17 +00:00
    # Pruning the block dag is required every time the finalized head changes
    # in order to clear out blocks that are no longer viable and should
    # therefore no longer be considered as part of the chain we're following
    dag.pruneBlocksDAG()
2021-03-09 14:36:17 +00:00
2021-09-22 12:17:15 +00:00
    # Send notification about new finalization point via callback.
    if not(isNil(dag.onFinHappened)):
2021-12-09 17:06:21 +00:00
      let stateRoot =
        if dag.finalizedHead.slot == dag.head.slot:
          getStateRoot(dag.headState.data)
        elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot:
          getStateField(dag.headState.data, state_roots).data[
            int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)]
        else:
          # The finalized slot is more than SLOTS_PER_HISTORICAL_ROOT slots
          # older than the head, so its state root has been cycled out of the
          # head state's `state_roots` buffer - report a zero root instead
          Eth2Digest()

      let data = FinalizationInfoObject.init(
        dag.finalizedHead.blck.root,
        stateRoot,
        dag.finalizedHead.slot.epoch)
2022-01-31 17:28:26 +00:00
      dag.onFinHappened(dag, data)
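# Worked example (illustrative note, not part of the original module): with
# SLOTS_PER_HISTORICAL_ROOT = 8192, a head at slot 10000 and a finalized
# checkpoint at slot 9000, the finalized state root is still available from
# the head state because 9000 + 8192 > 10000, and it sits at index
# 9000 mod 8192 = 808 of the `state_roots` circular buffer.
when false:
  doAssert 9000 + 8192 > 10000
  doAssert 9000 mod 8192 == 808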
2021-09-22 12:17:15 +00:00
2021-12-21 10:40:14 +00:00
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
2021-11-10 11:39:08 +00:00
  # Lightweight check to see if we have the minimal information needed to
  # load up a database - we don't check the head here: if something is wrong
  # with the head, it's likely an initialized but corrupt database, which
  # `init` will detect
2020-05-19 14:18:07 +00:00
  let
2021-11-10 11:39:08 +00:00
    genesisBlockRoot = db.getGenesisBlock()
2020-05-19 14:18:07 +00:00
2021-12-21 10:40:14 +00:00
  if not genesisBlockRoot.isSome():
    return err("Genesis block root missing")
2020-05-19 14:18:07 +00:00
  let
2021-11-10 11:39:08 +00:00
    genesisBlock = db.getForkedBlock(genesisBlockRoot.get())
2021-12-21 10:40:14 +00:00
  if not genesisBlock.isSome():
    return err("Genesis block missing")
2020-05-19 14:18:07 +00:00
2021-11-10 11:39:08 +00:00
  let
    genesisStateRoot = withBlck(genesisBlock.get()): blck.message.state_root
2021-12-21 10:40:14 +00:00
  if not db.containsState(genesisStateRoot):
    return err("Genesis state missing")

  let
    tailBlockRoot = db.getTailBlock()
  if not tailBlockRoot.isSome():
    return err("Tail block root missing")

  let
    tailBlock = db.getForkedBlock(tailBlockRoot.get())
  if not tailBlock.isSome():
    return err("Tail block missing")

  let
2021-11-10 11:39:08 +00:00
    tailStateRoot = withBlck(tailBlock.get()): blck.message.state_root
2020-05-19 14:18:07 +00:00
2021-12-21 10:40:14 +00:00
  if not db.containsState(tailStateRoot):
    return err("Tail state missing")
2020-05-19 14:18:07 +00:00
2021-12-21 10:40:14 +00:00
  ok()
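# Illustrative usage sketch (not part of the original module): a caller might
# verify the database before constructing a `ChainDAGRef`; `db` is assumed to
# be an already-open `BeaconChainDB`.
when false:
  let check = ChainDAGRef.isInitialized(db)
  if check.isErr():
    # e.g. fall back to `preInit` with a genesis or checkpoint state
    echo "database not initialized: ", check.error()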
2020-05-19 14:18:07 +00:00
proc preInit*(
2020-09-22 20:42:42 +00:00
    T: type ChainDAGRef, db: BeaconChainDB,
2021-11-10 11:39:08 +00:00
    genesisState, tailState: ForkedHashedBeaconState,
    tailBlock: ForkedTrustedSignedBeaconBlock) =
2020-07-31 14:49:06 +00:00
  # Write a genesis state, the way the ChainDAGRef expects it to be stored in
2020-05-19 14:18:07 +00:00
  # the database
2021-11-10 11:39:08 +00:00
  logScope:
    genesisStateRoot = getStateRoot(genesisState)
    genesisStateSlot = getStateField(genesisState, slot)
    tailStateRoot = getStateRoot(tailState)
    tailStateSlot = getStateField(tailState, slot)

  let genesisBlockRoot = withState(genesisState):
    if state.root != getStateRoot(tailState):
      # Different tail and genesis
      if state.data.slot >= getStateField(tailState, slot):
        fatal "Tail state must be newer or the same as genesis state"
        quit 1

      let tail_genesis_validators_root =
        getStateField(tailState, genesis_validators_root)
      if state.data.genesis_validators_root != tail_genesis_validators_root:
        fatal "Tail state doesn't match genesis validators root, it is likely from a different network!",
          genesis_validators_root = shortLog(state.data.genesis_validators_root),
          tail_genesis_validators_root = shortLog(tail_genesis_validators_root)
        quit 1
2021-11-18 12:02:43 +00:00
      let blck = get_initial_beacon_block(state)
2021-11-10 11:39:08 +00:00
      db.putBlock(blck)
2022-01-25 08:28:26 +00:00
      db.putState(state)
      db.putGenesisBlock(blck.root)
2021-11-10 11:39:08 +00:00
      blck.root
    else: # tail and genesis are the same
      withBlck(tailBlock):
        db.putGenesisBlock(blck.root)
        blck.root

  withState(tailState):
    withBlck(tailBlock):
      # When looking up the state root of the tail block, we don't use the
      # BlockSlot->state_root map, so the only way the init code can find the
      # state is through the state root in the block - this could be relaxed
      # down the line
      if blck.message.state_root != state.root:
        fatal "State must match the given block",
          tailBlck = shortLog(blck)
        quit 1

      db.putBlock(blck)
2022-01-25 08:28:26 +00:00
      db.putState(state)
2021-11-10 11:39:08 +00:00
      db.putTailBlock(blck.root)
      db.putHeadBlock(blck.root)

      notice "New database from snapshot",
        genesisBlockRoot = shortLog(genesisBlockRoot),
        genesisStateRoot = shortLog(getStateRoot(genesisState)),
        tailBlockRoot = shortLog(blck.root),
        tailStateRoot = shortLog(state.root),
        fork = state.data.fork,
        validators = state.data.validators.len()
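# Illustrative usage sketch (not part of the original module): seeding an
# empty database before first start-up, e.g. for a checkpoint sync.
# `db`, `genesisState`, `checkpointState` and `checkpointBlock` are assumed to
# have been loaded by the caller, for instance from SSZ files.
when false:
  ChainDAGRef.preInit(db, genesisState, checkpointState, checkpointBlock)
  doAssert ChainDAGRef.isInitialized(db).isOk()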
2020-09-22 20:42:42 +00:00
2020-05-22 14:21:22 +00:00
proc getProposer*(
2021-06-01 11:13:40 +00:00
    dag: ChainDAGRef, head: BlockRef, slot: Slot): Option[ValidatorIndex] =
2020-08-05 06:28:43 +00:00
  let
2022-01-05 18:38:04 +00:00
    epochRef = block:
2022-01-11 10:01:54 +00:00
      let tmp = dag.getEpochRef(head, slot.epoch(), false)
2022-01-05 18:38:04 +00:00
      if tmp.isErr():
        return none(ValidatorIndex)
      tmp.get()
2022-01-11 10:01:54 +00:00
    slotInEpoch = slot.since_epoch_start()
2020-05-19 14:18:07 +00:00
2021-06-01 11:13:40 +00:00
  let proposer = epochRef.beacon_proposers[slotInEpoch]
  if proposer.isSome():
2021-06-10 07:37:02 +00:00
    if proposer.get().uint64 >= dag.db.immutableValidators.lenu64():
2021-06-01 11:13:40 +00:00
      # Sanity check - it should never happen that the key cache doesn't contain
      # a key for the selected proposer - that would mean that we somehow
      # created validators in the state without updating the cache!
      warn "Proposer key not found",
2021-06-10 07:37:02 +00:00
        keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
2021-06-01 11:13:40 +00:00
      return none(ValidatorIndex)

  proposer
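# Illustrative usage sketch (not part of the original module): looking up the
# expected proposer for a slot on the current head, then resolving its public
# key via the key cache. `dag` and `slot` are assumed to be provided.
when false:
  let proposer = dag.getProposer(dag.head, slot)
  if proposer.isSome():
    let pubkey = dag.validatorKey(proposer.get())
    doAssert pubkey.isSome() # guaranteed by the check in `getProposer`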
2021-12-09 12:56:54 +00:00
proc aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] =
  if validator_indices.len == 0:
    # Aggregation spec requires non-empty collection
    # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
    # Eth2 spec requires at least one attesting index in attestation
2022-01-29 13:53:31 +00:00
    # - https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
2021-12-09 12:56:54 +00:00
return err ( " aggregate: no attesting keys " )
let
firstKey = dag . validatorKey ( validator_indices [ 0 ] )
if not firstKey . isSome ( ) :
return err ( " aggregate: invalid validator index " )
var aggregateKey {. noInit . } : AggregatePublicKey
aggregateKey . init ( firstKey . get ( ) )
for i in 1 .. < validator_indices . len :
let key = dag . validatorKey ( validator_indices [ i ] )
if not key . isSome ( ) :
return err ( " aggregate: invalid validator index " )
aggregateKey . aggregate ( key . get ( ) )
ok ( finish ( aggregateKey ) )

proc aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex|uint64],
    bits: BitSeq | BitArray): Result[CookedPubKey, cstring] =
  if validator_indices.len() != bits.len():
    return err("aggregateAll: mismatch in bits length")

  var
    aggregateKey{.noInit.}: AggregatePublicKey
    inited = false

  for i in 0 ..< bits.len():
    if bits[i]:
      let key = dag.validatorKey(validator_indices[i])
      if not key.isSome():
        return err("aggregate: invalid validator index")

      if inited:
        aggregateKey.aggregate(key.get)
      else:
        aggregateKey = AggregatePublicKey.init(key.get)
        inited = true

  if not inited:
    err("aggregate: no attesting keys")
  else:
    ok(finish(aggregateKey))
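# Illustrative usage sketch (not part of the original module): aggregating the
# keys of the participants marked in a bit sequence, as is done when checking
# an aggregate signature. `dag` is assumed given; the indices and bits below
# are made-up placeholders, and `BitSeq.init` is assumed from stew/bitseqs.
when false:
  let indices = @[ValidatorIndex(0), ValidatorIndex(1), ValidatorIndex(2)]
  var bits = BitSeq.init(indices.len)
  # ... mark the participating validators in `bits` ...
  let aggregate = dag.aggregateAll(indices, bits)
  if aggregate.isErr():
    echo "cannot aggregate: ", aggregate.error()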
2022-01-07 10:13:19 +00:00
proc getBlockSSZ*(dag: ChainDAGRef, id: BlockId, bytes: var seq[byte]): bool =
  # Load the SSZ-encoded data of a block into `bytes`, overwriting the existing
  # content.
  # Careful: there are two snappy encodings in use, with and without framing!
  # Returns true if the block is found, false if not
  case dag.cfg.blockForkAtEpoch(id.slot.epoch)
  of BeaconBlockFork.Phase0:
    dag.db.getPhase0BlockSSZ(id.root, bytes)
  of BeaconBlockFork.Altair:
    dag.db.getAltairBlockSSZ(id.root, bytes)
  of BeaconBlockFork.Bellatrix:
    dag.db.getMergeBlockSSZ(id.root, bytes)
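# Illustrative usage sketch (not part of the original module): fetching the
# raw SSZ bytes of a block by its `BlockId`, e.g. when answering a REST query
# for a block in its original encoding. `dag` and `bid` are assumed given.
when false:
  var bytes: seq[byte]
  if dag.getBlockSSZ(bid, bytes):
    discard bytes # `bytes` now holds the SSZ encoding of the block
  else:
    echo "block not found in database"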
2022-01-20 07:25:45 +00:00
func needsBackfill*(dag: ChainDAGRef): bool =
  dag.backfill.slot > dag.genesis.slot
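# Illustrative usage sketch (not part of the original module): after a
# checkpoint sync, the backfill pointer starts at the tail and moves towards
# genesis; historical block sync can stop once it reaches the genesis slot.
when false:
  if dag.needsBackfill():
    discard # keep requesting blocks older than `dag.backfill.slot`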