# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}

import
  std/[options, sequtils, tables, sets],
  stew/[assign2, byteutils, results],
  metrics, snappy, chronicles,
  ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
           state_transition, validator],
  ../spec/datatypes/[phase0, altair, merge],
  ".."/beacon_chain_db,
  "."/[block_pools_types, block_quarantine]

export
  eth2_merkleization, eth2_ssz_serialization,
  block_pools_types, results, beacon_chain_db
2020-07-30 19:18:17 +00:00
2020-11-27 22:16:13 +00:00
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_head_root, "Root of the head block of the beacon chain"
declareGauge beacon_head_slot, "Slot of the head block of the beacon chain"

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition
declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition
declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition
declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition
declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition
declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition

declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter)
declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy
declareCounter beacon_state_data_cache_hits, "EpochRef hits"
declareCounter beacon_state_data_cache_misses, "EpochRef misses"
declareCounter beacon_state_rewinds, "State database rewinds"

declareGauge beacon_active_validators, "Number of validators in the active validator set"
declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block

logScope: topics = "chaindag"
2020-05-19 14:18:07 +00:00
2020-06-25 10:23:10 +00:00
proc putBlock*(
    dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) =
  ## Persist the given trusted block in the database backing the DAG.
  dag.db.putBlock(signedBlock)
2020-05-19 14:18:07 +00:00
# Forward declaration - the implementation appears later in this module; it is
# declared here because the templates below reference it.
proc updateStateData*(
    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
    cache: var StateCache) {.gcsafe.}
2021-04-13 13:05:44 +00:00
template withStateVars*(
    stateDataInternal: var StateData, body: untyped): untyped =
  ## Inject a few more descriptive names for the members of `stateData` -
  ## the stateData instance may get mutated through these names as well
  template stateData(): StateData {.inject, used.} = stateDataInternal
  template stateRoot(): Eth2Digest {.inject, used.} =
    getStateRoot(stateDataInternal.data)
  template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
  template root(): Eth2Digest {.inject, used.} = stateDataInternal.data.root

  body
2020-05-19 14:18:07 +00:00
template withState*(
    dag: ChainDAGRef, stateData: var StateData, blockSlot: BlockSlot,
    body: untyped): untyped =
  ## Helper template that updates stateData to a particular BlockSlot - usage of
  ## stateData is unsafe outside of block.
  ## TODO async transformations will lead to a race where stateData gets updated
  ##      while waiting for future to complete - catch this here somehow?
  block:
    var cache {.inject.} = StateCache()
    updateStateData(dag, stateData, blockSlot, false, cache)

    withStateVars(stateData):
      body
2021-06-01 12:40:13 +00:00
func get_effective_balances(validators: openArray[Validator], epoch: Epoch):
    seq[Gwei] =
  ## Get the balances from a state as counted for fork choice - all validators
  ## that are not active at `epoch` get a balance of 0.
  result.newSeq(validators.len) # zero-init

  for i in 0 ..< result.len:
    # All non-active validators have a 0 balance
    let validator = unsafeAddr validators[i] # avoid copying the full Validator
    if validator[].is_active_validator(epoch):
      result[i] = validator[].effective_balance
2020-08-12 04:49:52 +00:00
2021-06-01 11:13:40 +00:00
proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
  ## Update validator key cache - must be called every time a valid block is
  ## applied to the state - this is important to ensure that when we sync blocks
  ## without storing a state (non-epoch blocks essentially), the deposits from
  ## those blocks are persisted to the in-database cache of immutable validator
  ## data (but no earlier than that the whole block has been validated)
  dag.db.updateImmutableValidators(validators)
func validatorKey*(
    dag: ChainDAGRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indicies that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  dag.db.immutableValidators.load(index)
func validatorKey*(
    epochRef: EpochRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indicies that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  validatorKey(epochRef.dag, index)
2021-06-01 11:13:40 +00:00
func init*(
    T: type EpochRef, dag: ChainDAGRef, state: StateData,
    cache: var StateCache): T =
  ## Build an `EpochRef` snapshot from the given state, caching the epoch-level
  ## information (shuffling, proposers, justified balances) used by fork choice
  ## and validator duties.
  let
    epoch = state.data.get_current_epoch()
    epochRef = EpochRef(
      dag: dag, # This gives access to the validator pubkeys through an EpochRef
      key: state.blck.epochAncestor(epoch),
      eth1_data: getStateField(state.data, eth1_data),
      eth1_deposit_index: getStateField(state.data, eth1_deposit_index),
      current_justified_checkpoint:
        getStateField(state.data, current_justified_checkpoint),
      finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
      shuffled_active_validator_indices:
        cache.get_shuffled_active_validator_indices(state.data, epoch),
      merge_transition_complete:
        case state.data.kind:
        of BeaconStateFork.Phase0: false
        of BeaconStateFork.Altair: false
        of BeaconStateFork.Merge:
          # https://github.com/ethereum/consensus-specs/blob/v1.1.6/specs/merge/beacon-chain.md#is_merge_transition_complete
          state.data.mergeData.data.latest_execution_payload_header !=
            ExecutionPayloadHeader()
    )
    epochStart = epoch.compute_start_slot_at_epoch()

  for i in 0'u64 ..< SLOTS_PER_EPOCH:
    epochRef.beacon_proposers[i] = get_beacon_proposer_index(
      state.data, cache, epochStart + i)

  # When fork choice runs, it will need the effective balance of the justified
  # checkpoint - we pre-load the balances here to avoid rewinding the justified
  # state later and compress them because not all checkpoints end up being used
  # for fork choice - specially during long periods of non-finalization
  proc snappyEncode(inp: openArray[byte]): seq[byte] =
    try:
      snappy.encode(inp)
    except CatchableError as err:
      raiseAssert err.msg

  epochRef.effective_balances_bytes =
    snappyEncode(SSZ.encode(
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(
        getStateField(state.data, validators).asSeq,
        epoch))))

  epochRef
2020-05-29 06:10:20 +00:00
2020-10-22 10:53:33 +00:00
func effective_balances*(epochRef: EpochRef): seq[Gwei] =
  ## Decompress and decode the snappy/SSZ-encoded effective balances that were
  ## cached when the EpochRef was created.
  try:
    SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high),
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq()
  except CatchableError as exc:
    # The bytes were produced by this module, so decoding failure is a bug
    raiseAssert exc.msg
2021-12-06 18:52:35 +00:00
func getBlockBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlot =
  ## Retrieve the canonical block at the given slot, or the last block that
  ## comes before - similar to atSlot, but without the linear scan - see
  ## getBlockSlotIdBySlot for a version that covers backfill blocks as well
  ## May return an empty BlockSlot (where blck is nil!)

  if slot == dag.genesis.slot:
    # Genesis is the canonical answer at its own slot
    return dag.genesis.atSlot(slot)

  if slot > dag.finalizedHead.slot:
    return dag.head.atSlot(slot) # Linear iteration is the fastest we have

  doAssert dag.finalizedHead.slot >= dag.tail.slot
  doAssert dag.tail.slot >= dag.backfill.slot
  doAssert dag.finalizedBlocks.len ==
    (dag.finalizedHead.slot - dag.tail.slot).int + 1, "see updateHead"

  if slot >= dag.tail.slot:
    # Finalized range: look up the slot directly, scanning backwards past
    # empty slots until a block is found
    var pos = int(slot - dag.tail.slot)
    while true:
      if dag.finalizedBlocks[pos] != nil:
        return dag.finalizedBlocks[pos].atSlot(slot)

      if pos == 0:
        break

      pos -= 1

  if dag.tail.slot == 0:
    raiseAssert "Genesis missing"

  BlockSlot() # nil blck!
func getBlockSlotIdBySlot*(dag: ChainDAGRef, slot: Slot): BlockSlotId =
  ## Retrieve the canonical block at the given slot, or the last block that
  ## comes before - similar to atSlot, but without the linear scan - unlike
  ## getBlockBySlot, this covers backfilled blocks too
  if slot == dag.genesis.slot:
    return dag.genesis.bid.atSlot(slot)

  if slot >= dag.tail.slot:
    return dag.getBlockBySlot(slot).toBlockSlotId()

  # Backfill range: walk backwards through the backfilled roots until a
  # non-empty entry is found
  var pos = slot.int
  while pos >= dag.backfill.slot.int:
    if dag.backfillBlocks[pos] != Eth2Digest():
      return BlockId(root: dag.backfillBlocks[pos], slot: Slot(pos)).atSlot(slot)
    pos -= 1

  BlockSlotId() # not backfilled yet, and not genesis
2021-12-06 18:52:35 +00:00
2021-12-09 17:06:21 +00:00
func epochAncestor*(blck: BlockRef, epoch: Epoch): EpochKey =
  ## The state transition works by storing information from blocks in a
  ## "working" area until the epoch transition, then batching work collected
  ## during the epoch. Thus, last block in the ancestor epochs is the block
  ## that has an impact on epoch currently considered.
  ##
  ## This function returns an epoch key pointing to that epoch boundary, i.e. the
  ## boundary where the last block has been applied to the state and epoch
  ## processing has been done.
  var blck = blck
  while blck.slot.epoch >= epoch and not blck.parent.isNil:
    blck = blck.parent

  EpochKey(epoch: epoch, blck: blck)
2020-08-18 20:29:33 +00:00
2021-03-17 10:17:15 +00:00
func findEpochRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
  ## Look up a cached EpochRef for the epoch ancestor of `blck` at `epoch`;
  ## returns nil when no matching entry is cached or the epoch predates the
  ## tail.
  if epoch < dag.tail.slot.epoch:
    # We can't compute EpochRef instances for states before the tail because
    # we do not have them!
    return

  let ancestor = epochAncestor(blck, epoch)
  doAssert ancestor.blck != nil

  # Linear scan over the (small, fixed-size) EpochRef cache
  for i in 0 ..< dag.epochRefs.len:
    if dag.epochRefs[i] != nil and dag.epochRefs[i].key == ancestor:
      return dag.epochRefs[i]

  return nil
2021-06-01 12:40:13 +00:00
func loadStateCache(
    dag: ChainDAGRef, cache: var StateCache, blck: BlockRef, epoch: Epoch) =
  # When creating a state cache, we want the current and the previous epoch
  # information to be preloaded as both of these are used in state transition
  # functions

  template load(e: Epoch) =
    if e notin cache.shuffled_active_validator_indices:
      let epochRef = dag.findEpochRef(blck, e)
      if epochRef != nil:
        cache.shuffled_active_validator_indices[epochRef.epoch] =
          epochRef.shuffled_active_validator_indices

        for i, idx in epochRef.beacon_proposers:
          cache.beacon_proposer_indices[
            epochRef.epoch.compute_start_slot_at_epoch + i] = idx

  load(epoch)

  if epoch > 0:
    load(epoch - 1)
2021-03-17 10:17:15 +00:00
func contains*(dag: ChainDAGRef, root: Eth2Digest): bool =
  ## Whether a block with the given root is part of the in-memory DAG
  KeyedBlockRef.asLookupKey(root) in dag.blocks
2021-09-08 03:46:33 +00:00
proc containsBlock(
    cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool =
  ## Check the database for a block with the given root, looking in the table
  ## that matches the fork active at `slot`.
  case cfg.blockForkAtEpoch(slot.epoch)
  of BeaconBlockFork.Phase0: db.containsBlockPhase0(root)
  of BeaconBlockFork.Altair: db.containsBlockAltair(root)
  of BeaconBlockFork.Merge: db.containsBlockMerge(root)
2021-09-08 03:46:33 +00:00
2021-05-30 08:14:17 +00:00
func isStateCheckpoint(bs: BlockSlot): bool =
  ## State checkpoints are the points in time for which we store full state
  ## snapshots, which later serve as rewind starting points when replaying state
  ## transitions from database, for example during reorgs.
  ##
  # As a policy, we only store epoch boundary states without the epoch block
  # (if it exists) applied - the rest can be reconstructed by loading an epoch
  # boundary state and applying the missing blocks.
  # We also avoid states that were produced with empty slots only - as such,
  # there is only a checkpoint for the first epoch after a block.
  # The tail block also counts as a state checkpoint!
  (bs.slot == bs.blck.slot and bs.blck.parent == nil) or
  (bs.slot.isEpoch and bs.slot.epoch == (bs.blck.slot.epoch + 1))
2021-10-18 12:32:54 +00:00
proc getStateData(
    db: BeaconChainDB, cfg: RuntimeConfig, state: var StateData, bs: BlockSlot,
    rollback: RollbackProc): bool =
  ## Load the full state snapshot for the given checkpoint `bs` into `state`,
  ## returning false when `bs` is not a checkpoint or the state is missing.
  ## `rollback` is invoked by the database layer if loading fails midway.
  if not bs.isStateCheckpoint():
    return false

  let root = db.getStateRoot(bs.blck.root, bs.slot)
  if not root.isSome():
    return false

  # Make sure the in-memory forked state wrapper matches the fork active at
  # the checkpoint's epoch before deserializing into it
  let expectedFork = cfg.stateForkAtEpoch(bs.slot.epoch)
  if state.data.kind != expectedFork:
    state.data = (ref ForkedHashedBeaconState)(kind: expectedFork)[]

  case expectedFork
  of BeaconStateFork.Merge:
    if not db.getState(root.get(), state.data.mergeData.data, rollback):
      return false
  of BeaconStateFork.Altair:
    if not db.getState(root.get(), state.data.altairData.data, rollback):
      return false
  of BeaconStateFork.Phase0:
    if not db.getState(root.get(), state.data.phase0Data.data, rollback):
      return false

  state.blck = bs.blck
  setStateRoot(state.data, root.get())

  true
2021-11-10 11:39:08 +00:00
proc getForkedBlock(db: BeaconChainDB, root: Eth2Digest):
    Opt[ForkedTrustedSignedBeaconBlock] =
  ## Look up `root` across all per-fork block tables, newest fork first.
  # With only a digest at hand we don't know which fork the block belongs to,
  # so each table is probed in turn - this should be used sparingly
  block:
    let found = db.getMergeBlock(root)
    if found.isSome():
      return ok(ForkedTrustedSignedBeaconBlock.init(found.get()))
  block:
    let found = db.getAltairBlock(root)
    if found.isSome():
      return ok(ForkedTrustedSignedBeaconBlock.init(found.get()))
  block:
    let found = db.getPhase0Block(root)
    if found.isSome():
      return ok(ForkedTrustedSignedBeaconBlock.init(found.get()))
  err()
2021-09-22 12:17:15 +00:00
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
           validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
           onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
           onReorgCb: OnReorgCallback = nil,
           onFinCb: OnFinalizedCallback = nil): ChainDAGRef =
  ## Load the chain DAG from database: rebuild the head-to-tail `BlockRef`
  ## chain from stored block summaries, record pre-tail (backfill) block
  ## roots, locate the most recent stored state and use it to seed the head,
  ## clearance and epochRef states. Quits the process on database corruption
  ## or a network/config mismatch.
  # TODO we require that the db contains both a head and a tail block -
  #      asserting here doesn't seem like the right way to go about it however..
  let
    tailBlockRoot = db.getTailBlock()
    headBlockRoot = db.getHeadBlock()

  doAssert tailBlockRoot.isSome(), "Missing tail block, database corrupt?"
  doAssert headBlockRoot.isSome(), "Missing head block, database corrupt?"

  let
    tailRoot = tailBlockRoot.get()
    tailBlock = db.getForkedBlock(tailRoot).get()
    tailRef = withBlck(tailBlock): BlockRef.init(tailRoot, blck.message)
    headRoot = headBlockRoot.get()

  # The tail doubles as genesis when it sits at slot 0; otherwise the genesis
  # block must have been written by `preInit`
  let genesisRef = if tailBlock.slot == GENESIS_SLOT:
    tailRef
  else:
    let
      genesisBlockRoot = db.getGenesisBlock().expect(
        "preInit should have initialized the database with a genesis block root")
      genesisBlock = db.getForkedBlock(genesisBlockRoot).expect(
        "preInit should have initialized the database with a genesis block")
    withBlck(genesisBlock): BlockRef.init(genesisBlockRoot, blck.message)

  var
    blocks: HashSet[KeyedBlockRef]
    headRef: BlockRef

  blocks.incl(KeyedBlockRef.init(tailRef))

  if genesisRef != tailRef:
    blocks.incl(KeyedBlockRef.init(genesisRef))

  var
    # Blocks at slots before the tail have no state and are tracked by root
    # only, to serve GetBeaconBlocksByRange during/after backfill
    backfillBlocks = newSeq[Eth2Digest](tailRef.slot.int)
    curRef: BlockRef
    backfill = BeaconBlockSummary(slot: GENESIS_SLOT)

  # Walk the canonical chain backwards from head, linking BlockRef instances
  # until we hit the tail; anything older only updates the backfill pointers
  for blck in db.getAncestorSummaries(headRoot):
    if blck.summary.slot < tailRef.slot:
      backfillBlocks[blck.summary.slot.int] = blck.root
      backfill = blck.summary
    elif blck.summary.slot == tailRef.slot:
      backfill = blck.summary

      if curRef == nil:
        curRef = tailRef
        headRef = tailRef
      else:
        link(tailRef, curRef)
        curRef = curRef.parent
    else:
      if curRef == nil:
        # When the database has been written with a pre-fork version of the
        # software, it may happen that blocks produced using an "unforked"
        # chain get written to the database - we need to skip such blocks
        # when loading the database with a fork-compatible version
        if not containsBlock(cfg, db, blck.summary.slot, blck.root):
          continue

      let newRef = BlockRef.init(blck.root, blck.summary.slot)
      if curRef == nil:
        curRef = newRef
        headRef = newRef
      else:
        link(newRef, curRef)
        curRef = curRef.parent

      blocks.incl(KeyedBlockRef.init(curRef))
      trace "Populating block dag", key = curRef.root, val = curRef

  if curRef != tailRef:
    fatal "Head block does not lead to tail - database corrupt?",
      genesisRef, tailRef, headRef, curRef, tailRoot, headRoot,
      blocks = blocks.len()
    quit 1

  # Because of incorrect hardfork check, there might be no head block, in which
  # case it's equivalent to the tail block
  if headRef == nil:
    headRef = tailRef

  var
    cur = headRef.atSlot()
    tmpState = (ref StateData)()

  # Now that we have a head block, we need to find the most recent state that
  # we have saved in the database
  while cur.blck != nil and
      not getStateData(db, cfg, tmpState[], cur, noRollback):
    cur = cur.parentOrSlot()

  if tmpState.blck == nil:
    warn "No state found in head history, database corrupt?",
      genesisRef, tailRef, headRef, tailRoot, headRoot,
      blocks = blocks.len()
    # TODO Potentially we could recover from here instead of crashing - what
    #      would be a good recovery model?
    quit 1

  # Guard against loading a database produced for a different network by
  # comparing the stored fork against the runtime config
  case tmpState.data.kind
  of BeaconStateFork.Phase0:
    if tmpState.data.phase0Data.data.fork != genesisFork(cfg):
      error "State from database does not match network, check --network parameter",
        genesisRef, tailRef, headRef, tailRoot, headRoot,
        blocks = blocks.len(),
        stateFork = tmpState.data.phase0Data.data.fork,
        configFork = genesisFork(cfg)
      quit 1
  of BeaconStateFork.Altair:
    if tmpState.data.altairData.data.fork != altairFork(cfg):
      error "State from database does not match network, check --network parameter",
        genesisRef, tailRef, headRef, tailRoot, headRoot,
        blocks = blocks.len(),
        stateFork = tmpState.data.altairData.data.fork,
        configFork = altairFork(cfg)
      quit 1
  of BeaconStateFork.Merge:
    if tmpState.data.mergeData.data.fork != mergeFork(cfg):
      error "State from database does not match network, check --network parameter",
        genesisRef, tailRef, headRef, tailRoot, headRoot,
        blocks = blocks.len(),
        stateFork = tmpState.data.mergeData.data.fork,
        configFork = mergeFork(cfg)
      quit 1

  let dag = ChainDAGRef(
    db: db,
    validatorMonitor: validatorMonitor,
    blocks: blocks,
    backfillBlocks: backfillBlocks,
    genesis: genesisRef,
    tail: tailRef,
    backfill: backfill,
    finalizedHead: tailRef.atSlot(),
    lastPrunePoint: tailRef.atSlot(),
    # Tail is implicitly finalized - we'll adjust it below when computing the
    # head state
    heads: @[headRef],
    headState: tmpState[],
    epochRefState: tmpState[],
    clearanceState: tmpState[],
    # The only allowed flag right now is verifyFinalization, as the others all
    # allow skipping some validation.
    updateFlags: {verifyFinalization} * updateFlags,
    cfg: cfg,
    forkDigests: newClone ForkDigests.init(
      cfg,
      getStateField(tmpState.data, genesis_validators_root)),
    onBlockAdded: onBlockCb,
    onHeadChanged: onHeadCb,
    onReorgHappened: onReorgCb,
    onFinHappened: onFinCb
  )

  # Fork versions/epochs must be distinct and ordered for the fork-dispatch
  # logic above to be unambiguous
  doAssert cfg.GENESIS_FORK_VERSION != cfg.ALTAIR_FORK_VERSION
  doAssert cfg.GENESIS_FORK_VERSION != cfg.MERGE_FORK_VERSION
  doAssert cfg.ALTAIR_FORK_VERSION != cfg.MERGE_FORK_VERSION
  doAssert cfg.ALTAIR_FORK_EPOCH <= cfg.MERGE_FORK_EPOCH
  doAssert dag.updateFlags in [{}, {verifyFinalization}]

  var cache: StateCache
  dag.updateStateData(dag.headState, headRef.atSlot(), false, cache)

  # The tail block is "implicitly" finalized as it was given either as a
  # checkpoint block, or is the genesis, thus we use it as a lower bound when
  # computing the finalized head
  let
    finalized_checkpoint =
      getStateField(dag.headState.data, finalized_checkpoint)
    finalizedSlot = max(
      finalized_checkpoint.epoch.compute_start_slot_at_epoch(), tailRef.slot)

  dag.finalizedHead = headRef.atSlot(finalizedSlot)

  block:
    # Index finalized blocks by slot for O(1) lookup between tail and
    # finalized head
    dag.finalizedBlocks.setLen((dag.finalizedHead.slot - dag.tail.slot).int + 1)

    var tmp = dag.finalizedHead.blck
    while not isNil(tmp):
      dag.finalizedBlocks[(tmp.slot - dag.tail.slot).int] = tmp
      tmp = tmp.parent

  dag.clearanceState = dag.headState

  # Pruning metadata
  dag.lastPrunePoint = dag.finalizedHead

  # Fill validator key cache in case we're loading an old database that doesn't
  # have a cache
  dag.updateValidatorKeys(getStateField(dag.headState.data, validators).asSeq())

  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)

  info "Block dag initialized",
    head = shortLog(dag.head),
    finalizedHead = shortLog(dag.finalizedHead),
    tail = shortLog(dag.tail),
    totalBlocks = dag.blocks.len(),
    backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))

  dag
2020-05-19 14:18:07 +00:00
2021-08-09 12:54:45 +00:00
template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
  ## The `genesis_validators_root` recorded in the current head state -
  ## identifies the genesis/network this DAG belongs to.
  getStateField(dag.headState.data, genesis_validators_root)
2021-06-01 12:40:13 +00:00
func getEpochRef*(
    dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef =
  ## Return (creating, if necessary) the `EpochRef` matching the given state's
  ## block and current epoch, caching it when the epoch is not yet finalized.
  let
    blck = state.blck
    epoch = state.data.get_current_epoch()

  var epochRef = dag.findEpochRef(blck, epoch)
  if epochRef == nil:
    epochRef = EpochRef.init(dag, state, cache)

    if epoch >= dag.finalizedHead.slot.epoch():
      # Only cache epoch information for unfinalized blocks - earlier states
      # are seldomly used (ie RPC), so no need to cache

      # Because we put a cap on the number of epochRefs we store, we want to
      # prune the least useful state - for now, we'll assume that to be the
      # oldest epochRef we know about.
      var
        oldest = 0
      for x in 0..<dag.epochRefs.len:
        let candidate = dag.epochRefs[x]
        if candidate == nil:
          # Free slot - use it directly
          oldest = x
          break
        if candidate.key.epoch < dag.epochRefs[oldest].epoch:
          oldest = x
      dag.epochRefs[oldest] = epochRef

  epochRef
2021-03-17 10:17:15 +00:00
2020-08-18 20:29:33 +00:00
proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
  ## Look up the `EpochRef` for the given block/epoch, loading and advancing
  ## a state to compute it on a cache miss.
  let epochRef = dag.findEpochRef(blck, epoch)
  if epochRef != nil:
    beacon_state_data_cache_hits.inc
    return epochRef

  beacon_state_data_cache_misses.inc

  let
    # The epoch information is the same for all blocks in the epoch - anchor
    # the lookup to the ancestor at the start of the epoch
    ancestor = epochAncestor(blck, epoch)

  dag.withState(
      dag.epochRefState, ancestor.blck.atEpochStart(ancestor.epoch)):
    dag.getEpochRef(stateData, cache)
2020-07-28 13:54:32 +00:00
2020-10-26 08:55:10 +00:00
proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
  ## `EpochRef` for the epoch of the current finalized head.
  dag.getEpochRef(dag.finalizedHead.blck, dag.finalizedHead.slot.epoch)
2021-03-01 19:50:43 +00:00
func stateCheckpoint*(bs: BlockSlot): BlockSlot =
  ## The first ancestor BlockSlot (following `parentOrSlot`) that is a state
  ## checkpoint
  result = bs
  while not isStateCheckpoint(result):
    result = result.parentOrSlot
2021-08-09 12:54:45 +00:00
template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork =
  ## Fork data in effect at the given epoch, per the DAG's runtime config.
  forkAtEpoch(dag.cfg, epoch)
proc forkDigestAtEpoch*(dag: ChainDAGRef, epoch: Epoch): ForkDigest =
  ## The fork digest corresponding to the state fork active at `epoch`.
  case dag.cfg.stateForkAtEpoch(epoch)
  of BeaconStateFork.Merge:  dag.forkDigests.merge
  of BeaconStateFork.Altair: dag.forkDigests.altair
  of BeaconStateFork.Phase0: dag.forkDigests.phase0
2021-07-07 09:09:47 +00:00
2020-08-13 09:50:05 +00:00
proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
  ## Load a state from the database given a block and a slot - this will first
  ## lookup the state root in the state root table then load the corresponding
  ## state, if it exists
  if not bs.isStateCheckpoint():
    return false # Only state checkpoints are stored - no need to hit DB

  let stateRoot = dag.db.getStateRoot(bs.blck.root, bs.slot)
  if stateRoot.isNone(): return false

  let restoreAddr =
    # Any restore point will do as long as it's not the object being updated
    if unsafeAddr(state) == unsafeAddr(dag.headState):
      unsafeAddr dag.clearanceState
    else:
      unsafeAddr dag.headState

  let v = addr state.data
  func restore() =
    # Invoked when loading fails part-way, leaving `state` in a consistent
    # (if different) state instead of a half-written one
    assign(v[], restoreAddr[].data)

  getStateData(dag.db, dag.cfg, state, bs, restore)
2020-08-13 09:50:05 +00:00
2021-06-24 18:34:08 +00:00
proc putState(dag: ChainDAGRef, state: StateData) =
  ## Store a state and its root in the database, skipping states that are not
  ## checkpoints or that are already present.
  logScope:
    blck = shortLog(state.blck)
    stateSlot = shortLog(getStateField(state.data, slot))
    stateRoot = shortLog(getStateRoot(state.data))

  if not isStateCheckpoint(state.blck.atSlot(getStateField(state.data, slot))):
    return

  # Don't consider legacy tables here, they are slow to read so we'll want to
  # rewrite things in the new database anyway.
  if dag.db.containsState(getStateRoot(state.data), legacy = false):
    return

  let startTick = Moment.now()
  # Ideally we would save the state and the root lookup cache in a single
  # transaction to prevent database inconsistencies, but the state loading code
  # is resilient against one or the other going missing
  withState(state.data):
    dag.db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
    dag.db.putState(state.root, state.data)

  debug "Stored state", putStateDur = Moment.now() - startTick
2020-07-31 14:49:06 +00:00
func getRef*(dag: ChainDAGRef, root: Eth2Digest): BlockRef =
  ## Retrieve a resolved block reference, if available - `nil` when the root
  ## is unknown
  let key = KeyedBlockRef.asLookupKey(root)
  # HashSet lacks the api to do check-and-get in one lookup - `[]` will return
  # the copy of the instance in the set which has more fields than `root` set!
  if key in dag.blocks:
    try: dag.blocks[key].blockRef()
    except KeyError: raiseAssert "contains"
  else:
    nil
2020-05-19 14:18:07 +00:00
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
proc getBlockRange*(
    dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
    output: var openArray[BlockId]): Natural =
  ## This function populates an `output` buffer of blocks
  ## with a slots ranging from `startSlot` up to, but not including,
  ## `startSlot + skipStep * output.len`, skipping any slots that don't have
  ## a block.
  ##
  ## Blocks will be written to `output` from the end without gaps, even if
  ## a block is missing in a particular slot. The return value shows how
  ## many slots were missing blocks - to iterate over the result, start
  ## at this index.
  ##
  ## If there were no blocks in the range, `output.len` will be returned.
  let
    requestedCount = output.lenu64
    headSlot = dag.head.slot

  trace "getBlockRange entered",
    head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot

  if startSlot < dag.backfill.slot:
    # Blocks below the backfill point have not (yet) been downloaded
    notice "Got request for pre-backfill slot",
      startSlot, backfillSlot = dag.backfill.slot
    return output.len

  if headSlot <= startSlot or requestedCount == 0:
    return output.len # Identical to returning an empty set of block as indicated above

  let
    runway = uint64(headSlot - startSlot)

    # This is the number of blocks that will follow the start block
    extraSlots = min(runway div skipStep, requestedCount - 1)

    # If `skipStep` is very large, `extraSlots` should be 0 from
    # the previous line, so `endSlot` will be equal to `startSlot`:
    endSlot = startSlot + extraSlots * skipStep

  var
    curSlot = endSlot
    o = output.len

  # Process all blocks that follow the start block (may be zero blocks)
  while curSlot > startSlot:
    let bs = dag.getBlockSlotIdBySlot(curSlot)
    if bs.isProposed():
      o -= 1
      output[o] = bs.bid
    curSlot -= skipStep

  # Handle start slot separately (to avoid underflow when computing curSlot)
  let bs = dag.getBlockSlotIdBySlot(startSlot)
  if bs.isProposed():
    o -= 1
    output[o] = bs.bid

  o # Return the index of the first non-nil item in the output
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
proc getForkedBlock*(dag: ChainDAGRef, id: BlockId): Opt[ForkedTrustedSignedBeaconBlock] =
  ## Load the block with the given root from the fork-specific database table
  ## selected by the block fork active at `id.slot` - returns `none` when the
  ## block is not stored.
  case dag.cfg.blockForkAtEpoch(id.slot.epoch)
  of BeaconBlockFork.Phase0:
    let data = dag.db.getPhase0Block(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)
  of BeaconBlockFork.Altair:
    let data = dag.db.getAltairBlock(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)
  of BeaconBlockFork.Merge:
    let data = dag.db.getMergeBlock(id.root)
    if data.isOk():
      return ok ForkedTrustedSignedBeaconBlock.init(data.get)
2021-07-07 09:09:47 +00:00
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
proc getForkedBlock*(dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaconBlock =
  ## Load the forked block for a resolved `BlockRef`.
  # NOTE(review): falls through to a default-initialized block when the
  # database lookup fails - presumably callers only pass blocks known to be
  # stored; confirm.
  let loaded = dag.getForkedBlock(blck.bid)
  if loaded.isSome():
    return loaded.get()
2021-07-07 09:09:47 +00:00
2021-07-14 12:18:52 +00:00
proc get*(dag: ChainDAGRef, blck: BlockRef): BlockData =
  ## Retrieve the associated block body of a block reference
  doAssert not blck.isNil, "Trying to get nil BlockRef"
  BlockData(data: dag.getForkedBlock(blck), refs: blck)
2020-07-31 14:49:06 +00:00
proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
  ## Retrieve a resolved block reference and its associated body, if available
  let refs = dag.getRef(root)
  if not refs.isNil:
    some(dag.get(refs))
  else:
    none(BlockData)
2020-08-13 09:50:05 +00:00
proc advanceSlots(
    dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
    cache: var StateCache, info: var ForkedEpochInfo) =
  ## Given a state, advance it zero or more slots by applying empty slot
  ## processing - the state must be positioned at a slot before or equal to
  ## the target. When `save` is set, each resulting state is persisted via
  ## `putState`.
  doAssert getStateField(state.data, slot) <= slot

  while getStateField(state.data, slot) < slot:
    let preEpoch = getStateField(state.data, slot).epoch
    loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

    doAssert process_slots(
        dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, info,
        dag.updateFlags),
      "process_slots shouldn't fail when state slot is correct"

    if save:
      dag.putState(state)

    # The reward information in the state transition is computed for epoch
    # transitions - when transitioning into epoch N, the activities in epoch
    # N-2 are translated into balance updates, and this is what we capture
    # in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
    # which is an acceptable tradeoff for monitoring.
    withState(state.data):
      let postEpoch = state.data.slot.epoch
      if preEpoch != postEpoch:
        dag.validatorMonitor[].registerEpochInfo(postEpoch, info, state.data)
2020-08-13 09:50:05 +00:00
proc applyBlock(
    dag: ChainDAGRef,
    state: var StateData, blck: BlockData, flags: UpdateFlags,
    cache: var StateCache, info: var ForkedEpochInfo): bool =
  ## Apply a single block to the state - the state must be positioned at the
  ## parent of the block with a slot lower than the one of the block being
  ## applied. Returns true when the state transition succeeded, in which case
  ## `state.blck` is updated to the applied block.
  doAssert state.blck == blck.refs.parent

  var statePtr = unsafeAddr state # safe because `restore` is locally scoped

  func restore(v: var ForkedHashedBeaconState) =
    # Invoked by `state_transition` when the block fails to apply - the
    # partially mutated state is overwritten from `headState`, a known-good
    # state.
    doAssert (addr(statePtr.data) == addr v)
    assign(statePtr[], dag.headState)

  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

  let ok = withBlck(blck.data):
    state_transition(
      dag.cfg, state.data, blck, cache, info,
      flags + dag.updateFlags + {slotProcessed}, restore)
  if ok:
    state.blck = blck.refs

  ok
2020-07-22 07:51:45 +00:00
proc updateStateData*(
    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
    cache: var StateCache) =
  ## Rewind or advance state such that it matches the given block and slot -
  ## this may include replaying from an earlier snapshot if blck is on a
  ## different branch or has advanced to a higher slot number than slot
  ## If slot is higher than blck.slot, replay will fill in with empty/non-block
  ## slots, else it is ignored

  # First, see if we're already at the requested block. If we are, also check
  # that the state has not been advanced past the desired block - if it has,
  # an earlier state must be loaded since there's no way to undo the slot
  # transitions

  let startTick = Moment.now()

  var
    ancestors: seq[BlockRef]
    cur = bs
    found = false

  template exactMatch(state: StateData, bs: BlockSlot): bool =
    # The block is the same and the slot matches exactly - the state can be
    # used as-is for the desired blockslot
    state.blck == bs.blck and getStateField(state.data, slot) == bs.slot

  template canAdvance(state: StateData, bs: BlockSlot): bool =
    # The block is the same and we're at an early enough slot - the state can
    # be used to arrive at the desired blockslot
    state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot

  # Fast path: check all caches for an exact match - this is faster than
  # advancing a state where there's epoch processing to do, by a wide margin -
  # it also avoids `hash_tree_root` for slot processing
  if exactMatch(state, cur):
    found = true
  elif exactMatch(dag.headState, cur):
    assign(state, dag.headState)
    found = true
  elif exactMatch(dag.clearanceState, cur):
    assign(state, dag.clearanceState)
    found = true
  elif exactMatch(dag.epochRefState, cur):
    assign(state, dag.epochRefState)
    found = true

  # First, run a quick check if we can simply apply a few blocks to an
  # in-memory state - any in-memory state will be faster than loading from
  # database. The limit here how many blocks we apply is somewhat arbitrary
  # but two full epochs (might be more slots if there are skips) seems like
  # a good enough first guess.
  # This happens in particular during startup where we replay blocks
  # sequentially to grab their votes.
  const RewindBlockThreshold = 64
  while not found and ancestors.len < RewindBlockThreshold:
    if canAdvance(state, cur):
      found = true
      break

    if canAdvance(dag.headState, cur):
      assign(state, dag.headState)
      found = true
      break

    if canAdvance(dag.clearanceState, cur):
      assign(state, dag.clearanceState)
      found = true
      break

    if canAdvance(dag.epochRefState, cur):
      assign(state, dag.epochRefState)
      found = true
      break

    if cur.slot == cur.blck.slot:
      # This is not an empty slot, so the block will need to be applied to
      # eventually reach bs
      ancestors.add(cur.blck)

    if cur.blck.parent == nil:
      break

    # Moving slot by slot helps find states that were advanced with empty slots
    cur = cur.parentOrSlot()

  if not found:
    debug "UpdateStateData cache miss",
      bs, stateBlock = state.blck, stateSlot = getStateField(state.data, slot)

    # Either the state is too new or was created by applying a different block.
    # We'll now resort to loading the state from the database then reapplying
    # blocks until we reach the desired point in time.
    cur = bs
    ancestors.setLen(0)

    # Look for a state in the database and load it - as long as it cannot be
    # found, keep track of the blocks that are needed to reach it from the
    # state that eventually will be found
    while not dag.getState(state, cur):
      # There's no state saved for this particular BlockSlot combination, keep
      # looking...
      if cur.slot == cur.blck.slot:
        # This is not an empty slot, so the block will need to be applied to
        # eventually reach bs
        ancestors.add(cur.blck)

      if cur.slot == dag.tail.slot:
        # If we've walked all the way to the tail and still not found a state,
        # there's no hope finding one - the database likely has become corrupt
        # and one will have to resync from start.
        fatal "Cannot find state to load, the database is likely corrupt",
          cur, bs, head = dag.head, tail = dag.tail
        quit 1

      # Move slot by slot to capture epoch boundary states
      cur = cur.parentOrSlot()

    beacon_state_rewinds.inc()

  # Starting state has been assigned, either from memory or database
  let
    assignTick = Moment.now()
    startSlot {.used.} = getStateField(state.data, slot) # used in logs below
    startRoot {.used.} = getStateRoot(state.data)
  var info: ForkedEpochInfo

  # Time to replay all the blocks between then and now
  for i in countdown(ancestors.len - 1, 0):
    # Because the ancestors are in the database, there's no need to persist them
    # again. Also, because we're applying blocks that were loaded from the
    # database, we can skip certain checks that have already been performed
    # before adding the block to the database.
    let ok = dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, info)
    doAssert ok, "Blocks in database should never fail to apply.."

  # ...and make sure to process empty slots as requested
  dag.advanceSlots(state, bs.slot, save, cache, info)

  # ...and make sure to load the state cache, if it exists
  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

  let
    assignDur = assignTick - startTick
    replayDur = Moment.now() - assignTick

  logScope:
    blocks = ancestors.len
    slots = getStateField(state.data, slot) - startSlot
    stateRoot = shortLog(getStateRoot(state.data))
    stateSlot = getStateField(state.data, slot)
    startRoot = shortLog(startRoot)
    startSlot
    blck = shortLog(bs)
    found
    assignDur
    replayDur

  if (assignDur + replayDur) >= 250.millis:
    # This might indicate there's a cache that's not in order or a disk that is
    # too slow - for now, it's here for investigative purposes and the cutoff
    # time might need tuning
    info "State replayed"
  elif ancestors.len > 0:
    debug "State replayed"
  else:
    trace "State advanced" # Normal case!
2020-05-19 14:18:07 +00:00
2020-07-31 14:49:06 +00:00
proc delState(dag: ChainDAGRef, bs: BlockSlot) =
  # Delete state and mapping for a particular block+slot
  if not isStateCheckpoint(bs):
    return # We only ever save epoch states

  if (let root = dag.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()):
    dag.db.delState(root.get())
    dag.db.delStateRoot(bs.blck.root, bs.slot)
2020-05-19 14:18:07 +00:00
2021-03-09 14:36:17 +00:00
proc pruneBlocksDAG(dag: ChainDAGRef) =
  ## This prunes the block DAG
  ## This does NOT prune the cached state checkpoints and EpochRef
  ## This must be done after a new finalization point is reached
  ## to invalidate pending blocks or attestations referring
  ## to a now invalid fork.
  ##
  ## This does NOT update the `dag.lastPrunePoint` field.
  ## as the caches and fork choice can be pruned at a later time.

  # Clean up block refs, walking block by block
  let startTick = Moment.now()

  # Finalization means that we choose a single chain as the canonical one -
  # it also means we're no longer interested in any branches from that chain
  # up to the finalization point
  let hlen = dag.heads.len
  for i in 0 ..< hlen:
    # Iterate in reverse so `del` below doesn't shift unvisited entries
    let n = hlen - i - 1
    let head = dag.heads[n]
    if dag.finalizedHead.blck.isAncestorOf(head):
      continue

    var cur = head.atSlot()
    while not cur.blck.isAncestorOf(dag.finalizedHead.blck):
      dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`

      if cur.blck.slot == cur.slot:
        dag.blocks.excl(KeyedBlockRef.init(cur.blck))
        dag.db.delBlock(cur.blck.root)

      if cur.blck.parent.isNil:
        break

      cur = cur.parentOrSlot
    dag.heads.del(n)

  debug "Pruned the blockchain DAG",
    currentCandidateHeads = dag.heads.len,
    prunedHeads = hlen - dag.heads.len,
    dagPruneDur = Moment.now() - startTick
2021-03-09 14:36:17 +00:00
2021-10-07 13:19:47 +00:00
iterator syncSubcommittee*(
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex): ValidatorIndex =
  ## Yield the validator indices belonging to the given subcommittee, i.e. the
  ## `SYNC_SUBCOMMITTEE_SIZE`-sized slice of `syncCommittee` starting at
  ## `subcommitteeIdx * SYNC_SUBCOMMITTEE_SIZE` (clamped to the committee's
  ## length).
  var i = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
  let onePastEndIdx = min(syncCommittee.len, i + SYNC_SUBCOMMITTEE_SIZE)

  while i < onePastEndIdx:
    yield syncCommittee[i]
    inc i
2021-08-28 10:40:01 +00:00
2021-10-14 10:38:38 +00:00
iterator syncSubcommitteePairs*(
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex): tuple[validatorIdx: ValidatorIndex,
                                                   subcommitteeIdx: int] =
  ## Like `syncSubcommittee`, but additionally yields each validator's absolute
  ## position within `syncCommittee`.
  var i = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
  let onePastEndIdx = min(syncCommittee.len, i + SYNC_SUBCOMMITTEE_SIZE)

  while i < onePastEndIdx:
    yield (syncCommittee[i], i)
    inc i
2021-10-20 16:32:46 +00:00
func syncCommitteeParticipants*(dag: ChainDAGRef,
                                slot: Slot): seq[ValidatorIndex] =
  ## Return the sync committee active at `slot`, based on the head state's
  ## cached current/next committees. Returns an empty seq when `slot` falls
  ## outside the current and next sync committee periods, or when the head
  ## state is pre-Altair (no sync committees exist).
  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      let
        period = sync_committee_period(slot)
        curPeriod = sync_committee_period(state.data.slot)

      if period == curPeriod:
        @(dag.headSyncCommittees.current_sync_committee)
      elif period == curPeriod + 1:
        @(dag.headSyncCommittees.next_sync_committee)
      else: @[]
    else:
      @[]
2021-09-28 07:44:20 +00:00
func getSubcommitteePositionsAux(
    dag: ChainDAGRef,
    syncCommittee: openArray[ValidatorIndex],
    subcommitteeIdx: SyncSubcommitteeIndex,
    validatorIdx: uint64): seq[uint64] =
  ## Collect every position within the given subcommittee at which
  ## `validatorIdx` appears (a validator may be selected more than once).
  var pos = 0'u64
  for valIdx in syncCommittee.syncSubcommittee(subcommitteeIdx):
    if validatorIdx == uint64(valIdx):
      result.add pos
    inc pos
func getSubcommitteePositions*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex,
    validatorIdx: uint64): seq[uint64] =
  ## Return the positions of `validatorIdx` within the given subcommittee of
  ## the sync committee active at `slot`. Empty when `slot` is outside the
  ## current/next sync committee periods or the head state is pre-Altair.
  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      let
        period = sync_committee_period(slot)
        curPeriod = sync_committee_period(state.data.slot)

      template search(syncCommittee: openArray[ValidatorIndex]): seq[uint64] =
        dag.getSubcommitteePositionsAux(
          syncCommittee, subcommitteeIdx, validatorIdx)

      if period == curPeriod:
        search(dag.headSyncCommittees.current_sync_committee)
      elif period == curPeriod + 1:
        search(dag.headSyncCommittees.next_sync_committee)
      else: @[]
    else:
      @[]
2021-08-28 10:40:01 +00:00
template syncCommitteeParticipants*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex): seq[ValidatorIndex] =
  ## Convenience overload: the validator indices of a single sync
  ## subcommittee at `slot`, extracted from the full participant list.
  toSeq(syncSubcommittee(dag.syncCommitteeParticipants(slot), subcommitteeIdx))
2021-08-28 10:40:01 +00:00
iterator syncCommitteeParticipants*(
    dag: ChainDAGRef,
    slot: Slot,
    subcommitteeIdx: SyncSubcommitteeIndex,
    aggregationBits: SyncCommitteeAggregationBits): ValidatorIndex =
  ## Yield only those members of the given sync subcommittee at `slot` whose
  ## corresponding bit is set in `aggregationBits`.
  for pos, valIdx in dag.syncCommitteeParticipants(slot, subcommitteeIdx):
    # Guard the bit lookup - positions beyond the bitfield length are treated
    # as non-participating.
    if pos < aggregationBits.bits and aggregationBits[pos]:
      yield valIdx
2021-03-09 14:36:17 +00:00
func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool =
  ## True when finalization has advanced past the last recorded prune point,
  ## i.e. cached state checkpoints / EpochRef instances are eligible for
  ## pruning (see `pruneStateCachesDAG`).
  dag.lastPrunePoint != dag.finalizedHead
proc pruneStateCachesDAG*(dag: ChainDAGRef) =
  ## This prunes the cached state checkpoints and EpochRef
  ## This does NOT prune the state associated with invalidated blocks on a fork
  ## They are pruned via `pruneBlocksDAG`
  ##
  ## This updates the `dag.lastPrunePoint` variable
  doAssert dag.needStateCachesAndForkChoicePruning()

  let startTick = Moment.now()
  block: # Remove states, walking slot by slot
    # We remove all state checkpoints that come _before_ the current finalized
    # head, as we might frequently be asked to replay states from the
    # finalized checkpoint and onwards (for example when validating blocks and
    # attestations)
    var
      cur = dag.finalizedHead.stateCheckpoint.parentOrSlot
      prev = dag.lastPrunePoint.stateCheckpoint.parentOrSlot
    while cur.blck != nil and cur != prev:
      # TODO This is a quick fix to prune some states from the database, but
      # not all, pending a smarter storage - the downside of pruning these
      # states is that certain rewinds will take longer
      # After long periods of non-finalization, it can also take some time to
      # release all these states!
      if cur.slot.epoch mod 32 != 0 and cur.slot != dag.tail.slot:
        dag.delState(cur)
      cur = cur.parentOrSlot

  let statePruneTick = Moment.now()

  block: # Clean up old EpochRef instances
    # After finalization, we can clear up the epoch cache and save memory -
    # it will be recomputed if needed
    for i in 0 ..< dag.epochRefs.len:
      if dag.epochRefs[i] != nil and
          dag.epochRefs[i].epoch < dag.finalizedHead.slot.epoch:
        dag.epochRefs[i] = nil

  let epochRefPruneTick = Moment.now()

  dag.lastPrunePoint = dag.finalizedHead

  debug "Pruned the state checkpoints and DAG caches.",
    statePruneDur = statePruneTick - startTick,
    epochRefPruneDur = epochRefPruneTick - statePruneTick
2021-03-09 14:36:17 +00:00
2020-08-31 09:00:38 +00:00
proc updateHead*(
    dag: ChainDAGRef,
    newHead: BlockRef,
    quarantine: var Quarantine) =
  ## Update what we consider to be the current head, as given by the fork
  ## choice.
  ##
  ## The choice of head affects the choice of finalization point - the order
  ## of operations naturally becomes important here - after updating the head,
  ## blocks that were once considered potential candidates for a tree will
  ## now fall from grace, or no longer be considered resolved.
  doAssert not newHead.isNil()
  doAssert not newHead.parent.isNil() or newHead.slot <= dag.tail.slot

  logScope:
    newHead = shortLog(newHead)

  if dag.head == newHead:
    trace "No head block update"
    return

  let
    lastHead = dag.head
    lastHeadStateRoot = getStateRoot(dag.headState.data)

  # Start off by making sure we have the right state - updateStateData will try
  # to use existing in-memory states to make this smooth
  var cache: StateCache
  updateStateData(
    dag, dag.headState, newHead.atSlot(), false, cache)

  dag.db.putHeadBlock(newHead.root)

  withState(dag.headState.data):
    when stateFork >= BeaconStateFork.Altair:
      dag.headSyncCommittees = state.data.get_sync_committee_cache(cache)

  let
    finalized_checkpoint =
      getStateField(dag.headState.data, finalized_checkpoint)
    finalizedSlot = max(
      finalized_checkpoint.epoch.compute_start_slot_at_epoch(),
      dag.tail.slot)
    finalizedHead = newHead.atSlot(finalizedSlot)

  doAssert (not finalizedHead.blck.isNil),
    "Block graph should always lead to a finalized block"

  let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
  if not(isAncestor):
    notice "Updated head block with chain reorg",
      lastHead = shortLog(lastHead),
      headParent = shortLog(newHead.parent),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      headBlock = shortLog(dag.headState.blck),
      stateSlot = shortLog(getStateField(dag.headState.data, slot)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

    if not(isNil(dag.onReorgHappened)):
      let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
                                      lastHead.root, newHead.root,
                                      lastHeadStateRoot,
                                      getStateRoot(dag.headState.data))
      dag.onReorgHappened(data)

    # A reasonable criterion for "reorganizations of the chain"
    quarantine.clearQuarantine()
    beacon_reorgs_total_total.inc()
    beacon_reorgs_total.inc()
  else:
    debug "Updated head block",
      head = shortLog(dag.headState.blck),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

    if not(isNil(dag.onHeadChanged)):
      let currentEpoch = epoch(newHead.slot)
      let
        # Duty dependent roots fall back to the tail root when the chain is
        # too short to look back a full epoch (e.g. right after checkpoint
        # sync).
        currentDutyDepRoot =
          if currentEpoch > dag.tail.slot.epoch:
            dag.head.atSlot(
              compute_start_slot_at_epoch(currentEpoch) - 1).blck.root
          else:
            dag.tail.root
        previousDutyDepRoot =
          if currentEpoch > dag.tail.slot.epoch + 1:
            dag.head.atSlot(
              compute_start_slot_at_epoch(currentEpoch - 1) - 1).blck.root
          else:
            dag.tail.root
        epochTransition = (finalizedHead != dag.finalizedHead)
      let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
                                           getStateRoot(dag.headState.data),
                                           epochTransition, previousDutyDepRoot,
                                           currentDutyDepRoot)
      dag.onHeadChanged(data)

  # https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
  # both non-negative, so difference can't overflow or underflow int64
  beacon_pending_deposits.set(
    getStateField(dag.headState.data, eth1_data).deposit_count.toGaugeValue -
    getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)
  beacon_processed_deposits_total.set(
    getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)

  beacon_head_root.set newHead.root.toGaugeValue
  beacon_head_slot.set newHead.slot.toGaugeValue

  withState(dag.headState.data):
    # Every time the head changes, the "canonical" view of balances and other
    # state-related metrics change - notify the validator monitor.
    # Doing this update during head update ensures there's a reasonable number
    # of such updates happening - at most once per valid block.
    dag.validatorMonitor[].registerState(state.data)

  if lastHead.slot.epoch != newHead.slot.epoch:
    # Epoch updated - in theory, these could happen when the wall clock
    # changes epoch, even if there is no new block / head, but we'll delay
    # updating them until a block confirms the change
    beacon_current_justified_epoch.set(
      getStateField(
        dag.headState.data, current_justified_checkpoint).epoch.toGaugeValue)
    beacon_current_justified_root.set(
      getStateField(
        dag.headState.data, current_justified_checkpoint).root.toGaugeValue)
    beacon_previous_justified_epoch.set(
      getStateField(
        dag.headState.data, previous_justified_checkpoint).epoch.toGaugeValue)
    beacon_previous_justified_root.set(
      getStateField(
        dag.headState.data, previous_justified_checkpoint).root.toGaugeValue)

    let
      epochRef = getEpochRef(dag, newHead, newHead.slot.epoch)
      number_of_active_validators = epochRef.shuffled_active_validator_indices.lenu64().toGaugeValue
    beacon_active_validators.set(number_of_active_validators)
    beacon_current_active_validators.set(number_of_active_validators)

  if finalizedHead != dag.finalizedHead:
    info "Reached new finalization checkpoint",
      head = shortLog(dag.headState.blck),
      stateRoot = shortLog(getStateRoot(dag.headState.data)),
      justified = shortLog(getStateField(
        dag.headState.data, current_justified_checkpoint)),
      finalized = shortLog(getStateField(
        dag.headState.data, finalized_checkpoint))

    block:
      # Update `dag.finalizedBlocks` with all newly finalized blocks (those
      # newer than the previous finalized head), then update `dag.finalizedHead`
      dag.finalizedBlocks.setLen(finalizedHead.slot - dag.tail.slot + 1)

      var tmp = finalizedHead.blck
      while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot:
        dag.finalizedBlocks[(tmp.slot - dag.tail.slot).int] = tmp
        tmp = tmp.parent

      dag.finalizedHead = finalizedHead

    beacon_finalized_epoch.set(getStateField(
      dag.headState.data, finalized_checkpoint).epoch.toGaugeValue)
    beacon_finalized_root.set(getStateField(
      dag.headState.data, finalized_checkpoint).root.toGaugeValue)

    # Pruning the block dag is required every time the finalized head changes
    # in order to clear out blocks that are no longer viable and should
    # therefore no longer be considered as part of the chain we're following
    dag.pruneBlocksDAG()

    # Send notification about new finalization point via callback.
    if not(isNil(dag.onFinHappened)):
      let stateRoot =
        if dag.finalizedHead.slot == dag.head.slot:
          getStateRoot(dag.headState.data)
        elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot:
          getStateField(dag.headState.data, state_roots).data[
            int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)]
        else:
          Eth2Digest() # The thing that finalized was >8192 blocks old?
      let data = FinalizationInfoObject.init(
        dag.finalizedHead.blck.root,
        stateRoot,
        dag.finalizedHead.slot.epoch)
      dag.onFinHappened(data)
2021-12-21 10:40:14 +00:00
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
  # Lightweight check to see if we have the minimal information needed to
  # load up a database - we don't check head here - if something is wrong with
  # head, it's likely an initialized, but corrupt database - init will detect
  # that
  let
    genesisBlockRoot = db.getGenesisBlock()

  if not genesisBlockRoot.isSome():
    return err("Genesis block root missing")

  let
    genesisBlock = db.getForkedBlock(genesisBlockRoot.get())
  if not genesisBlock.isSome():
    return err("Genesis block missing")

  let
    genesisStateRoot = withBlck(genesisBlock.get()): blck.message.state_root

  if not db.containsState(genesisStateRoot):
    return err("Genesis state missing")

  let
    tailBlockRoot = db.getTailBlock()
  if not tailBlockRoot.isSome():
    return err("Tail block root missing")

  let
    tailBlock = db.getForkedBlock(tailBlockRoot.get())
  if not tailBlock.isSome():
    return err("Tail block missing")

  let
    tailStateRoot = withBlck(tailBlock.get()): blck.message.state_root

  if not db.containsState(tailStateRoot):
    return err("Tail state missing")

  ok()
2020-05-19 14:18:07 +00:00
proc preInit*(
    T: type ChainDAGRef, db: BeaconChainDB,
    genesisState, tailState: ForkedHashedBeaconState,
    tailBlock: ForkedTrustedSignedBeaconBlock) =
  # write a genesis state, the way the ChainDAGRef expects it to be stored in
  # database
  # TODO probably should just init a block pool with the freshly written
  #      state - but there's more refactoring needed to make it nice - doing
  #      a minimal patch for now..

  logScope:
    genesisStateRoot = getStateRoot(genesisState)
    genesisStateSlot = getStateField(genesisState, slot)
    tailStateRoot = getStateRoot(tailState)
    tailStateSlot = getStateField(tailState, slot)

  let genesisBlockRoot = withState(genesisState):
    if state.root != getStateRoot(tailState):
      # Different tail and genesis
      if state.data.slot >= getStateField(tailState, slot):
        fatal "Tail state must be newer or the same as genesis state"
        quit 1

      let tail_genesis_validators_root =
        getStateField(tailState, genesis_validators_root)
      if state.data.genesis_validators_root != tail_genesis_validators_root:
        fatal "Tail state doesn't match genesis validators root, it is likely from a different network!",
          genesis_validators_root = shortLog(state.data.genesis_validators_root),
          tail_genesis_validators_root = shortLog(tail_genesis_validators_root)
        quit 1

      let blck = get_initial_beacon_block(state)
      db.putGenesisBlock(blck.root)
      db.putBlock(blck)

      db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
      db.putState(state.root, state.data)
      blck.root
    else: # tail and genesis are the same
      withBlck(tailBlock):
        db.putGenesisBlock(blck.root)
        blck.root

  withState(tailState):
    withBlck(tailBlock):
      # When looking up the state root of the tail block, we don't use the
      # BlockSlot->state_root map, so the only way the init code can find the
      # state is through the state root in the block - this could be relaxed
      # down the line
      if blck.message.state_root != state.root:
        fatal "State must match the given block",
          tailBlck = shortLog(blck)
        quit 1

      db.putBlock(blck)
      db.putTailBlock(blck.root)
      db.putHeadBlock(blck.root)

      db.putStateRoot(state.latest_block_root(), state.data.slot, state.root)
      db.putState(state.root, state.data)

      notice "New database from snapshot",
        genesisBlockRoot = shortLog(genesisBlockRoot),
        genesisStateRoot = shortLog(getStateRoot(genesisState)),
        tailBlockRoot = shortLog(blck.root),
        tailStateRoot = shortLog(state.root),
        fork = state.data.fork,
        validators = state.data.validators.len()
2020-09-22 20:42:42 +00:00
2020-05-22 14:21:22 +00:00
proc getProposer*(
    dag: ChainDAGRef, head: BlockRef, slot: Slot): Option[ValidatorIndex] =
  ## Look up the block proposer for `slot` on the chain identified by `head`,
  ## or `none` when the proposer's key is unexpectedly absent from the cache.
  let
    epochRef = dag.getEpochRef(head, slot.compute_epoch_at_slot())
    slotInEpoch = slot - slot.compute_epoch_at_slot().compute_start_slot_at_epoch()

  let proposer = epochRef.beacon_proposers[slotInEpoch]
  if proposer.isSome():
    if proposer.get().uint64 >= dag.db.immutableValidators.lenu64():
      # Sanity check - it should never happen that the key cache doesn't contain
      # a key for the selected proposer - that would mean that we somehow
      # created validators in the state without updating the cache!
      warn "Proposer key not found",
        keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
      return none(ValidatorIndex)

  proposer
2021-12-09 12:56:54 +00:00
proc aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] =
  ## Aggregate the public keys of all given validators into a single key,
  ## failing when the collection is empty or any index has no known key.
  # Aggregation spec requires non-empty collection
  # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
  # Eth2 spec requires at least one attesting index in attestation
  # - https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
  if validator_indices.len == 0:
    return err("aggregate: no attesting keys")

  # Seed the accumulator with the first key, then fold in the rest.
  let firstKey = dag.validatorKey(validator_indices[0])
  if firstKey.isNone():
    return err("aggregate: invalid validator index")

  var acc {.noInit.}: AggregatePublicKey
  acc.init(firstKey.get())

  for pos in 1 ..< validator_indices.len:
    let nextKey = dag.validatorKey(validator_indices[pos])
    if nextKey.isNone():
      return err("aggregate: invalid validator index")
    acc.aggregate(nextKey.get())

  ok(finish(acc))
proc aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex | uint64],
    bits: BitSeq | BitArray): Result[CookedPubKey, cstring] =
  ## Aggregate the public keys of the validators whose corresponding bit is
  ## set, failing on a length mismatch, an unknown validator index, or an
  ## empty selection (no bits set).
  if validator_indices.len() != bits.len():
    return err("aggregateAll: mismatch in bits length")

  var
    acc {.noInit.}: AggregatePublicKey
    seeded = false # becomes true once the first selected key is loaded

  for pos in 0 ..< bits.len():
    if not bits[pos]:
      continue

    let key = dag.validatorKey(validator_indices[pos])
    if key.isNone():
      return err("aggregate: invalid validator index")

    if seeded:
      acc.aggregate(key.get)
    else:
      acc = AggregatePublicKey.init(key.get)
      seeded = true

  if seeded:
    ok(finish(acc))
  else:
    err("aggregate: no attesting keys")