# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[algorithm, sequtils, tables, sets],
  stew/[arrayops, assign2, byteutils],
  metrics, results, snappy, chronicles,
  ../spec/[beaconstate, eth2_merkleization, eth2_ssz_serialization, helpers,
    state_transition, validator],
  ../spec/forks,
  ../spec/datatypes/[phase0, altair, bellatrix, capella],
  ".."/[beacon_chain_db, beacon_clock, era_db],
  "."/[block_pools_types, block_quarantine]

from ../spec/datatypes/deneb import shortLog

export
  eth2_merkleization, eth2_ssz_serialization,
  block_pools_types, results, beacon_chain_db

logScope: topics = "chaindag"

# https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_head_root, "Root of the head block of the beacon chain"
declareGauge beacon_head_slot, "Slot of the head block of the beacon chain"

# https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition
declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition
declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition
declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition
declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition
declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition

declareGauge beacon_reorgs_total_total, "Total occurrences of reorganizations of the chain" # On fork choice; backwards-compat name (used to be a counter)
declareGauge beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # Interop copy

declareCounter beacon_state_data_cache_hits, "EpochRef hits"
declareCounter beacon_state_data_cache_misses, "EpochRef misses"

declareCounter beacon_state_rewinds, "State database rewinds"

declareGauge beacon_active_validators, "Number of validators in the active validator set"
declareGauge beacon_current_active_validators, "Number of validators in the active validator set" # Interop copy
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block

declareCounter beacon_dag_state_replay_seconds, "Time spent replaying states"

const
  EPOCHS_PER_STATE_SNAPSHOT* = 32
    ## When finality happens, we prune historical states from the database
    ## except for a snapshot every 32 epochs from which replays can happen -
    ## there's a trade-off here between replay length and disk space
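    ##
    ## Note (illustrative): with the mainnet preset of `SLOTS_PER_EPOCH = 32`,
    ## one snapshot every 32 epochs means that reaching any historical state
    ## requires replaying at most 32 * 32 = 1024 slots worth of blocks.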
  MAX_SLOTS_PER_PRUNE* = SLOTS_PER_EPOCH
    ## We prune the database incrementally so as not to introduce long
    ## processing breaks - this number is the maximum number of blocks we allow
    ## to be pruned each time the prune call is made (typically once per slot),
    ## unless the head is moving faster (i.e. during sync)

proc putBlock*(
    dag: ChainDAGRef, signedBlock: ForkyTrustedSignedBeaconBlock) =
  dag.db.putBlock(signedBlock)

proc updateState*(
    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId,
    save: bool, cache: var StateCache): bool {.gcsafe.}

template withUpdatedState*(
    dag: ChainDAGRef, stateParam: var ForkedHashedBeaconState,
    bsiParam: BlockSlotId, okBody: untyped, failureBody: untyped): untyped =
  ## Helper template that updates `stateParam` to a particular `BlockSlotId` -
  ## usage of the state is unsafe outside of the block, or across `await`
  ## boundaries
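  ##
  ## Usage sketch (illustrative) - the two `do:` blocks correspond to `okBody`
  ## and `failureBody`, with `bid` and `updatedState` injected into the first:
  ##
  ##   dag.withUpdatedState(dag.headState, bsi) do:
  ##     # `bid` and `updatedState` are usable here
  ##     discard bid
  ##   do:
  ##     # the state for `bsi` could not be loaded / replayed
  ##     discard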

  block:
    let bsi {.inject.} = bsiParam
    var cache {.inject.} = StateCache()
    if updateState(dag, stateParam, bsi, false, cache):
      template bid(): BlockId {.inject, used.} = bsi.bid
      template updatedState(): ForkedHashedBeaconState {.inject, used.} = stateParam
      okBody
    else:
      failureBody

func get_effective_balances(
    validators: openArray[Validator], epoch: Epoch): seq[Gwei] =
  ## Get the balances from a state as counted for fork choice
  result.newSeq(validators.len) # zero-init

  for i in 0 ..< result.len:
    # All non-active validators have a 0 balance
    let validator = unsafeAddr validators[i]
    if validator[].is_active_validator(epoch) and not validator[].slashed:
      result[i] = validator[].effective_balance

proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
  # Update validator key cache - must be called every time a valid block is
  # applied to the state - this is important to ensure that when we sync blocks
  # without storing a state (non-epoch blocks essentially), the deposits from
  # those blocks are persisted to the in-database cache of immutable validator
  # data (but no earlier than that the whole block has been validated)
  dag.db.updateImmutableValidators(validators)

proc updateFinalizedBlocks*(db: BeaconChainDB, newFinalized: openArray[BlockId]) =
  if db.db.readOnly: return # TODO abstraction leak - where to put this?

  db.withManyWrites:
    for bid in newFinalized:
      db.finalizedBlocks.insert(bid.slot, bid.root)

proc updateFrontfillBlocks*(dag: ChainDAGRef) =
  # When backfilling is done and manages to reach the frontfill point, we can
  # write the frontfill index, knowing that the block information in the
  # era files matches the chain
  if dag.db.db.readOnly: return # TODO abstraction leak - where to put this?

  if dag.frontfillBlocks.len == 0 or dag.backfill.slot > GENESIS_SLOT:
    return

  info "Writing frontfill index", slots = dag.frontfillBlocks.len

  dag.db.withManyWrites:
    let low = dag.db.finalizedBlocks.low.expect(
      "wrote at least tailRef during init")
    let blocks = min(low.int, dag.frontfillBlocks.len - 1)
    var parent: Eth2Digest
    for i in 0 .. blocks:
      let root = dag.frontfillBlocks[i]
      if not isZero(root):
        dag.db.finalizedBlocks.insert(Slot(i), root)
        dag.db.putBeaconBlockSummary(
          root, BeaconBlockSummary(slot: Slot(i), parent_root: parent))
        parent = root

    reset(dag.frontfillBlocks)

func validatorKey*(
    dag: ChainDAGRef, index: ValidatorIndex or uint64): Opt[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indices that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
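  ##
  ## Usage sketch (illustrative): `dag.validatorKey(proposerIndex)` yields
  ## `Opt.none(CookedPubKey)` for an index that has never been observed in a
  ## block, so callers should check `isSome` before use.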
  dag.db.immutableValidators.load(index)

template is_merge_transition_complete*(
    stateParam: ForkedHashedBeaconState): bool =
  withState(stateParam):
    when consensusFork >= ConsensusFork.Bellatrix:
      is_merge_transition_complete(forkyState.data)
    else:
      false

func effective_balances*(epochRef: EpochRef): seq[Gwei] =
  try:
    SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high),
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq()
  except CatchableError as exc:
    raiseAssert exc.msg

func getBlockRef*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockRef] =
  ## Retrieve a resolved block reference, if available - this function does
  ## not return historical finalized blocks, see `getBlockIdAtSlot` for a
  ## function that covers the entire known history
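  ##
  ## Usage sketch (illustrative):
  ##
  ##   if (let bref = dag.getBlockRef(root); bref.isSome()):
  ##     discard bref.get() # a `BlockRef` in the non-finalized part of the DAG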
  let key = KeyedBlockRef.asLookupKey(root)
  # HashSet lacks the api to do check-and-get in one lookup - `[]` will return
  # the copy of the instance in the set which has more fields than `root` set!
  if key in dag.forkBlocks:
    try: ok(dag.forkBlocks[key].blockRef())
    except KeyError: raiseAssert "contains"
  else:
    err()

func getBlockIdAtSlot*(
    state: ForkyHashedBeaconState, slot: Slot): Opt[BlockSlotId] =
  ## Use given state to attempt to find a historical `BlockSlotId`.
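  ##
  ## Worked example (illustrative): `block_roots` repeats the preceding block
  ## root for empty slots, so if the same root covers slots 96..100 and a
  ## different root sits at slot 95, a query for slot 100 walks back until the
  ## root changes and returns the block proposed at slot 96, viewed at
  ## slot 100.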
  if slot > state.data.slot:
    return Opt.none(BlockSlotId) # State does not know about requested slot
  if state.data.slot > slot + SLOTS_PER_HISTORICAL_ROOT:
    return Opt.none(BlockSlotId) # Cache has expired
  var idx = slot mod SLOTS_PER_HISTORICAL_ROOT
  let root =
    if slot == state.data.slot:
      state.latest_block_root
    else:
      state.data.block_roots[idx]
  var bid = BlockId(slot: slot, root: root)
  let availableSlots =
    min(slot.uint64, slot + SLOTS_PER_HISTORICAL_ROOT - state.data.slot)
  for i in 0 ..< availableSlots:
    if idx == 0:
      idx = SLOTS_PER_HISTORICAL_ROOT
    dec idx
    if state.data.block_roots[idx] != root:
      return Opt.some BlockSlotId.init(bid, slot)
    dec bid.slot
    if bid.slot == GENESIS_SLOT:
      return Opt.some BlockSlotId.init(bid, slot)
  Opt.none(BlockSlotId) # Unknown if there are more empty slots before

func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] =
  ## Retrieve the canonical block at the given slot, or the last block that
  ## comes before - similar to `atSlot`, but without the linear scan - may hit
  ## the database to look up early indices.
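  ##
  ## Lookup order (sketch of the logic below): slots past the finalized head
  ## are resolved by walking back from `head` in memory; slots at or after the
  ## finalized head block use the in-memory `finalizedHead`; older slots are
  ## tried against the in-memory states and then the `finalizedBlocks` table
  ## in the database, with genesis as the final fallback.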
  if slot > dag.finalizedHead.slot:
    return dag.head.atSlot(slot).toBlockSlotId() # iterate to the given slot

  if dag.finalizedHead.blck == nil:
    # Not initialized yet (in init)
    return Opt.none(BlockSlotId)

  if slot >= dag.finalizedHead.blck.slot:
    # finalized head is still in memory
    return dag.finalizedHead.blck.atSlot(slot).toBlockSlotId()

  # Load from memory, if the block ID is sufficiently recent.
  # For checkpoint sync, this is the only available source of historical
  # block IDs until sufficient blocks have been backfilled.
  template tryWithState(state: ForkedHashedBeaconState) =
    block:
      withState(state):
        # State must be a descendant of the finalized chain to be viable
        let finBsi = forkyState.getBlockIdAtSlot(dag.finalizedHead.slot)
        if finBsi.isSome and  # DAG finalized bid slot wrong if CP not @ epoch
            finBsi.unsafeGet.bid.root == dag.finalizedHead.blck.bid.root:
          let bsi = forkyState.getBlockIdAtSlot(slot)
          if bsi.isSome:
            return bsi
  tryWithState dag.headState
  tryWithState dag.epochRefState
  tryWithState dag.clearanceState

  # Fall back to the database - this only works for backfilled blocks
  let finlow = dag.db.finalizedBlocks.low.expect("at least tailRef written")
  if slot >= finlow:
    var pos = slot
    while true:
      let root = dag.db.finalizedBlocks.get(pos)
      if root.isSome():
        return ok BlockSlotId.init(
          BlockId(root: root.get(), slot: pos), slot)
      doAssert pos > finlow, "We should have returned the finlow"
      pos = pos - 1

  if slot == GENESIS_SLOT and dag.genesis.isSome():
    return ok dag.genesis.get().atSlot()

  err() # not backfilled yet

proc containsBlock(
    cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool =
  db.containsBlock(root, cfg.consensusForkAtEpoch(slot.epoch))

proc containsBlock*(dag: ChainDAGRef, bid: BlockId): bool =
  dag.cfg.containsBlock(dag.db, bid.slot, bid.root)

proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
    Opt[ForkedTrustedSignedBeaconBlock] =
  # When we only have a digest, we don't know which fork it's from so we try
  # them one by one - this should be used sparingly
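  # Usage sketch (illustrative):
  #   if (let blck = db.getForkedBlock(root); blck.isSome()):
  #     discard blck.get() # ForkedTrustedSignedBeaconBlock of whichever fork matched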
2024-02-25 19:42:44 +00:00
static : doAssert high ( ConsensusFork ) = = ConsensusFork . Deneb
if ( let blck = db . getBlock ( root , deneb . TrustedSignedBeaconBlock ) ;
blck . isSome ( ) ) :
ok ( ForkedTrustedSignedBeaconBlock . init ( blck . get ( ) ) )
elif ( let blck = db . getBlock ( root , capella . TrustedSignedBeaconBlock ) ;
blck . isSome ( ) ) :
ok ( ForkedTrustedSignedBeaconBlock . init ( blck . get ( ) ) )
elif ( let blck = db . getBlock ( root , bellatrix . TrustedSignedBeaconBlock ) ;
blck . isSome ( ) ) :
ok ( ForkedTrustedSignedBeaconBlock . init ( blck . get ( ) ) )
elif ( let blck = db . getBlock ( root , altair . TrustedSignedBeaconBlock ) ;
blck . isSome ( ) ) :
ok ( ForkedTrustedSignedBeaconBlock . init ( blck . get ( ) ) )
elif ( let blck = db . getBlock ( root , phase0 . TrustedSignedBeaconBlock ) ;
blck . isSome ( ) ) :
ok ( ForkedTrustedSignedBeaconBlock . init ( blck . get ( ) ) )
else :
err ( )
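
# Illustrative usage sketch (comment only, not part of the module's API):
# resolving a root when the fork is unknown - `root` here is assumed to come
# from e.g. an API request:
#
#   let forked = db.getForkedBlock(root)
#   if forked.isSome():
#     withBlck(forked.get()):
#       debug "Loaded block", slot = forkyBlck.message.slot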
proc getBlock * (
dag : ChainDAGRef , bid : BlockId ,
T : type ForkyTrustedSignedBeaconBlock ) : Opt [ T ] =
dag . db . getBlock ( bid . root , T ) or
getBlock (
dag . era , getStateField ( dag . headState , historical_roots ) . asSeq ,
dag . headState . historical_summaries ( ) . asSeq ,
bid . slot , Opt [ Eth2Digest ] . ok ( bid . root ) , T )
proc getBlockSSZ * ( dag : ChainDAGRef , bid : BlockId , bytes : var seq [ byte ] ) : bool =
# Load the SSZ-encoded data of a block into `bytes`, overwriting the existing
# content
let fork = dag . cfg . consensusForkAtEpoch ( bid . slot . epoch )
dag . db . getBlockSSZ ( bid . root , bytes , fork ) or
( bid . slot < = dag . finalizedHead . slot and
getBlockSSZ (
dag . era , getStateField ( dag . headState , historical_roots ) . asSeq ,
dag . headState . historical_summaries ( ) . asSeq ,
bid . slot , bytes ) . isOk ( ) and bytes . len > 0 )
proc getBlockSZ * ( dag : ChainDAGRef , bid : BlockId , bytes : var seq [ byte ] ) : bool =
# Load the snappy-frame-compressed ("SZ") SSZ-encoded data of a block into
# `bytes`, overwriting the existing content
# careful: there are two snappy encodings in use, with and without framing!
# Returns true if the block is found, false if not
let fork = dag . cfg . consensusForkAtEpoch ( bid . slot . epoch )
dag . db . getBlockSZ ( bid . root , bytes , fork ) or
( bid . slot < = dag . finalizedHead . slot and
getBlockSZ (
dag . era , getStateField ( dag . headState , historical_roots ) . asSeq ,
dag . headState . historical_summaries ( ) . asSeq ,
bid . slot , bytes ) . isOk and bytes . len > 0 )
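
# Illustrative usage sketch (comment only): both `getBlockSSZ` and `getBlockSZ`
# write into a caller-provided buffer so it can be reused across requests;
# `bid` is assumed to be a previously resolved `BlockId` and `sendResponse` is
# a hypothetical consumer:
#
#   var bytes: seq[byte]
#   if dag.getBlockSZ(bid, bytes):
#     # `bytes` now holds the snappy-framed SSZ encoding of the block
#     discard sendResponse(bytes)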
proc getForkedBlock * (
dag : ChainDAGRef , bid : BlockId ) : Opt [ ForkedTrustedSignedBeaconBlock ] =
let fork = dag . cfg . consensusForkAtEpoch ( bid . slot . epoch )
result . ok ( ForkedTrustedSignedBeaconBlock ( kind : fork ) )
withBlck ( result . get ( ) ) :
type T = type ( forkyBlck )
forkyBlck = getBlock ( dag , bid , T ) . valueOr :
getBlock (
dag . era , getStateField ( dag . headState , historical_roots ) . asSeq ,
dag . headState . historical_summaries ( ) . asSeq ,
bid . slot , Opt [ Eth2Digest ] . ok ( bid . root ) , T ) . valueOr :
result . err ( )
return
proc getBlockId * ( db : BeaconChainDB , root : Eth2Digest ) : Opt [ BlockId ] =
block : # We might have a summary in the database
let summary = db . getBeaconBlockSummary ( root )
if summary . isOk ( ) :
return ok ( BlockId ( root : root , slot : summary . get ( ) . slot ) )
block :
# We might have a block without having written a summary - this can happen
# if there was a crash between writing the block and writing the summary,
    # especially in databases written by older Nimbus versions
let forked = db . getForkedBlock ( root )
if forked . isSome ( ) :
# Shouldn't happen too often but..
let
blck = forked . get ( )
summary = withBlck ( blck ) : forkyBlck . message . toBeaconBlockSummary ( )
debug " Writing summary " , blck = shortLog ( blck )
db . putBeaconBlockSummary ( root , summary )
return ok ( BlockId ( root : root , slot : summary . slot ) )
err ( )
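
# Illustrative usage sketch (comment only): resolving a root to a `BlockId` may
# lazily write a missing `BeaconBlockSummary` back to the database as a side
# effect:
#
#   let bid = db.getBlockId(root)
#   if bid.isSome():
#     debug "Resolved block", slot = bid.get().slot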
proc getBlockId * ( dag : ChainDAGRef , root : Eth2Digest ) : Opt [ BlockId ] =
## Look up block id by root in history - useful for turning a root into a
## slot - may hit the database, may return blocks that have since become
## unviable - use `getBlockIdAtSlot` to check that the block is still viable
## if used in a sensitive context
block : # If we have a BlockRef, this is the fastest way to get a block id
let blck = dag . getBlockRef ( root )
if blck . isOk ( ) :
return ok ( blck . get ( ) . bid )
dag . db . getBlockId ( root )
proc getForkedBlock * (
dag : ChainDAGRef , root : Eth2Digest ) : Opt [ ForkedTrustedSignedBeaconBlock ] =
let bid = dag . getBlockId ( root )
if bid . isSome ( ) :
dag . getForkedBlock ( bid . get ( ) )
else :
# In case we didn't have a summary - should be rare, but ..
dag . db . getForkedBlock ( root )
func isCanonical * ( dag : ChainDAGRef , bid : BlockId ) : bool =
## Returns `true` if the given `bid` is part of the history selected by
## `dag.head`.
let current = dag . getBlockIdAtSlot ( bid . slot ) . valueOr :
return false # We don't know, so ..
return current . bid = = bid
func isFinalized * ( dag : ChainDAGRef , bid : BlockId ) : bool =
## Returns `true` if the given `bid` is part of the finalized history
## selected by `dag.finalizedHead`.
dag . isCanonical ( bid ) and ( bid . slot < = dag . finalizedHead . slot )
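
# Illustrative usage sketch (comment only): a block can be canonical without
# being finalized yet - both predicates consult `getBlockIdAtSlot`:
#
#   if dag.isCanonical(bid) and not dag.isFinalized(bid):
#     discard # on the head history, but still subject to reorgs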
func parent * ( dag : ChainDAGRef , bid : BlockId ) : Opt [ BlockId ] =
if bid . slot = = 0 :
return err ( )
if bid . slot > dag . finalizedHead . slot :
# Make sure we follow the correct history as there may be forks
let blck = ? dag . getBlockRef ( bid . root )
doAssert not isNil ( blck . parent ) , " should reach finalized head "
return ok blck . parent . bid
let bids = ? dag . getBlockIdAtSlot ( bid . slot - 1 )
ok ( bids . bid )
func parentOrSlot * ( dag : ChainDAGRef , bsi : BlockSlotId ) : Opt [ BlockSlotId ] =
if bsi . slot = = 0 :
return err ( )
if bsi . isProposed :
let parent = ? dag . parent ( bsi . bid )
ok BlockSlotId . init ( parent , bsi . slot )
else :
ok BlockSlotId . init ( bsi . bid , bsi . slot - 1 )
func atSlot * ( dag : ChainDAGRef , bid : BlockId , slot : Slot ) : Opt [ BlockSlotId ] =
if bid . slot > dag . finalizedHead . slot :
let blck = ? dag . getBlockRef ( bid . root )
if slot > dag . finalizedHead . slot :
return blck . atSlot ( slot ) . toBlockSlotId ( )
else :
# Check if the given `bid` is still part of history - it might hail from an
# orphaned fork
let existing = ? dag . getBlockIdAtSlot ( bid . slot )
if existing . bid ! = bid :
return err ( ) # Not part of known / relevant history
if existing . slot = = slot : # and bid.slot == slot
return ok existing
if bid . slot < = slot :
ok BlockSlotId . init ( bid , slot )
else :
dag . getBlockIdAtSlot ( slot )
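
# Illustrative usage sketch (comment only): walking towards genesis block by
# block with the finality-aware `parent` helper:
#
#   var cur = Opt.some(bid)
#   while cur.isSome():
#     # ... process cur.get() ...
#     cur = dag.parent(cur.get())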
func nextTimestamp [ I , T ] ( cache : var LRUCache [ I , T ] ) : uint32 =
if cache . timestamp = = uint32 . high :
for i in 0 .. < I :
template e : untyped = cache . entries [ i ]
if e . lastUsed ! = 0 :
e . lastUsed = 1
cache . timestamp = 1
inc cache . timestamp
cache . timestamp
template peekIt [ I , T ] ( cache : var LRUCache [ I , T ] , predicate : untyped ) : Opt [ T ] =
block :
var res : Opt [ T ]
for i in 0 .. < I :
template e : untyped = cache . entries [ i ]
template it : untyped {. inject , used . } = e . value
if e . lastUsed ! = 0 and predicate :
res . ok it
break
res
template findIt [ I , T ] ( cache : var LRUCache [ I , T ] , predicate : untyped ) : Opt [ T ] =
block :
var res : Opt [ T ]
for i in 0 .. < I :
template e : untyped = cache . entries [ i ]
template it : untyped {. inject , used . } = e . value
if e . lastUsed ! = 0 and predicate :
e . lastUsed = cache . nextTimestamp
res . ok it
break
res
template delIt [ I , T ] ( cache : var LRUCache [ I , T ] , predicate : untyped ) =
block :
for i in 0 .. < I :
template e : untyped = cache . entries [ i ]
template it : untyped {. inject , used . } = e . value
if e . lastUsed ! = 0 and predicate :
e . reset ( )
func put [ I , T ] ( cache : var LRUCache [ I , T ] , value : T ) =
var lru = 0
block :
var min = uint32 . high
for i in 0 .. < I :
template e : untyped = cache . entries [ i ]
if e . lastUsed < min :
min = e . lastUsed
lru = i
if min = = 0 :
break
template e : untyped = cache . entries [ lru ]
e . value = value
e . lastUsed = cache . nextTimestamp
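
# Illustrative usage sketch (comment only), mirroring how the caches below are
# used - `findIt` refreshes the LRU timestamp of a hit while `peekIt` does not:
#
#   dag.shufflingRefs.put shufflingRef
#   let hit = dag.shufflingRefs.findIt(it.epoch == epoch)
#   let peeked = dag.epochRefs.peekIt(it.key == key)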
func epochAncestor ( dag : ChainDAGRef , bid : BlockId , epoch : Epoch ) :
Opt [ BlockSlotId ] =
## The epoch ancestor is the last block that has an effect on the epoch-
## related state data, as updated in `process_epoch` - this block determines
  ## effective balances, validator additions and removals etc. and serves as a
## base for `EpochRef` construction.
if epoch < dag . tail . slot . epoch or bid . slot < dag . tail . slot :
# Not enough information in database to meaningfully process pre-tail epochs
return Opt . none BlockSlotId
let
dependentSlot =
if epoch = = dag . tail . slot . epoch :
# Use the tail as "dependent block" - this may be the genesis block, or,
# in the case of checkpoint sync, the checkpoint block
dag . tail . slot
else :
epoch . start_slot ( ) - 1
bsi = ? dag . atSlot ( bid , dependentSlot )
epochSlot =
if epoch = = dag . tail . slot . epoch :
dag . tail . slot
else :
epoch . start_slot ( )
ok BlockSlotId ( bid : bsi . bid , slot : epochSlot )
func epochKey ( dag : ChainDAGRef , bid : BlockId , epoch : Epoch ) : Opt [ EpochKey ] =
## The state transition works by storing information from blocks in a
## "working" area until the epoch transition, then batching work collected
  ## during the epoch. Thus, the last block in the ancestor epoch is the block
  ## that has an impact on the epoch currently being considered.
##
## This function returns an epoch key pointing to that epoch boundary, i.e. the
## boundary where the last block has been applied to the state and epoch
## processing has been done.
let bsi = dag . epochAncestor ( bid , epoch ) . valueOr :
return Opt . none ( EpochKey )
Opt . some ( EpochKey ( bid : bsi . bid , epoch : epoch ) )
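
# Worked example (illustrative only): with 32 slots per epoch, asking for the
# ancestor of epoch 10 resolves the chain of `bid` at slot 319 (the last slot
# of epoch 9) and pairs the resulting block with slot 320, since the block
# applied at or before slot 319 is what determines the shuffling and balances
# in effect throughout epoch 10. For the tail epoch, the tail itself is used.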
func putShufflingRef * ( dag : ChainDAGRef , shufflingRef : ShufflingRef ) =
## Store shuffling in the cache
if shufflingRef . epoch < dag . finalizedHead . slot . epoch ( ) :
# Only cache epoch information for unfinalized blocks - earlier states
    # are seldom used (e.g. via RPC), so there is no need to cache them
return
dag . shufflingRefs . put shufflingRef
func findShufflingRef * (
dag : ChainDAGRef , bid : BlockId , epoch : Epoch ) : Opt [ ShufflingRef ] =
  ## Look up a shuffling in the cache, returning `none` if it's not present - see
## `getShufflingRef` for a version that creates a new instance if it's missing
let
dependent_slot = epoch . attester_dependent_slot ( )
dependent_bsi = ? dag . atSlot ( bid , dependent_slot )
# Check `ShufflingRef` cache
let shufflingRef = dag . shufflingRefs . findIt (
it . epoch = = epoch and it . attester_dependent_root = = dependent_bsi . bid . root )
if shufflingRef . isOk :
return shufflingRef
# Check `EpochRef` cache
let epochRef = dag . epochRefs . peekIt (
it . shufflingRef . epoch = = epoch and
it . shufflingRef . attester_dependent_root = = dependent_bsi . bid . root )
if epochRef . isOk :
dag . putShufflingRef ( epochRef . get . shufflingRef )
return ok epochRef . get . shufflingRef
err ( )
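
# Illustrative usage sketch (comment only): the cache lookup is cheap and can
# be tried before falling back to recomputing the shuffling from a state:
#
#   let shuffling = dag.findShufflingRef(bid, epoch)
#   if shuffling.isSome():
#     discard shuffling.get().shuffled_active_validator_indices.len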
func findEpochRef * (
dag : ChainDAGRef , bid : BlockId , epoch : Epoch ) : Opt [ EpochRef ] =
  ## Look up an EpochRef in the cache, returning `none` if it's not present - see
## `getEpochRef` for a version that creates a new instance if it's missing
let key = ? dag . epochKey ( bid , epoch )
dag . epochRefs . findIt ( it . key = = key )
func putEpochRef ( dag : ChainDAGRef , epochRef : EpochRef ) =
if epochRef . epoch < dag . finalizedHead . slot . epoch ( ) :
# Only cache epoch information for unfinalized blocks - earlier states
    # are seldom used (e.g. via RPC), so there is no need to cache them
return
dag . epochRefs . put epochRef
func init * (
T : type ShufflingRef , state : ForkedHashedBeaconState ,
cache : var StateCache , epoch : Epoch ) : T =
let attester_dependent_root =
withState ( state ) : forkyState . dependent_root ( epoch . get_previous_epoch )
ShufflingRef (
epoch : epoch ,
attester_dependent_root : attester_dependent_root ,
shuffled_active_validator_indices :
cache . get_shuffled_active_validator_indices ( state , epoch ) ,
)
func init * (
T : type EpochRef , dag : ChainDAGRef , state : ForkedHashedBeaconState ,
cache : var StateCache ) : T =
let
epoch = state . get_current_epoch ( )
proposer_dependent_root = withState ( state ) :
forkyState . proposer_dependent_root
shufflingRef = dag . findShufflingRef ( state . latest_block_id , epoch ) . valueOr :
let tmp = ShufflingRef . init ( state , cache , epoch )
dag . putShufflingRef ( tmp )
tmp
total_active_balance = withState ( state ) :
get_total_active_balance ( forkyState . data , cache )
epochRef = EpochRef (
key : dag . epochKey ( state . latest_block_id , epoch ) . expect (
" Valid epoch ancestor when processing state " ) ,
eth1_data :
getStateField ( state , eth1_data ) ,
eth1_deposit_index :
getStateField ( state , eth1_deposit_index ) ,
checkpoints :
FinalityCheckpoints (
justified : getStateField ( state , current_justified_checkpoint ) ,
finalized : getStateField ( state , finalized_checkpoint ) ) ,
# beacon_proposers: Separately filled below
proposer_dependent_root : proposer_dependent_root ,
shufflingRef : shufflingRef ,
total_active_balance : total_active_balance
)
epochStart = epoch . start_slot ( )
for i in 0 'u64 .. < SLOTS_PER_EPOCH :
epochRef . beacon_proposers [ i ] =
get_beacon_proposer_index ( state , cache , epochStart + i )
# When fork choice runs, it will need the effective balance of the justified
# checkpoint - we pre-load the balances here to avoid rewinding the justified
# state later and compress them because not all checkpoints end up being used
  # for fork choice - especially during long periods of non-finalization
func snappyEncode ( inp : openArray [ byte ] ) : seq [ byte ] =
try :
snappy . encode ( inp )
except CatchableError as err :
raiseAssert err . msg
epochRef . effective_balances_bytes =
snappyEncode ( SSZ . encode (
List [ Gwei , Limit VALIDATOR_REGISTRY_LIMIT ] (
get_effective_balances ( getStateField ( state , validators ) . asSeq , epoch ) ) ) )
epochRef
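
# Illustrative usage sketch (comment only): constructing an `EpochRef` assumes
# `state` has already been advanced into the epoch of interest, with `cache`
# carrying any shufflings computed along the way:
#
#   var cache = StateCache()
#   let epochRef = EpochRef.init(dag, dag.headState, cache)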
func loadStateCache (
dag : ChainDAGRef , cache : var StateCache , bid : BlockId , epoch : Epoch ) =
# When creating a state cache, we want the current and the previous epoch
# information to be preloaded as both of these are used in state transition
# functions
template load ( e : Epoch ) =
block :
let epoch = e
if epoch notin cache . shuffled_active_validator_indices :
let shufflingRef = dag . findShufflingRef ( bid , epoch )
if shufflingRef . isSome ( ) :
cache . shuffled_active_validator_indices [ epoch ] =
shufflingRef [ ] [ ] . shuffled_active_validator_indices
let epochRef = dag . findEpochRef ( bid , epoch )
if epochRef . isSome ( ) :
let start_slot = epoch . start_slot ( )
for i , idx in epochRef [ ] [ ] . beacon_proposers :
cache . beacon_proposer_indices [ start_slot + i ] = idx
cache . total_active_balance [ epoch ] = epochRef [ ] [ ] . total_active_balance
load ( epoch )
if epoch > 0 :
load ( epoch - 1 )
if dag . head ! = nil : # nil during init.. sigh
let period = dag . head . slot . sync_committee_period
if period = = epoch . sync_committee_period and
period notin cache . sync_committees and
period > dag . cfg . ALTAIR_FORK_EPOCH . sync_committee_period ( ) :
# If the block we're aiming for shares ancestry with head, we can reuse
# the cached head committee - this accounts for most "live" cases like
# syncing and checking blocks since the committees rarely change
let periodBsi = dag . atSlot ( bid , period . start_slot )
if periodBsi . isSome and periodBsi = =
dag . atSlot ( dag . head . bid , period . start_slot ) :
# We often end up sharing sync committees with head during sync / gossip
# validation / head updates
cache . sync_committees [ period ] = dag . headSyncCommittees
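
# Illustrative usage sketch (comment only): the cache is typically (re)loaded
# just before replaying blocks on top of a state so that proposer and
# shuffling lookups during state transition hit the prepared entries:
#
#   var cache = StateCache()
#   dag.loadStateCache(cache, bid, bid.slot.epoch)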
func containsForkBlock * ( dag : ChainDAGRef , root : Eth2Digest ) : bool =
## Checks for blocks at the finalized checkpoint or newer
KeyedBlockRef . asLookupKey ( root ) in dag . forkBlocks
func isFinalizedStateSnapshot ( slot : Slot ) : bool =
slot . is_epoch and slot . epoch mod EPOCHS_PER_STATE_SNAPSHOT = = 0
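
# Worked example (illustrative only): if EPOCHS_PER_STATE_SNAPSHOT were 32,
# the epoch-start slots of epochs 0, 32, 64, ... would qualify as snapshot
# slots, i.e. one full state would be kept roughly every 32 * 32 slots in the
# finalized part of the history.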

func isStateCheckpoint(dag: ChainDAGRef, bsi: BlockSlotId): bool =
  ## State checkpoints are the points in time for which we store full state
  ## snapshots, which later serve as rewind starting points when replaying state
  ## transitions from database, for example during reorgs.
  ##
  # As a policy, we only store epoch boundary states without the epoch block
  # (if it exists) applied - the rest can be reconstructed by loading an epoch
  # boundary state and applying the missing blocks.
  # We also avoid states that were produced with empty slots only - as such,
  # there is only a checkpoint for the first epoch after a block.
  # The tail block also counts as a state checkpoint!
  (bsi.isProposed and bsi.bid == dag.tail) or
  (bsi.slot.is_epoch and bsi.slot.epoch == (bsi.bid.slot.epoch + 1))
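
# Worked example (illustration only, assuming mainnet SLOTS_PER_EPOCH = 32):
# for a block proposed at slot 37 (epoch 1), the state at slot 64 - the epoch 2
# boundary, before any slot-64 block is applied - is a checkpoint since
# 2 == 1 + 1; the state at slot 96 (epoch 3) with the same latest block is not,
# because only the first epoch boundary after a block is stored.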

proc getState(
    db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest, slot: Slot,
    state: var ForkedHashedBeaconState, rollback: RollbackProc): bool =
  let state_root = db.getStateRoot(block_root, slot).valueOr:
    return false

  db.getState(cfg.consensusForkAtEpoch(slot.epoch), state_root, state, rollback)
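
# The lookup above is two-step: the (block_root, slot) pair is first resolved
# to a state root via the state-root table, and only then is the fork-specific
# state itself loaded; if either step finds no entry, the proc returns false.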

proc containsState*(
    db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest,
    slots: Slice[Slot], legacy = true): bool =
  var slot = slots.b
  while slot >= slots.a:
    let state_root = db.getStateRoot(block_root, slot)
    if state_root.isSome() and
        db.containsState(
          cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), legacy):
      return true
    if slot == slots.a: # avoid underflow at genesis
      break
    slot -= 1
  false
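
# `getState` below walks the same slot range downwards as `containsState`
# above, but loads the first (highest-slot) matching state into the
# caller-provided buffer instead of merely checking for its presence.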

proc getState*(
    db: BeaconChainDB, cfg: RuntimeConfig, block_root: Eth2Digest,
    slots: Slice[Slot], state: var ForkedHashedBeaconState,
    rollback: RollbackProc): bool =
  var slot = slots.b
  while slot >= slots.a:
    let state_root = db.getStateRoot(block_root, slot)
    if state_root.isSome() and
        db.getState(
          cfg.consensusForkAtEpoch(slot.epoch), state_root.get(), state,
          rollback):
      return true
    if slot == slots.a: # avoid underflow at genesis
      break
    slot -= 1
  false

proc getState(
    dag: ChainDAGRef, bsi: BlockSlotId, state: var ForkedHashedBeaconState): bool =
  ## Load a state from the database given a block and a slot - this will first
  ## lookup the state root in the state root table then load the corresponding
  ## state, if it exists
  if not dag.isStateCheckpoint(bsi):
    return false

  let rollbackAddr =
    # Any restore point will do as long as it's not the object being updated
    if unsafeAddr(state) == unsafeAddr(dag.headState):
      unsafeAddr dag.clearanceState
    else:
      unsafeAddr dag.headState

  let v = addr state
  func rollback() =
    assign(v[], rollbackAddr[])

  dag.db.getState(dag.cfg, bsi.bid.root, bsi.slot, state, rollback)
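
# The `rollback` closure restores `state` by copying from another full state
# held by the dag (head or clearance, whichever is not the one being written),
# so a failed or partial load cannot leave the caller's state object in an
# inconsistent shape.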

proc getStateByParent(
    dag: ChainDAGRef, bid: BlockId, state: var ForkedHashedBeaconState): bool =
  ## Try to load the state referenced by the parent of the given `bid` - this
  ## state can be used to advance to the `bid` state itself.
  let slot = bid.slot

  let
    summary = dag.db.getBeaconBlockSummary(bid.root).valueOr:
      return false
    parentMinSlot =
      dag.db.getBeaconBlockSummary(summary.parent_root).
          map(proc(x: auto): auto = x.slot).valueOr:
        # in the cases where we don't have slot information, we'll search for
        # the state a few epochs back from the `bid` slot - if there are gaps
        # of empty slots larger than this, we will not be able to load the
        # state using this trick
        if slot.uint64 >= (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH:
          slot - (EPOCHS_PER_STATE_SNAPSHOT * 2) * SLOTS_PER_EPOCH
        else:
          Slot(0)
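
  # Fallback window arithmetic (illustration, assuming EPOCHS_PER_STATE_SNAPSHOT
  # = 32 and mainnet SLOTS_PER_EPOCH = 32): the search reaches back
  # 2 * 32 * 32 = 2048 slots, i.e. two snapshot intervals, which should cover at
  # least one stored snapshot unless the chain has a longer run of empty slots.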

  let rollbackAddr =
    # Any restore point will do as long as it's not the object being updated
    if unsafeAddr(state) == unsafeAddr(dag.headState):
      unsafeAddr dag.clearanceState
    else:
      unsafeAddr dag.headState

  let v = addr state
  func rollback() =
    assign(v[], rollbackAddr[])

  dag.db.getState(
    dag.cfg, summary.parent_root, parentMinSlot..slot, state, rollback)
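
# Usage sketch (hypothetical caller, for illustration): a replay path might do
#
#   var state: ForkedHashedBeaconState
#   if dag.getStateByParent(bid, state):
#     # advance `state` with empty-slot processing and apply the `bid` block
#     # to arrive at the post-`bid` state
#
# i.e. the parent-referenced state is a starting point, not the `bid` state.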

proc getNearbyState(
    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId,
    lowSlot: Slot): Opt[void] =
  ## Load state from DB that is close to `bid` and has at least slot `lowSlot`.
  var
    e = bid.slot.epoch
    b = bid
  while true:
    let stateSlot = e.start_slot
    if stateSlot < lowSlot:
      return err()
    b = (? dag.atSlot(b, max(stateSlot, 1.Slot) - 1)).bid
    let bsi = BlockSlotId.init(b, stateSlot)
    if not dag.getState(bsi, state):
      if e == GENESIS_EPOCH:
        return err()
      dec e
      continue
    return ok()
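
# The loop above walks epoch-start slots backwards from `bid`: for each epoch
# boundary it resolves the ancestor block just before the boundary and tries to
# load the corresponding checkpoint state, stopping at the first hit or giving
# up once the boundary would fall below `lowSlot` (or genesis is reached).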

proc currentSyncCommitteeForPeriod*(
    dag: ChainDAGRef,
    tmpState: var ForkedHashedBeaconState,
    period: SyncCommitteePeriod): Opt[SyncCommittee] =
  ## Fetch a `SyncCommittee` for a given sync committee period.
  ## For non-finalized periods, follow the chain as selected by fork choice.
  let lowSlot = max(dag.tail.slot, dag.cfg.ALTAIR_FORK_EPOCH.start_slot)
  if period < lowSlot.sync_committee_period:
    return err()
  let
    periodStartSlot = period.start_slot
    syncCommitteeSlot = max(periodStartSlot, lowSlot)
    bsi = ? dag.getBlockIdAtSlot(syncCommitteeSlot)
  dag.withUpdatedState(tmpState, bsi) do:
    withState(updatedState):
      when consensusFork >= ConsensusFork.Altair:
        ok forkyState.data.current_sync_committee
      else: err()
  do: err()
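
# Hypothetical call site (illustration only - `assignClone` is assumed here
# simply as a way to obtain a scratch copy of a state):
#
#   let tmpState = assignClone(dag.headState)
#   let committee = dag.currentSyncCommitteeForPeriod(tmpState[], period)
#
# Any spare ForkedHashedBeaconState works as `tmpState`, since the proc rewinds
# it to the relevant slot as needed.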

proc getBlockIdAtSlot*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] =
  if slot >= state.data.slot:
    Opt.some state.latest_block_id
  elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT:
    dag.getBlockId(state.data.get_block_root_at_slot(slot))
  else:
    Opt.none(BlockId)
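
# The three branches above: a slot at or past the state's own slot maps to the
# state's latest block; a slot within the last SLOTS_PER_HISTORICAL_ROOT slots
# is resolved through the block roots recorded in the state itself; anything
# older cannot be answered from this state alone and yields `none`.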

proc updateBeaconMetrics(
    state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) =
  # https://github.com/ethereum/beacon-metrics/blob/master/metrics.md#additional-metrics
  # both non-negative, so difference can't overflow or underflow int64
  beacon_head_root.set(bid.root.toGaugeValue)
  beacon_head_slot.set(bid.slot.toGaugeValue)

  withState(state):
    beacon_pending_deposits.set(
      (forkyState.data.eth1_data.deposit_count -
        forkyState.data.eth1_deposit_index).toGaugeValue)
    beacon_processed_deposits_total.set(
      forkyState.data.eth1_deposit_index.toGaugeValue)

    beacon_current_justified_epoch.set(
      forkyState.data.current_justified_checkpoint.epoch.toGaugeValue)
    beacon_current_justified_root.set(
      forkyState.data.current_justified_checkpoint.root.toGaugeValue)
    beacon_previous_justified_epoch.set(
      forkyState.data.previous_justified_checkpoint.epoch.toGaugeValue)
    beacon_previous_justified_root.set(
      forkyState.data.previous_justified_checkpoint.root.toGaugeValue)
    beacon_finalized_epoch.set(
      forkyState.data.finalized_checkpoint.epoch.toGaugeValue)
    beacon_finalized_root.set(
      forkyState.data.finalized_checkpoint.root.toGaugeValue)

    let active_validators = count_active_validators(
      forkyState.data, forkyState.data.slot.epoch, cache).toGaugeValue
    beacon_active_validators.set(active_validators)
    beacon_current_active_validators.set(active_validators)

import blockchain_dag_light_client
export
  blockchain_dag_light_client.getLightClientBootstrap,
  blockchain_dag_light_client.getLightClientUpdateForPeriod,
  blockchain_dag_light_client.getLightClientFinalityUpdate,
  blockchain_dag_light_client.getLightClientOptimisticUpdate

proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) =
  # Store a state and its root
  let slot = getStateField(state, slot)

  logScope:
    blck = shortLog(bid)
    stateSlot = shortLog(slot)
    stateRoot = shortLog(getStateRoot(state))

  if not dag.isStateCheckpoint(BlockSlotId.init(bid, slot)):
    return

  # Don't consider legacy tables here, they are slow to read so we'll want to
  # rewrite things in the new table anyway.
  if dag.db.containsState(
      dag.cfg.consensusForkAtEpoch(slot.epoch), getStateRoot(state),
      legacy = false):
    return

  let startTick = Moment.now()
  # Ideally we would save the state and the root lookup cache in a single
  # transaction to prevent database inconsistencies, but the state loading code
  # is resilient against one or the other going missing
  withState(state):
    dag.db.putState(forkyState)
debug " Stored state " , putStateDur = Moment . now ( ) - startTick
proc advanceSlots * (
dag : ChainDAGRef , state : var ForkedHashedBeaconState , slot : Slot , save : bool ,
cache : var StateCache , info : var ForkedEpochInfo ) =
# Given a state, advance it zero or more slots by applying empty slot
# processing - the state must be positioned at or before `slot`
doAssert getStateField ( state , slot ) < = slot
let stateBid = state . latest_block_id
while getStateField ( state , slot ) < slot :
let
preEpoch = getStateField ( state , slot ) . epoch
loadStateCache ( dag , cache , stateBid , getStateField ( state , slot ) . epoch )
process_slots (
dag . cfg , state , getStateField ( state , slot ) + 1 , cache , info ,
dag . updateFlags ) . expect ( " process_slots shouldn ' t fail when state slot is correct " )
if save :
dag . putState ( state , stateBid )
# The reward information in the state transition is computed for epoch
# transitions - when transitioning into epoch N, the activities in epoch
# N-2 are translated into balance updates, and this is what we capture
# in the monitor. This may be inaccurate during a deep reorg (>1 epoch)
# which is an acceptable tradeoff for monitoring.
withState ( state ) :
2022-08-26 22:47:40 +00:00
let postEpoch = forkyState . data . slot . epoch
2024-02-20 04:40:18 +00:00
if preEpoch ! = postEpoch and postEpoch > = 2 :
var proposers : array [ SLOTS_PER_EPOCH , Opt [ ValidatorIndex ] ]
let epochRef = dag . findEpochRef ( stateBid , postEpoch - 2 )
if epochRef . isSome ( ) :
proposers = epochRef [ ] [ ] . beacon_proposers
2022-08-26 22:47:40 +00:00
dag . validatorMonitor [ ] . registerEpochInfo (
2024-02-20 04:40:18 +00:00
forkyState . data , proposers , info )
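
# Note on the `postEpoch - 2` lookup above: when the transition enters epoch N,
# it settles duties performed in epoch N-2, so the proposers of N-2 are the
# ones the validator monitor needs in order to attribute those balance changes;
# the `postEpoch >= 2` guard keeps the subtraction from underflowing near
# genesis.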

proc applyBlock(
    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bid: BlockId,
    cache: var StateCache, info: var ForkedEpochInfo): Result[void, cstring] =
  loadStateCache(dag, cache, bid, getStateField(state, slot).epoch)

  discard case dag.cfg.consensusForkAtEpoch(bid.slot.epoch)
  of ConsensusFork.Phase0:
    let data = getBlock(dag, bid, phase0.TrustedSignedBeaconBlock).valueOr:
      return err("Block load failed")
    ? state_transition(
      dag.cfg, state, data, cache, info,
      dag.updateFlags + {slotProcessed}, noRollback)
  of ConsensusFork.Altair:
    let data = getBlock(dag, bid, altair.TrustedSignedBeaconBlock).valueOr:
      return err("Block load failed")
    ? state_transition(
      dag.cfg, state, data, cache, info,
      dag.updateFlags + {slotProcessed}, noRollback)
  of ConsensusFork.Bellatrix:
    let data = getBlock(dag, bid, bellatrix.TrustedSignedBeaconBlock).valueOr:
      return err("Block load failed")
    ? state_transition(
      dag.cfg, state, data, cache, info,
      dag.updateFlags + {slotProcessed}, noRollback)
  of ConsensusFork.Capella:
    let data = getBlock(dag, bid, capella.TrustedSignedBeaconBlock).valueOr:
      return err("Block load failed")
    ? state_transition(
      dag.cfg, state, data, cache, info,
      dag.updateFlags + {slotProcessed}, noRollback)
  of ConsensusFork.Deneb:
    let data = getBlock(dag, bid, deneb.TrustedSignedBeaconBlock).valueOr:
      return err("Block load failed")
    ? state_transition(
      dag.cfg, state, data, cache, info,
      dag.updateFlags + {slotProcessed}, noRollback)

  ok()
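
# `applyBlock` is used when replaying history: callers such as `updateState`
# first advance the state to the block's slot (empty-slot processing), then
# apply each block along the branch. A minimal replay sketch, assuming the
# caller already holds `dag`, a writable `state`, `cache` and `info`, and a
# hypothetical `blocksToApply` sequence of `BlockId`:
#   for ancestor in blocksToApply:
#     ? dag.applyBlock(state, ancestor, cache, info)
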
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
           validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
           eraPath = ".",
           onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
           onReorgCb: OnReorgCallback = nil, onFinCb: OnFinalizedCallback = nil,
           vanityLogs = default(VanityLogs),
           lcDataConfig = default(LightClientDataConfig)): ChainDAGRef =
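  ## Load the chain DAG from the database - `preInit` must already have stored
  ## at least a tail state and the corresponding tail block root in `db`.
  ## A minimal call, relying on the defaults above, might look like:
  ##   let dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})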
  cfg.checkForkConsistency()

  doAssert updateFlags - {strictVerification} == {},
    "Other flags not supported in ChainDAG"
  # TODO we require that the db contains both a head and a tail block -
  # asserting here doesn't seem like the right way to go about it however..

  # Tail is the first block for which we can construct a state - either
  # genesis or a checkpoint
  let
    startTick = Moment.now()
    genesisRoot = db.getGenesisBlock()
    tailRoot = db.getTailBlock().expect(
      "preInit should have initialized the database with a tail block root")
    tail = db.getBlockId(tailRoot).expect(
      "tail block summary in database, database corrupt?")
    headRoot = db.getHeadBlock().expect("head root, database corrupt?")
    head = db.getBlockId(headRoot).expect("head block id, database corrupt?")
    # Have to be careful with this instance, it is not yet fully initialized so
    # as to avoid having to allocate a separate "init" state
    dag = ChainDAGRef(
      db: db,
      validatorMonitor: validatorMonitor,
      genesis: genesisRoot.map(
        proc(x: auto): auto = BlockId(root: x, slot: GENESIS_SLOT)),
      tail: tail,
      # The only allowed flag right now is strictVerification, as the others
      # all allow skipping some validation.
      updateFlags: updateFlags * {strictVerification},
      cfg: cfg,

      vanityLogs: vanityLogs,

      lcDataStore: initLightClientDataStore(
        lcDataConfig, cfg, db.getLightClientDataDB()),
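
      # Event callbacks are all optional - they fire when the DAG adds a block,
      # changes head, detects a reorg or updates the finalized checkpoint.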
      onBlockAdded: onBlockCb,
      onHeadChanged: onHeadCb,
      onReorgHappened: onReorgCb,
      onFinHappened: onFinCb,
    )
    loadTick = Moment.now()

  var
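    # `headRef` will end up pointing at the head block while `curRef` tracks
    # the most recently loaded (child) block during the backwards walk below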
    headRef, curRef: BlockRef
    # When starting from a checkpoint with an empty block, we'll store the state
    # "ahead" of the head slot - this slot would be considered finalized
    slot = max(head.slot, (tail.slot.epoch + 1).start_slot)
    # To know the finalized checkpoint of the head, we need to recreate its
    # state - the tail is implicitly finalized, and if we have a finalized block
    # table, that provides another hint
    finalizedSlot = db.finalizedBlocks.high.get(tail.slot)
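    # While walking the summaries we look for the newest stored state on the
    # head branch - blocks newer than that state are collected in `headBlocks`
    # so they can later be replayed on top of it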
    cache: StateCache
    foundHeadState = false
    headBlocks: seq[BlockRef]

  # Load head -> finalized, or all summaries in case the finalized block table
  # hasn't been written yet
  for blck in db.getAncestorSummaries(head.root):
    # The execution block root gets filled in as needed. Nonfinalized Bellatrix
    # and later blocks are loaded as optimistic, which gets adjusted by the
    # first `VALID` fcU from an EL plus markBlockVerified. Pre-merge blocks
    # still get marked as `VALID`.
    let newRef = BlockRef.init(
      blck.root, Opt.none(Eth2Digest), executionValid = false,
      blck.summary.slot)
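    # `getAncestorSummaries` walks backwards from `head`, so the first summary
    # in the iteration is the head block itself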
    if headRef == nil:
      headRef = newRef
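    # Link the previously processed (more recent) block to the block we just
    # loaded, rebuilding the parent chain as we walk towards older slots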
    if curRef != nil:
      link(newRef, curRef)
    curRef = newRef
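    # Index the ref by root so that unfinalized blocks can be looked up quickly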
    dag.forkBlocks.incl(KeyedBlockRef.init(curRef))
    if not foundHeadState:
      foundHeadState = db.getState(
        cfg, blck.root, blck.summary.slot..slot, dag.headState, noRollback)
      slot = blck.summary.slot

      if not foundHeadState:
        # When the database has been written with a pre-fork version of the
        # software, it may happen that blocks produced using an "unforked"
        # chain get written to the database - we need to skip such blocks
        # when loading the database with a fork-compatible version
        if containsBlock(cfg, db, curRef.slot, curRef.root):
          headBlocks.add curRef
        else:
          if headBlocks.len > 0:
            fatal "Missing block needed to create head state, database corrupt?",
              curRef = shortLog(curRef)
            quit 1
          # Without the block data we can't form a state for this root, so
          # we'll need to move the head back
          headRef = nil
          dag.forkBlocks.excl(KeyedBlockRef.init(curRef))
2022-03-19 11:02:17 +00:00
if curRef . slot < = finalizedSlot :
# Only non-finalized slots get a `BlockRef`
break
2022-01-30 16:51:04 +00:00
let summariesTick = Moment . now ( )

  if not foundHeadState:
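    # No stored state was found for any block along the head branch - fall back
    # to deriving one from the parent of the last visited block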
    if not dag.getStateByParent(curRef.bid, dag.headState):
      fatal "Could not load head state, database corrupt?",
        head = shortLog(head), tail = shortLog(dag.tail)
      quit 1
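
  # Cache the sync committees of the head state - these are used when
  # determining sync committee membership (Altair and later)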
  withState(dag.headState):
    when consensusFork >= ConsensusFork.Altair:
      dag.headSyncCommittees = forkyState.data.get_sync_committee_cache(cache)

  block:
    # EpochRef needs an epoch boundary state
    assign(dag.epochRefState, dag.headState)

  var info: ForkedEpochInfo
  while headBlocks.len > 0:
    dag.applyBlock(
      dag.headState, headBlocks.pop().bid, cache,
      info).expect("head blocks should apply")

  dag.head = headRef
  dag.heads = @[headRef]

  assign(dag.clearanceState, dag.headState)

  if dag.headState.latest_block_root == tail.root:
    # In case we started from a checkpoint with an empty slot
    finalizedSlot = getStateField(dag.headState, slot)

  finalizedSlot =
    max(
      finalizedSlot,
      getStateField(dag.headState, finalized_checkpoint).epoch.start_slot)
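
  # Cross-check the fork data of the loaded state against the fork schedule of
  # the configured network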
  let
    configFork = case dag.headState.kind
      of ConsensusFork.Phase0: genesisFork(cfg)
      of ConsensusFork.Altair: altairFork(cfg)
      of ConsensusFork.Bellatrix: bellatrixFork(cfg)
      of ConsensusFork.Capella: capellaFork(cfg)
      of ConsensusFork.Deneb: denebFork(cfg)
    stateFork = getStateField(dag.headState, fork)

  # Here, we check only the `current_version` field because the spec
  # mandates that testnets starting directly from a particular fork
  # should have `previous_version` set to `current_version` while
  # this doesn't happen to be the case in networks that go through
  # regular hard-fork upgrades. See for example:
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/bellatrix/beacon-chain.md#testing
  if stateFork.current_version != configFork.current_version:
error " State from database does not match network, check --network parameter " ,
2022-10-14 19:40:10 +00:00
tail = dag . tail , headRef , stateFork , configFork
    quit 1

  # Need to load state to find genesis validators root, before loading era db
  dag.era = EraDB.new(
    cfg, eraPath, getStateField(dag.headState, genesis_validators_root))

  # We used an interim finalizedHead while loading the head state above - now
  # that we have loaded the dag up to the finalized slot, we can also set
  # finalizedHead to its real value
  dag.finalizedHead = headRef.atSlot(finalizedSlot)
  dag.lastPrunePoint = dag.finalizedHead.toBlockSlotId().expect("not nil")

  doAssert dag.finalizedHead.blck != nil,
    "The finalized head should exist at the slot"

  block: # Top up finalized blocks
    if db.finalizedBlocks.high.isNone or
        db.finalizedBlocks.high.get() < dag.finalizedHead.blck.slot:
      # Versions prior to 1.7.0 did not store finalized blocks in the
      # database, and / or the application might have crashed between the head
      # and finalized blocks updates.
      info "Loading finalized blocks",
        finHigh = db.finalizedBlocks.high,
        finalizedHead = shortLog(dag.finalizedHead)

      var
        newFinalized: seq[BlockId]
        tmp = dag.finalizedHead.blck
      while tmp.parent != nil:
        newFinalized.add(tmp.bid)
        let p = tmp.parent
        tmp.parent = nil
        tmp = p

      for blck in db.getAncestorSummaries(tmp.root):
        if db.finalizedBlocks.high.isSome and
            blck.summary.slot <= db.finalizedBlocks.high.get:
          break

        newFinalized.add(BlockId(slot: blck.summary.slot, root: blck.root))

      db.updateFinalizedBlocks(newFinalized)

  doAssert dag.finalizedHead.blck.parent == nil,
    "The finalized head is the last BlockRef with a parent"

  block:
    let finalized = db.finalizedBlocks.get(db.finalizedBlocks.high.get()).expect(
      "tail at least")
    if finalized != dag.finalizedHead.blck.root:
      error "Head does not lead to finalized block, database corrupt?",
        head = shortLog(head), finalizedHead = shortLog(dag.finalizedHead),
        tail = shortLog(dag.tail), finalized = shortLog(finalized)
      quit 1
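
  # Restore the backfill pointer - the oldest block stored in the database;
  # its parent is the next block to fetch when backfilling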
  dag.backfill = block:
    let backfillSlot = db.finalizedBlocks.low.expect("tail at least")
    if backfillSlot <= dag.horizon:
      # Backfill done, no need to load anything
      BeaconBlockSummary()
    elif backfillSlot < dag.tail.slot:
      let backfillRoot = db.finalizedBlocks.get(backfillSlot).expect(
        "low to be loadable")
      db.getBeaconBlockSummary(backfillRoot).expect(
        "Backfill block must have a summary: " & $backfillRoot)
    elif dag.containsBlock(dag.tail):
      db.getBeaconBlockSummary(dag.tail.root).expect(
        "Tail block must have a summary: " & $dag.tail.root)
    else:
      # Checkpoint sync, checkpoint block unavailable
      BeaconBlockSummary(
        slot: dag.tail.slot + 1,
        parent_root: dag.tail.root)
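
  # Pre-compute the fork digests for this network - these are used, among
  # other things, to identify forks in gossip topic names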
  dag.forkDigests = newClone ForkDigests.init(
    cfg, getStateField(dag.headState, genesis_validators_root))

  withState(dag.headState):
    dag.validatorMonitor[].registerState(forkyState.data)

  updateBeaconMetrics(dag.headState, dag.head.bid, cache)

  let finalizedTick = Moment.now()
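
  # Era files, when present, can provide the roots of historical blocks so
  # that part of the backfill can be satisfied locally (front-filling)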
  if dag.backfill.slot > GENESIS_SLOT: # Try frontfill from era files
    let backfillSlot = dag.backfill.slot - 1
    dag.frontfillBlocks = newSeqOfCap[Eth2Digest](backfillSlot.int)
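
    # Era file contents are located via the historical_roots /
    # historical_summaries accumulators of the head state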
    let
      historical_roots = getStateField(dag.headState, historical_roots).asSeq()
      historical_summaries = dag.headState.historical_summaries.asSeq()

    var
      blocks = 0

    # Here, we'll build up the slot->root mapping in memory for the range of
    # blocks from genesis to backfill, if possible.
    for bid in dag.era.getBlockIds(
        historical_roots, historical_summaries, Slot(0), Eth2Digest()):
      if bid.slot >= backfillSlot:
        # If we end up in here, we failed the root comparison just below in
        # an earlier iteration
        fatal "Era summaries don't lead up to backfill, database or era files corrupt?",
          bid
        quit 1

      # In BeaconState.block_roots, empty slots are filled with the root of
      # the previous block - in our data structure, we use a zero hash instead
      dag.frontfillBlocks.setLen(bid.slot.int + 1)
      dag.frontfillBlocks[bid.slot.int] = bid.root

      if bid.root == dag.backfill.parent_root:
        # We've reached the backfill point, meaning blocks are available
        # in the sqlite database from here onwards - remember this point in
        # time so that we can write summaries to the database - it's a lot
        # faster to load from database than to iterate over era files with
        # the current naive era file reader.
        reset(dag.backfill)

        dag.updateFrontfillBlocks()

        break

      blocks += 1

    if blocks > 0:
      info "Front-filled blocks from era files", blocks

  let frontfillTick = Moment.now()

  # Fill validator key cache in case we're loading an old database that doesn't
  # have a cache
  dag.updateValidatorKeys(getStateField(dag.headState, validators).asSeq())

  # Initialize pruning such that when starting with a database that hasn't been
  # pruned, we work our way from the tail to the horizon in incremental steps
  dag.lastHistoryPruneHorizon = dag.horizon()
  dag.lastHistoryPruneBlockHorizon = block:
    let boundary = min(dag.tail.slot, dag.horizon())
    if boundary.epoch() >= EPOCHS_PER_STATE_SNAPSHOT:
      start_slot(boundary.epoch() - EPOCHS_PER_STATE_SNAPSHOT)
    else:
      Slot(0)
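
  # Worked example (editorial, assuming for illustration that
  # EPOCHS_PER_STATE_SNAPSHOT = 32): if the tail/horizon boundary falls in
  # epoch 1000, the initial block prune horizon is start_slot(Epoch(1000 - 32)),
  # i.e. slot 30976 - pruning then advances incrementally from there.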

  info "Block DAG initialized",
    head = shortLog(dag.head),
    finalizedHead = shortLog(dag.finalizedHead),
    tail = shortLog(dag.tail),
    backfill = shortLog(dag.backfill),
    loadDur = loadTick - startTick,
    summariesDur = summariesTick - loadTick,
    finalizedDur = finalizedTick - summariesTick,
    frontfillDur = frontfillTick - finalizedTick,
    keysDur = Moment.now() - frontfillTick

  dag.initLightClientDataCache()

  dag

template genesis_validators_root*(dag: ChainDAGRef): Eth2Digest =
  getStateField(dag.headState, genesis_validators_root)

proc genesisBlockRoot*(dag: ChainDAGRef): Eth2Digest =
  dag.db.getGenesisBlock().expect("DB must be initialized with genesis block")

func getEpochRef*(
    dag: ChainDAGRef, state: ForkedHashedBeaconState, cache: var StateCache): EpochRef =
  ## Get a cached `EpochRef` or construct one based on the given state - always
  ## returns an EpochRef instance
  let
    bid = state.latest_block_id
    epoch = state.get_current_epoch()

  dag.findEpochRef(bid, epoch).valueOr:
    let res = EpochRef.init(dag, state, cache)
    dag.putEpochRef(res)
    res

proc getEpochRef*(
    dag: ChainDAGRef, bid: BlockId, epoch: Epoch,
    preFinalized: bool): Result[EpochRef, cstring] =
  ## Return a cached EpochRef or construct one from the database, if possible -
  ## returns an error on failure.
  ##
  ## When `preFinalized` is true, include epochs from before the finalized
  ## checkpoint in the search - this potentially can result in long processing
  ## times due to state replays.
  ##
  ## Requests for epochs >= dag.finalizedHead.slot.epoch always return an
  ## instance. One must be careful to avoid race conditions in `async` code
  ## where the finalized head might change during an `await`.
  ##
  ## Requests for epochs < dag.finalizedHead.slot.epoch may fail, either because
  ## the search was limited by the `preFinalized` flag or because state history
  ## has been pruned - an error will be returned in this case.
  if not preFinalized and epoch < dag.finalizedHead.slot.epoch:
    return err("Requesting pre-finalized EpochRef")

  if bid.slot < dag.tail.slot or epoch < dag.tail.slot.epoch:
    return err("Requesting EpochRef for pruned state")

  let epochRef = dag.findEpochRef(bid, epoch)
  if epochRef.isOk():
    beacon_state_data_cache_hits.inc
    return ok epochRef.get()

  beacon_state_data_cache_misses.inc

  let
    ancestor = dag.epochAncestor(bid, epoch).valueOr:
      # If we got in here, the bid must be unknown or we would have gotten
      # _some_ ancestor (like the tail)
      return err("Requesting EpochRef for non-canonical block")

  var cache: StateCache
  if not updateState(dag, dag.epochRefState, ancestor, false, cache):
    return err("Could not load requested state")

  ok(dag.getEpochRef(dag.epochRefState, cache))

proc getEpochRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch,
    preFinalized: bool): Result[EpochRef, cstring] =
  dag.getEpochRef(blck.bid, epoch, preFinalized)
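
# Hypothetical usage sketch (editorial, not from the original source): for a
# head-relative request where pre-finalized history is not needed, a caller
# might write
#
#   let epochRef = dag.getEpochRef(dag.head, someEpoch, preFinalized = false).valueOr:
#     debug "EpochRef not available", error
#     return
#
# `someEpoch` is illustrative only; on failure, `error` carries the `cstring`
# reason from the Result.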

proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
  dag.getEpochRef(
    dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect(
      "getEpochRef for finalized head should always succeed")

proc ancestorSlot*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId,
    lowSlot: Slot): Opt[Slot] =
  ## Return common ancestor slot of `bid` and `state`, if at least `lowSlot`.
  ## Return `none` if no common ancestor is found with slot >= `lowSlot`.
  if state.data.slot < lowSlot or bid.slot < lowSlot:
    return Opt.none(Slot)
  var stateBid = ? dag.getBlockIdAtSlot(state, bid.slot)
  if stateBid.slot < lowSlot:
    return Opt.none(Slot)
  var blockBid = (? dag.atSlot(bid, stateBid.slot)).bid
  if blockBid.slot < lowSlot:
    return Opt.none(Slot)
  while stateBid != blockBid:
    if stateBid.slot >= blockBid.slot:
      stateBid = ? dag.getBlockIdAtSlot(
        state, min(blockBid.slot, stateBid.slot - 1))
      if stateBid.slot < lowSlot:
        return Opt.none(Slot)
    else:
      blockBid = ? dag.parent(blockBid)
      if blockBid.slot < lowSlot:
        return Opt.none(Slot)
  Opt.some stateBid.slot
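
# Editorial note (not from the original source): on a purely linear history the
# common ancestor of `state` and `bid` is simply the older of the two - e.g.
# with a state advanced to slot 100 on the branch that contains a block at
# slot 96, `dag.ancestorSlot(state, bid, Slot(0))` would be expected to return
# `Opt.some Slot(96)`.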

proc computeRandaoMix(
    bdata: ForkedTrustedSignedBeaconBlock): Opt[Eth2Digest] =
  ## Compute the requested RANDAO mix for `bdata` without `state`, if possible.
  withBlck(bdata):
    when consensusFork >= ConsensusFork.Bellatrix:
      if forkyBlck.message.is_execution_block:
        var mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw())
        mix.data.mxor forkyBlck.message.body.execution_payload.prev_randao.data
        return ok mix
  Opt.none(Eth2Digest)
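
# Editorial sketch (not from the original source): `process_randao` updates the
# per-epoch mix as `new_mix = old_mix xor hash(randao_reveal)`, and for
# execution blocks `execution_payload.prev_randao` must equal that `old_mix`.
# The post-block mix is therefore recoverable from the block alone as
# `hash(randao_reveal) xor prev_randao`, which is exactly what the
# `eth2digest`/`mxor` pair above computes.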

proc computeRandaoMix*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId,
    lowSlot: Slot): Opt[Eth2Digest] =
  ## Compute the requested RANDAO mix for `bid` based on `state`.
  ## Return `none` if `state` and `bid` do not share a common ancestor
  ## with slot >= `lowSlot`.
  let ancestorSlot = ? dag.ancestorSlot(state, bid, lowSlot)
  doAssert ancestorSlot <= state.data.slot
  doAssert ancestorSlot <= bid.slot

  # If `blck` is post merge, RANDAO information is immediately available
  let
    bdata = ? dag.getForkedBlock(bid)
    fullMix = computeRandaoMix(bdata)
  if fullMix.isSome:
    return fullMix

  # RANDAO mix has to be recomputed from `bid` and `state`
  var mix {.noinit.}: Eth2Digest
  proc mixToAncestor(highBid: BlockId): Opt[void] =
    ## Mix in/out RANDAO reveals back to `ancestorSlot`
    var bid = highBid
    while bid.slot > ancestorSlot:
      let bdata = ? dag.getForkedBlock(bid)
      withBlck(bdata): # See `process_randao` / `process_randao_mixes_reset`
        mix.data.mxor eth2digest(
          forkyBlck.message.body.randao_reveal.toRaw()).data
      bid = ? dag.parent(bid)
    ok()

  # Mix in RANDAO from `bid`
  if ancestorSlot < bid.slot:
    withBlck(bdata):
      mix = eth2digest(forkyBlck.message.body.randao_reveal.toRaw())
    ? mixToAncestor(? dag.parent(bid))
  else:
    mix.reset()

  # Mix in RANDAO from `state`
  let ancestorEpoch = ancestorSlot.epoch
  if ancestorEpoch + EPOCHS_PER_HISTORICAL_VECTOR <= state.data.slot.epoch:
    return Opt.none(Eth2Digest)
  let mixRoot = state.dependent_root(ancestorEpoch + 1)
  if mixRoot.isZero:
    return Opt.none(Eth2Digest)
  ? mixToAncestor(? dag.getBlockId(mixRoot))
  mix.data.mxor state.data.get_randao_mix(ancestorEpoch).data

  ok mix

proc computeRandaoMixFromMemory*(
    dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] =
  ## Compute requested RANDAO mix for `bid` from available states (~5 ms).
  template tryWithState(state: ForkedHashedBeaconState) =
    block:
      withState(state):
        let mix = dag.computeRandaoMix(forkyState, bid, lowSlot)
        if mix.isSome:
          return mix
  tryWithState dag.headState
  tryWithState dag.epochRefState
  tryWithState dag.clearanceState

proc computeRandaoMixFromDatabase*(
    dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] =
  ## Compute requested RANDAO mix for `bid` using closest DB state (~500 ms).
  let state = assignClone(dag.headState)
  ? dag.getNearbyState(state[], bid, lowSlot)
  withState(state[]):
    dag.computeRandaoMix(forkyState, bid, lowSlot)

proc computeRandaoMix(
    dag: ChainDAGRef, bid: BlockId, lowSlot: Slot): Opt[Eth2Digest] =
  # Try to compute from states available in memory
  let mix = dag.computeRandaoMixFromMemory(bid, lowSlot)
  if mix.isSome:
    return mix

  # If `blck` is post merge, RANDAO information is immediately available
  let
    bdata = ? dag.getForkedBlock(bid)
    fullMix = computeRandaoMix(bdata)
  if fullMix.isSome:
    return fullMix

  # Fall back to database
  dag.computeRandaoMixFromDatabase(bid, lowSlot)

proc computeRandaoMix*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] =
  ## Compute requested RANDAO mix for `bid`.
  const maxSlotDistance = SLOTS_PER_HISTORICAL_ROOT
  let lowSlot = max(bid.slot, maxSlotDistance.Slot) - maxSlotDistance
  dag.computeRandaoMix(bid, lowSlot)
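
# Hypothetical usage sketch (editorial, not from the original source): given an
# initialized `dag` and a known `bid: BlockId`, a caller that only needs the
# mix or a graceful fallback might write
#
#   let mix = dag.computeRandaoMix(bid)
#   if mix.isSome:
#     debug "Recovered RANDAO mix", slot = bid.slot, mix = shortLog(mix.get())
#   else:
#     debug "RANDAO mix not recoverable within SLOTS_PER_HISTORICAL_ROOT",
#       slot = bid.slot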

proc lowSlotForAttesterShuffling*(epoch: Epoch): Slot =
  ## Return minimum slot that a state must share ancestry with a block history
  ## so that RANDAO at `epoch.attester_dependent_slot` can be computed.
  # A state must be somewhat recent so that `get_active_validator_indices`
  # for the queried `epoch` cannot be affected by any such skipped processing.
  const numDelayEpochs = compute_activation_exit_epoch(GENESIS_EPOCH).uint64
  let lowEpoch = max(epoch, (numDelayEpochs - 1).Epoch) - (numDelayEpochs - 1)
  lowEpoch.start_slot
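
# Worked example (editorial, assuming mainnet presets where
# compute_activation_exit_epoch(GENESIS_EPOCH) = GENESIS_EPOCH + 1 +
# MAX_SEED_LOOKAHEAD = 5): numDelayEpochs is then 5, so for epoch 1000 the
# low slot is start_slot(Epoch(996)), i.e. slot 31872, while for epochs 0..4
# it clamps to Slot(0).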

proc computeShufflingRef*(
    dag: ChainDAGRef, state: ForkyHashedBeaconState,
    blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Compute `ShufflingRef` for `blck@epoch` based on `state`.
  ## If `state` has unviable `get_active_validator_indices`, return `none`.
  let
    dependentBid = (? dag.atSlot(blck.bid, epoch.attester_dependent_slot)).bid
    lowSlot = epoch.lowSlotForAttesterShuffling
    mix = ? dag.computeRandaoMix(state, dependentBid, lowSlot)
  return ok ShufflingRef(
    epoch: epoch,
    attester_dependent_root: dependentBid.root,
    shuffled_active_validator_indices:
      state.data.get_shuffled_active_validator_indices(epoch, mix))

proc computeShufflingRefFromMemory*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
  ## Compute `ShufflingRef` from available states (~5 ms).
  template tryWithState(state: ForkedHashedBeaconState) =
    block:
      withState(state):
        let shufflingRef = dag.computeShufflingRef(forkyState, blck, epoch)
        if shufflingRef.isOk:
          return shufflingRef
  tryWithState dag.headState
  tryWithState dag.epochRefState
  tryWithState dag.clearanceState

proc getShufflingRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch,
    preFinalized: bool): Opt[ShufflingRef] =
  ## Return the shuffling in the given history and epoch - this potentially is
  ## faster than returning a full EpochRef because the shuffling is determined
  ## an epoch in advance and therefore is less sensitive to reorgs
  var shufflingRef = dag.findShufflingRef(blck.bid, epoch)
  if shufflingRef.isSome:
    return shufflingRef

  # Use existing states to quickly compute the shuffling
  shufflingRef = dag.computeShufflingRefFromMemory(blck, epoch)
  if shufflingRef.isSome:
    dag.putShufflingRef(shufflingRef.get)
    return shufflingRef

  # Last resort, this can take several seconds as this may replay states
  let epochRef = dag.getEpochRef(blck, epoch, preFinalized).valueOr:
    return Opt.none ShufflingRef
  dag.putShufflingRef(epochRef.shufflingRef)
  Opt.some epochRef.shufflingRef
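
# Hypothetical usage sketch (editorial, not from the original source): when a
# caller needs the attester shuffling for `epoch` on the branch of `blck`, it
# might write
#
#   let shufflingRef = dag.getShufflingRef(blck, epoch, preFinalized = false).valueOr:
#     return # shuffling not available, e.g. pre-finalized epoch or pruned history
#   debug "Shuffling ready",
#     epoch, activeValidators = shufflingRef.shuffled_active_validator_indices.len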

func stateCheckpoint*(dag: ChainDAGRef, bsi: BlockSlotId): BlockSlotId =
  ## The first ancestor BlockSlot that is a state checkpoint
  var bsi = bsi
  while not dag.isStateCheckpoint(bsi):
    if bsi.isProposed:
      bsi.bid = dag.parent(bsi.bid).valueOr:
        break
    else:
      bsi.slot = bsi.slot - 1
  bsi

template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork =
  forkAtEpoch(dag.cfg, epoch)

proc getBlockRange*(
    dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
    output: var openArray[BlockId]): Natural =
  ## This function populates an `output` buffer of blocks
  ## with slots ranging from `startSlot` up to, but not including,
  ## `startSlot + skipStep * output.len`, skipping any slots that don't have
  ## a block.
  ##
  ## Blocks will be written to `output` from the end without gaps, even if
  ## a block is missing in a particular slot. The return value shows how
  ## many slots were missing blocks - to iterate over the result, start
  ## at this index.
  ##
  ## If there were no blocks in the range, `output.len` will be returned.
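  ##
  ## Illustrative sketch (editorial, not from the original source): to fetch up
  ## to 8 block ids starting at slot 1000 with no skipping, assuming an
  ## initialized `dag`:
  ##
  ##   var ids: array[8, BlockId]
  ##   let firstIdx = dag.getBlockRange(Slot(1000), 1, ids)
  ##   for bid in ids[firstIdx ..< ids.len]:
  ##     echo bid.slot, " ", bid.root # only slots that actually had a block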
  let
    requestedCount = output.lenu64
    headSlot = dag.head.slot

  trace "getBlockRange entered",
    head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot
  if startSlot < dag.backfill.slot:
    debug "Got request for pre-backfill slot",
      startSlot, backfillSlot = dag.backfill.slot, horizonSlot = dag.horizon
    return output.len

  if headSlot <= startSlot or requestedCount == 0:
    return output.len # Identical to returning an empty set of blocks as indicated above

  let
    runway = uint64(headSlot - startSlot)

    # This is the number of blocks that will follow the start block
    extraSlots = min(runway div skipStep, requestedCount - 1)

    # If `skipStep` is very large, `extraSlots` should be 0 from
    # the previous line, so `endSlot` will be equal to `startSlot`:
    endSlot = startSlot + extraSlots * skipStep

  var
    curSlot = endSlot
    o = output.len

  # Process all blocks that follow the start block (may be zero blocks)
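  # `output` is filled from the end: `o` starts at `output.len` and is
  # decremented for each proposed block found, so entries end up in ascending
  # slot order and the unused prefix is reported via the return value.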
  while curSlot > startSlot:
    let bs = dag.getBlockIdAtSlot(curSlot)
    if bs.isSome and bs.get().isProposed():
      o -= 1
      output[o] = bs.get().bid
    curSlot -= skipStep

  # Handle start slot separately (to avoid underflow when computing curSlot)
  let bs = dag.getBlockIdAtSlot(startSlot)
  if bs.isSome and bs.get().isProposed():
    o -= 1
    output[o] = bs.get().bid

  o # Return the index of the first non-nil item in the output

proc updateState*(
    dag: ChainDAGRef, state: var ForkedHashedBeaconState, bsi: BlockSlotId,
    save: bool, cache: var StateCache): bool =
  ## Rewind or advance state such that it matches the given block and slot -
  ## this may include replaying from an earlier snapshot if the block is on a
  ## different branch or the state has advanced past the requested slot
  ## If `bsi.slot` is higher than `bsi.bid.slot`, `updateState` will fill in
  ## with empty/non-block slots
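  ##
  ## Illustrative usage sketch (added here for documentation, not part of the
  ## original module - assumes `dag` and a caller-owned scratch state
  ## `tmpState` are in scope):
  ##
  ## .. code-block:: nim
  ##   var cache = StateCache()
  ##   let target = BlockSlotId.init(dag.head.bid, dag.head.slot)
  ##   if not dag.updateState(tmpState, target, false, cache): # save = false
  ##     debug "Could not load state", target = shortLog(target)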

  # First, see if we're already at the requested block. If we are, also check
  # that the state has not been advanced past the desired block - if it has,
  # an earlier state must be loaded since there's no way to undo the slot
  # transitions

  let
    startTick = Moment.now()
    current {.used.} = withState(state):
      BlockSlotId.init(forkyState.latest_block_id, forkyState.data.slot)

  var
    ancestors: seq[BlockId]
    found = false

  template exactMatch(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool =
    # The state is already at the requested block and slot - it can be used
    # as-is, without any further slot advancement
    state.matches_block_slot(bsi.bid.root, bsi.slot)

  template canAdvance(state: ForkedHashedBeaconState, bsi: BlockSlotId): bool =
    # The block is the same and we're at an early enough slot - the state can
    # be used to arrive at the desired blockslot
    state.can_advance_slots(bsi.bid.root, bsi.slot)

  # Fast path: check all caches for an exact match - this is faster than
  # advancing a state where there's epoch processing to do, by a wide margin -
  # it also avoids `hash_tree_root` for slot processing
  if exactMatch(state, bsi):
    found = true
  elif not save:
    # When required to save states, we cannot rely on the caches because that
    # would skip the extra processing that save does - not all information that
    # goes into the database is cached
    if exactMatch(dag.headState, bsi):
      assign(state, dag.headState)
      found = true
    elif exactMatch(dag.clearanceState, bsi):
      assign(state, dag.clearanceState)
      found = true
    elif exactMatch(dag.epochRefState, bsi):
      assign(state, dag.epochRefState)
      found = true
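
  # Upper bound on how many blocks we are willing to replay on top of a cached
  # in-memory state while searching for a usable base (see the loop below);
  # beyond this, loading a state from the database is preferred.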
  const RewindBlockThreshold = 64

  if not found:
    # No exact match found - see if any in-memory state can be used as a base
    # onto which we can apply a few blocks - there's a tradeoff here between
    # loading the state from disk and performing the block applications
    var cur = bsi
    while ancestors.len < RewindBlockThreshold:
      if isZero(cur.bid.root): # tail reached
        break

      if canAdvance(state, cur): # Typical case / fast path when there's no reorg
        found = true
        break

      if not save: # see above
        if canAdvance(dag.headState, cur):
          assign(state, dag.headState)
          found = true
          break

        if canAdvance(dag.clearanceState, cur):
          assign(state, dag.clearanceState)
          found = true
          break

        if canAdvance(dag.epochRefState, cur):
          assign(state, dag.epochRefState)
          found = true
          break

      if cur.isProposed():
        # This is not an empty slot, so the block will need to be applied to
        # eventually reach bsi
        ancestors.add(cur.bid)

      # Move slot by slot to capture epoch boundary states
      cur = dag.parentOrSlot(cur).valueOr:
        break

  if not found:
    debug "UpdateStateData cache miss",
      current = shortLog(current), target = shortLog(bsi)

    # Either the state is too new or was created by applying a different block.
    # We'll now resort to loading the state from the database then reapplying
    # blocks until we reach the desired point in time.
var cur = bsi
ancestors . setLen ( 0 )
# Look for a state in the database and load it - as long as it cannot be
# found, keep track of the blocks that are needed to reach it from the
# state that eventually will be found.
# If we hit the tail, it means that we've reached a point for which we can
# no longer recreate history - this happens for example when starting from
# a checkpoint block
let startEpoch = bsi . slot . epoch
while not canAdvance ( state , cur ) and
not dag . db . getState ( dag . cfg , cur . bid . root , cur . slot , state , noRollback ) :
# There's no state saved for this particular BlockSlot combination, and
# the state we have can't trivially be advanced (it may for example be older
# than RewindBlockThreshold) - keep looking.
if cur . isProposed ( ) :
# This is not an empty slot, so the block will need to be applied to
# eventually reach bsi
ancestors . add ( cur . bid )
if cur . slot = = GENESIS_SLOT or ( cur . slot < dag . finalizedHead . slot and
cur . slot . epoch + uint64 ( EPOCHS_PER_STATE_SNAPSHOT ) * 2 < startEpoch ) :
# We've either walked two full state snapshot lengths or hit the tail
# and still can't find a matching state: this can happen when
# starting the node from an arbitrary finalized checkpoint and not
# backfilling the states
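# (Illustration only - the exact figures depend on EPOCHS_PER_STATE_SNAPSHOT:
# assuming a value of 32 and mainnet's 32-slot epochs, the walk-back gives up
# once it has gone more than 2 * 32 = 64 epochs, i.e. roughly 2048 slots,
# below the requested epoch without finding a stored state.)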
notice " Request for pruned historical state " ,
request = shortLog ( bsi ) , tail = shortLog ( dag . tail ) ,
cur = shortLog ( cur ) , finalized = shortLog ( dag . finalizedHead )
return false
# Move slot by slot to capture epoch boundary states
cur = dag . parentOrSlot ( cur ) . valueOr :
if not dag . getStateByParent ( cur . bid , state ) :
notice " Request for pruned historical state " ,
request = shortLog ( bsi ) , tail = shortLog ( dag . tail ) ,
cur = shortLog ( cur )
return false
break
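# `beacon_state_rewinds` below counts how often this fallback path - looking
# up a starting state in the database rather than advancing the one already
# in memory - is taken; a steadily climbing counter typically hints at
# state-cache misses or unusually deep history queries.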
beacon_state_rewinds . inc ( )
# Starting state has been assigned, either from memory or database
let
assignTick = Moment . now ( )
ancestor {. used . } = withState ( state ) :
BlockSlotId . init ( forkyState . latest_block_id , forkyState . data . slot )
ancestorRoot {. used . } = getStateRoot ( state )
var info : ForkedEpochInfo
# Time to replay all the blocks between then and now
for i in countdown ( ancestors . len - 1 , 0 ) :
# Because the ancestors are in the database, there's no need to persist them
# again. Also, because we're applying blocks that were loaded from the
# database, we can skip certain checks that have already been performed
# before adding the block to the database.
if ( let res = dag . applyBlock ( state , ancestors [ i ] , cache , info ) ; res . isErr ) :
warn " Failed to apply block from database " ,
blck = shortLog ( ancestors [ i ] ) ,
state_bid = shortLog ( state . latest_block_id ) ,
error = res . error ( )
return false
# ...and make sure to process empty slots as requested
dag . advanceSlots ( state , bsi . slot , save , cache , info )
# ...and make sure to load the state cache, if it exists
loadStateCache ( dag , cache , bsi . bid , getStateField ( state , slot ) . epoch )
let
assignDur = assignTick - startTick
replayDur = Moment . now ( ) - assignTick
beacon_dag_state_replay_seconds . inc ( replayDur . toFloatSeconds )
# TODO https://github.com/status-im/nim-chronicles/issues/108
if ( assignDur + replayDur ) > = MinSignificantProcessingDuration :
# This might indicate there's a cache that's not in order or a disk that is
# too slow - for now, it's here for investigative purposes and the cutoff
# time might need tuning
info " State replayed " ,
blocks = ancestors . len ,
slots = getStateField ( state , slot ) - ancestor . slot ,
current = shortLog ( current ) ,
ancestor = shortLog ( ancestor ) ,
target = shortLog ( bsi ) ,
ancestorStateRoot = shortLog ( ancestorRoot ) ,
targetStateRoot = shortLog ( getStateRoot ( state ) ) ,
found ,
assignDur ,
replayDur
elif ancestors . len > 0 :
debug " State replayed " ,
blocks = ancestors . len ,
slots = getStateField ( state , slot ) - ancestor . slot ,
current = shortLog ( current ) ,
ancestor = shortLog ( ancestor ) ,
target = shortLog ( bsi ) ,
ancestorStateRoot = shortLog ( ancestorRoot ) ,
targetStateRoot = shortLog ( getStateRoot ( state ) ) ,
found ,
assignDur ,
replayDur
else : # Normal case!
trace " State advanced " ,
blocks = ancestors . len ,
slots = getStateField ( state , slot ) - ancestor . slot ,
current = shortLog ( current ) ,
ancestor = shortLog ( ancestor ) ,
target = shortLog ( bsi ) ,
ancestorStateRoot = shortLog ( ancestorRoot ) ,
targetStateRoot = shortLog ( getStateRoot ( state ) ) ,
found ,
assignDur ,
replayDur
true
proc delState ( dag : ChainDAGRef , bsi : BlockSlotId ) =
# Delete state and mapping for a particular block+slot
if not dag . isStateCheckpoint ( bsi ) :
return # We only ever save epoch states
if ( let root = dag . db . getStateRoot ( bsi . bid . root , bsi . slot ) ; root . isSome ( ) ) :
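# Descriptive note: both the slot -> state-root mapping and the state itself
# are removed here; `withManyWrites` (see `beacon_chain_db`) groups the two
# deletions so the database is not left with a root entry that points at a
# state which no longer exists.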
dag . db . withManyWrites :
dag . db . delStateRoot ( bsi . bid . root , bsi . slot )
dag . db . delState (
dag . cfg . consensusForkAtEpoch ( bsi . slot . epoch ) , root . get ( ) )
proc pruneBlockSlot ( dag : ChainDAGRef , bs : BlockSlot ) =
# TODO: should we move that disk I/O to `onSlotEnd`
dag . delState ( bs . toBlockSlotId ( ) . expect ( " not nil " ) )
if bs . isProposed ( ) :
# Update light client data
dag . deleteLightClientData ( bs . blck . bid )
bs . blck . executionValid = true
dag . forkBlocks . excl ( KeyedBlockRef . init ( bs . blck ) )
discard dag . db . delBlock (
dag . cfg . consensusForkAtEpoch ( bs . blck . slot . epoch ) , bs . blck . root )
proc pruneBlocksDAG ( dag : ChainDAGRef ) =
## This prunes the block DAG
## This does NOT prune the cached state checkpoints and EpochRef
## This must be done after a new finalization point is reached
## to invalidate pending blocks or attestations referring
## to a now invalid fork.
##
## This does NOT update the `dag.lastPrunePoint` field,
## as the caches and fork choice can be pruned at a later time.
# Clean up block refs, walking block by block
let startTick = Moment . now ( )
# Finalization means that we choose a single chain as the canonical one -
# it also means we're no longer interested in any branches from that chain
# up to the finalization point
let hlen = dag . heads . len
for i in 0 .. < hlen :
let n = hlen - i - 1
let head = dag . heads [ n ]
if dag . finalizedHead . blck . isAncestorOf ( head ) :
continue
var cur = head . atSlot ( )
# The block whose parent is nil is the `BlockRef` that's part of the
# canonical chain but has now been finalized - in theory there could be
# states at empty slots iff the fork had epoch-long gaps where the epoch
# transition was not on the canonical chain - these will not properly get
# cleaned up by the current logic - but they should also be rare
# TODO clean up the above as well
doAssert dag . finalizedHead . blck . parent = = nil ,
" finalizedHead parent should have been pruned from memory already "
while cur . blck . parent ! = nil :
dag . pruneBlockSlot ( cur )
cur = cur . parentOrSlot
dag . heads . del ( n )
debug " Pruned the blockchain DAG " ,
currentCandidateHeads = dag . heads . len ,
prunedHeads = hlen - dag . heads . len ,
dagPruneDur = Moment . now ( ) - startTick
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/sync/optimistic.md#helpers
template is_optimistic * ( dag : ChainDAGRef , bid : BlockId ) : bool =
let blck =
if bid . slot < = dag . finalizedHead . slot :
dag . finalizedHead . blck
else :
dag . getBlockRef ( bid . root ) . expect ( " Non-finalized block is known " )
not blck . executionValid
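# Illustrative use (hypothetical call site, not part of this module):
#   if dag.is_optimistic(dag.head.bid):
#     debug "Head not yet verified by the execution client"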
proc markBlockVerified * ( dag : ChainDAGRef , blck : BlockRef ) =
var cur = blck
while true :
cur . executionValid = true
debug " markBlockVerified " , blck = shortLog ( cur )
if cur . parent . isNil :
break
cur = cur . parent
# Always check at least as far back as the parent so that when a new block
# is added with executionValid already set, the flag still propagates to its
# ancestors, up to the next block in the chain that is already marked valid.
if cur . executionValid :
return
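# Note on `markBlockVerified`: the walk marks the given block and its
# in-memory ancestors as executionValid, stopping early once it reaches a
# parent that is already marked - ancestors of an execution-valid block are
# necessarily valid themselves, so there is no need to revisit them.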
iterator syncSubcommittee * (
syncCommittee : openArray [ ValidatorIndex ] ,
subcommitteeIdx : SyncSubcommitteeIndex ) : ValidatorIndex =
var i = subcommitteeIdx . asInt * SYNC_SUBCOMMITTEE_SIZE
let onePastEndIdx = min ( syncCommittee . len , i + SYNC_SUBCOMMITTEE_SIZE )
while i < onePastEndIdx :
yield syncCommittee [ i ]
inc i
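# Example for `syncSubcommittee` (mainnet presets, where
# SYNC_COMMITTEE_SIZE = 512 and SYNC_COMMITTEE_SUBNET_COUNT = 4, giving
# SYNC_SUBCOMMITTEE_SIZE = 128): subcommittee 2 yields the committee members
# stored at positions 256..383.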
iterator syncSubcommitteePairs * (
syncCommittee : openArray [ ValidatorIndex ] ,
subcommitteeIdx : SyncSubcommitteeIndex ) : tuple [ validatorIdx : ValidatorIndex ,
subcommitteeIdx : int ] =
var i = subcommitteeIdx . asInt * SYNC_SUBCOMMITTEE_SIZE
let onePastEndIdx = min ( syncCommittee . len , i + SYNC_SUBCOMMITTEE_SIZE )
while i < onePastEndIdx :
yield ( syncCommittee [ i ] , i )
inc i
func syncCommitteeParticipants * ( dag : ChainDAGRef ,
slot : Slot ) : seq [ ValidatorIndex ] =
withState ( dag . headState ) :
when consensusFork > = ConsensusFork . Altair :
let
period = sync_committee_period ( slot )
curPeriod = sync_committee_period ( forkyState . data . slot )
if period = = curPeriod :
@ ( dag . headSyncCommittees . current_sync_committee )
elif period = = curPeriod + 1 :
@ ( dag . headSyncCommittees . next_sync_committee )
else : @ [ ]
else :
@ [ ]
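# Note on `syncCommitteeParticipants` above: a beacon state only carries the
# current and the next sync committee, so participants can be answered from
# the head state for those two periods only - any other period falls through
# to the empty list.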
func getSubcommitteePositionsAux (
dag : ChainDAGRef ,
syncCommittee : openArray [ ValidatorIndex ] ,
subcommitteeIdx : SyncSubcommitteeIndex ,
validatorIdx : uint64 ) : seq [ uint64 ] =
var pos = 0 'u64
for valIdx in syncCommittee . syncSubcommittee ( subcommitteeIdx ) :
if validatorIdx = = uint64 ( valIdx ) :
result . add pos
inc pos
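# `getSubcommitteePositionsAux` returns a seq because sync committee members
# are sampled with replacement - the same validator index may therefore
# occupy several positions within a single subcommittee.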
func getSubcommitteePositions * (
dag : ChainDAGRef ,
slot : Slot ,
subcommitteeIdx : SyncSubcommitteeIndex ,
validatorIdx : uint64 ) : seq [ uint64 ] =
withState ( dag . headState ) :
when consensusFork > = ConsensusFork . Altair :
let
period = sync_committee_period ( slot )
curPeriod = sync_committee_period ( forkyState . data . slot )
template search ( syncCommittee : openArray [ ValidatorIndex ] ) : seq [ uint64 ] =
dag . getSubcommitteePositionsAux (
syncCommittee , subcommitteeIdx , validatorIdx )
if period = = curPeriod :
search ( dag . headSyncCommittees . current_sync_committee )
2021-10-20 16:32:46 +00:00
elif period = = curPeriod + 1 :
search ( dag . headSyncCommittees . next_sync_committee )
2021-10-20 16:32:46 +00:00
else : @ [ ]
2021-10-06 17:05:06 +00:00
else :
@ [ ]
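# Sketch of a call site (illustrative only; `someSlot` and `someValidatorIdx`
# are assumed, hypothetical names): a caller can use this to locate a
# validator within a given subcommittee of the head sync committee:
#
#   let positions = dag.getSubcommitteePositions(
#     someSlot, SyncSubcommitteeIndex(0), someValidatorIdx)
#   if positions.len == 0:
#     discard # not a member of that subcommittee in the relevant period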
2021-08-28 10:40:01 +00:00
template syncCommitteeParticipants * (
dag : ChainDAGRef ,
slot : Slot ,
subcommitteeIdx : SyncSubcommitteeIndex ) : seq [ ValidatorIndex ] =
2021-11-05 15:39:47 +00:00
toSeq ( syncSubcommittee ( dag . syncCommitteeParticipants ( slot ) , subcommitteeIdx ) )
2021-08-28 10:40:01 +00:00
iterator syncCommitteeParticipants * (
dag : ChainDAGRef ,
slot : Slot ,
2021-11-05 15:39:47 +00:00
subcommitteeIdx : SyncSubcommitteeIndex ,
aggregationBits : SyncCommitteeAggregationBits ) : ValidatorIndex =
2021-12-07 12:25:54 +00:00
for pos , valIdx in dag . syncCommitteeParticipants ( slot , subcommitteeIdx ) :
if pos < aggregationBits . bits and aggregationBits [ pos ] :
2021-08-28 10:40:01 +00:00
yield valIdx
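# Sketch of how the iterator above might be used (illustrative only;
# `contribution` is an assumed value carrying spec-style `slot`,
# `subcommittee_index` and `aggregation_bits` fields):
#
#   var signers: seq[ValidatorIndex]
#   for validatorIdx in dag.syncCommitteeParticipants(
#       contribution.slot,
#       SyncSubcommitteeIndex(contribution.subcommittee_index),
#       contribution.aggregation_bits):
#     signers.add validatorIdx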
2021-03-09 14:36:17 +00:00
func needStateCachesAndForkChoicePruning * ( dag : ChainDAGRef ) : bool =
dag . lastPrunePoint ! = dag . finalizedHead . toBlockSlotId ( ) . expect ( " not nil " )
2021-03-09 14:36:17 +00:00
proc pruneStateCachesDAG * ( dag : ChainDAGRef ) =
## This prunes the cached state checkpoints and EpochRef
## This does NOT prune the state associated with invalidated blocks on a fork
## They are pruned via `pruneBlocksDAG`
##
## This updates the `dag.lastPrunePoint` variable
doAssert dag . needStateCachesAndForkChoicePruning ( )
2021-05-28 19:03:20 +00:00
let startTick = Moment . now ( )
2021-03-09 14:36:17 +00:00
block : # Remove states, walking slot by slot
# We remove all state checkpoints that come _before_ the current finalized
# head, as we might frequently be asked to replay states from the
# finalized checkpoint and onwards (for example when validating blocks and
# attestations)
var
finPoint = dag . finalizedHead . toBlockSlotId ( ) . expect ( " not nil " )
cur = dag . parentOrSlot ( dag . stateCheckpoint ( finPoint ) )
prev = dag . parentOrSlot ( dag . stateCheckpoint ( dag . lastPrunePoint ) )
while cur . isSome and prev . isSome and cur . get ( ) ! = prev . get ( ) :
let bs = cur . get ( )
if not isFinalizedStateSnapshot ( bs . slot ) and
bs . slot ! = dag . tail . slot :
dag . delState ( bs )
2022-03-24 14:37:37 +00:00
let tmp = cur . get ( )
cur = dag . parentOrSlot ( tmp )
2021-05-28 19:03:20 +00:00
let statePruneTick = Moment . now ( )
2021-03-09 14:36:17 +00:00
block : # Clean up old EpochRef instances
# After finalization, we can clear up the epoch cache and save memory -
# it will be recomputed if needed
2022-09-29 14:55:58 +00:00
dag . epochRefs . delIt ( it . epoch < dag . finalizedHead . slot . epoch )
dag . shufflingRefs . delIt ( it . epoch < dag . finalizedHead . slot . epoch )
2022-08-18 18:07:01 +00:00
2021-05-28 19:03:20 +00:00
let epochRefPruneTick = Moment . now ( )
2021-03-09 14:36:17 +00:00
dag . lastPrunePoint = dag . finalizedHead . toBlockSlotId ( ) . expect ( " not nil " )
2021-03-09 14:36:17 +00:00
2021-03-17 06:30:16 +00:00
debug " Pruned the state checkpoints and DAG caches. " ,
2021-05-28 19:03:20 +00:00
statePruneDur = statePruneTick - startTick ,
epochRefPruneDur = epochRefPruneTick - statePruneTick
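# Sketch of the expected call pattern (illustrative, not taken from this
# module): since `pruneStateCachesDAG` asserts that pruning is actually due,
# callers are expected to check the predicate first:
#
#   if dag.needStateCachesAndForkChoicePruning():
#     dag.pruneStateCachesDAG()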
2021-03-09 14:36:17 +00:00
2023-07-11 14:36:37 +00:00
func pruneStep ( horizon , lastHorizon , lastBlockHorizon : Slot ) :
2023-05-12 10:37:15 +00:00
tuple [ stateHorizon , blockHorizon : Slot ] =
## Compute a reasonable incremental pruning step considering the current
## horizon, how far the database has been pruned already and where we want the
## tail to be - the return value shows the first state and block that we
## should _keep_ (inclusive).
const SLOTS_PER_STATE_SNAPSHOT =
uint64 ( EPOCHS_PER_STATE_SNAPSHOT * SLOTS_PER_EPOCH )
let
blockHorizon = block :
let
# Keep up with the horizon if it's moving fast, i.e. if we're syncing
maxSlots = max ( horizon - lastHorizon , MAX_SLOTS_PER_PRUNE )
# Move the block horizon cap with a lag so that it moves slot-by-slot
# instead of a big jump every time we prune a state - assuming we
# prune every slot, this makes us prune one slot at a time instead of
# a burst of prunes (as computed by maxSlots) around every snapshot
# change followed by no pruning for the rest of the period
maxBlockHorizon =
if horizon + 1 > = SLOTS_PER_STATE_SNAPSHOT :
horizon + 1 - SLOTS_PER_STATE_SNAPSHOT
else :
Slot ( 0 )
# `lastBlockHorizon` captures the case where we're incrementally
# pruning a database that hasn't been pruned for a while: it's
# initialized to a pre-tail value on startup and moves to approach
# `maxBlockHorizon`.
min ( maxBlockHorizon , lastBlockHorizon + maxSlots )
# Round up such that we remove state only once blocks have been removed
stateHorizon =
( ( blockHorizon + SLOTS_PER_STATE_SNAPSHOT - 1 ) div
SLOTS_PER_STATE_SNAPSHOT ) * SLOTS_PER_STATE_SNAPSHOT
( Slot ( stateHorizon ) , blockHorizon )
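# Worked example for `pruneStep` (purely illustrative - the constants below
# are hypothetical, not the actual network values): assume
# SLOTS_PER_STATE_SNAPSHOT = 1024 and MAX_SLOTS_PER_PRUNE = 32, with
# horizon = 10000, lastHorizon = 9999 and lastBlockHorizon = 8900:
#
#   maxSlots        = max(10000 - 9999, 32)            = 32
#   maxBlockHorizon = 10000 + 1 - 1024                 = 8977
#   blockHorizon    = min(8977, 8900 + 32)             = 8932
#   stateHorizon    = ((8932 + 1023) div 1024) * 1024  = 9216
#
# blockHorizon <= stateHorizon, matching the invariant asserted in
# `pruneHistory` below.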
proc pruneHistory * ( dag : ChainDAGRef , startup = false ) =
2023-05-12 10:37:15 +00:00
## Perform an incremental pruning step of the history
if dag . db . db . readOnly :
return
let
2023-05-12 10:37:15 +00:00
horizon = dag . horizon ( )
( stateHorizon , blockHorizon ) = pruneStep (
horizon , dag . lastHistoryPruneHorizon , dag . lastHistoryPruneBlockHorizon )
doAssert blockHorizon < = stateHorizon ,
" we must never prune blocks while leaving the state "
debug " Pruning history " ,
horizon , blockHorizon , stateHorizon ,
lastHorizon = dag . lastHistoryPruneHorizon ,
lastBlockHorizon = dag . lastHistoryPruneBlockHorizon ,
tail = dag . tail , head = dag . head
dag . lastHistoryPruneHorizon = horizon
dag . lastHistoryPruneBlockHorizon = blockHorizon
dag . db . withManyWrites :
2023-05-12 10:37:15 +00:00
if stateHorizon > dag . tail . slot :
# First, we want to see if it's possible to prune any states - we store one
# state every EPOCHS_PER_STATE_SNAPSHOT epochs, so this happens infrequently.
var
2023-05-12 10:37:15 +00:00
cur = dag . getBlockIdAtSlot ( stateHorizon )
var first = true
while cur . isSome ( ) :
let bs = cur . get ( )
2023-06-24 14:14:28 +00:00
# We don't delete legacy states because the legacy database is opened
# in read-only mode and is slow to delete from due to its sub-optimal structure
if dag . db . containsState (
dag . cfg , bs . bid . root , bs . slot .. bs . slot , legacy = first ) :
if first :
# We leave the state on the prune horizon intact and update the tail
# to point to this state, indicating the new point in time from
# which we can load states in general.
debug " Updating tail " , bs
dag . db . putTailBlock ( bs . bid . root )
dag . tail = bs . bid
first = false
else :
debug " Pruning historical state " , bs
dag . delState ( bs )
elif not bs . isProposed :
2023-05-12 10:37:15 +00:00
trace " Reached already-pruned slot, done pruning states " , bs
break
if bs . isProposed :
# We store states either at the same slot as the block (checkpoint) or
# by advancing the slot to the nearest epoch start - check both when
# pruning
cur = dag . parentOrSlot ( bs )
elif bs . slot . epoch > EPOCHS_PER_STATE_SNAPSHOT :
# Jump one snapshot interval at a time, but don't prune genesis
cur = dag . getBlockIdAtSlot ( start_slot ( bs . slot . epoch ( ) - EPOCHS_PER_STATE_SNAPSHOT ) )
else :
break
2023-05-12 10:37:15 +00:00
# Prune blocks after sanity-checking that we don't prune post-tail blocks -
# this could happen if a state is missing at the expected state horizon and
# would indicate a partially inconsistent database since the base
# invariant is that there exists a state at the snapshot slot - better not
# further mess things up regardless
if blockHorizon > GENESIS_SLOT and blockHorizon < = dag . tail . slot :
var
# Leave the horizon block itself
cur = dag . getBlockIdAtSlot ( blockHorizon - 1 ) . map ( proc ( x : auto ) : auto = x . bid )
while cur . isSome :
let
bid = cur . get ( )
2023-02-16 20:16:54 +00:00
fork = dag . cfg . consensusForkAtEpoch ( bid . slot . epoch )
if bid . slot = = GENESIS_SLOT :
# Leave genesis block for nostalgia and the REST API
break
if not dag . db . delBlock ( fork , bid . root ) :
2023-05-12 10:37:15 +00:00
# Stop at the first gap - this is typically the pruning point of the
# previous call to pruneHistory. An inconsistent DB might have more
# blocks beyond that point but we have no efficient way of detecting
# that.
break
cur = dag . parent ( bid )
2023-07-18 20:29:23 +00:00
# TODO There have been varied reports of startup pruning causing long
# startup times - an incremental approach would be needed here also
if false and
startup and
2023-05-12 10:37:15 +00:00
dag . cfg . consensusForkAtEpoch ( blockHorizon . epoch ) > ConsensusFork . Phase0 :
# Once during start, we'll clear all "old fork" data - this ensures we get
# rid of any leftover junk in the tables - we do so after linear pruning
# so as to "mostly" clean up the phase0 tables as well (which cannot be
2023-06-24 14:14:28 +00:00
# pruned easily by fork) - one fork at a time, so as not to take too long
2023-05-12 10:37:15 +00:00
let stateFork = dag . cfg . consensusForkAtEpoch ( dag . tail . slot . epoch )
2023-06-24 14:14:28 +00:00
var clearedStates = false
2023-03-12 18:48:38 +00:00
if stateFork > ConsensusFork . Phase0 :
for fork in ConsensusFork . Phase0 .. < stateFork :
2023-06-24 14:14:28 +00:00
if dag . db . clearStates ( fork ) :
clearedStates = true
break
2023-02-16 20:16:54 +00:00
let blockFork = dag . cfg . consensusForkAtEpoch ( blockHorizon . epoch )
2023-06-24 14:14:28 +00:00
if not clearedStates and blockFork > ConsensusFork . Phase0 :
2023-01-28 19:53:41 +00:00
for fork in ConsensusFork . Phase0 .. < blockFork :
2023-06-24 14:14:28 +00:00
if dag . db . clearBlocks ( fork ) :
break
2024-02-09 22:10:38 +00:00
proc loadExecutionBlockHash * (
dag : ChainDAGRef , bid : BlockId ) : Opt [ Eth2Digest ] =
2022-08-29 22:02:29 +00:00
let blockData = dag . getForkedBlock ( bid ) . valueOr :
2024-02-09 22:10:38 +00:00
# Besides database inconsistency issues, this is hit with checkpoint sync.
# The initial `BlockRef` is created before the checkpoint block is loaded.
# It is backfilled later, so return `none` and keep retrying.
return Opt . none ( Eth2Digest )
2022-07-04 20:35:33 +00:00
2022-08-29 22:02:29 +00:00
withBlck ( blockData ) :
2023-03-11 00:35:52 +00:00
when consensusFork > = ConsensusFork . Bellatrix :
2024-02-09 22:10:38 +00:00
Opt . some forkyBlck . message . body . execution_payload . block_hash
2022-08-29 22:02:29 +00:00
else :
2024-02-09 22:10:38 +00:00
Opt . some ZERO_HASH
2022-07-04 20:35:33 +00:00
2024-02-09 22:10:38 +00:00
proc loadExecutionBlockHash * (
dag : ChainDAGRef , blck : BlockRef ) : Opt [ Eth2Digest ] =
2023-04-11 23:31:47 +00:00
if blck . executionBlockHash . isNone :
2024-02-09 22:10:38 +00:00
blck . executionBlockHash = dag . loadExecutionBlockHash ( blck . bid )
blck . executionBlockHash
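# Sketch of a call site (illustrative only): callers can rely on the memoizing
# overload above and fall back gracefully while the block body is still being
# backfilled after checkpoint sync:
#
#   let execBlockHash = dag.loadExecutionBlockHash(blck).valueOr:
#     return # body not (yet) available - retry on a later head update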
2022-07-04 20:35:33 +00:00
2023-03-02 16:13:35 +00:00
from std / packedsets import PackedSet , incl , items
func getValidatorChangeStatuses (
state : ForkedHashedBeaconState , vis : openArray [ ValidatorIndex ] ) :
PackedSet [ ValidatorIndex ] =
var res : PackedSet [ ValidatorIndex ]
withState ( state ) :
for vi in vis :
if forkyState . data . validators [ vi ] . withdrawal_credentials . data [ 0 ] = =
BLS_WITHDRAWAL_PREFIX :
res . incl vi
res
func checkBlsToExecutionChanges (
state : ForkedHashedBeaconState , vis : PackedSet [ ValidatorIndex ] ) : bool =
# Within each fork, withdrawal credentials only change from
# BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX and never from
# ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX. The latter can
# still happen via reorgs.
# Cases:
# 1) unchanged (BLS_WITHDRAWAL_PREFIX or ETH1_ADDRESS_WITHDRAWAL_PREFIX) from
# old to new head.
# 2) ETH1_ADDRESS_WITHDRAWAL_PREFIX to BLS_WITHDRAWAL_PREFIX
# 3) BLS_WITHDRAWAL_PREFIX to ETH1_ADDRESS_WITHDRAWAL_PREFIX
#
# Only report (3), i.e. whether there were validator indices with withdrawal
# credentials previously using BLS_WITHDRAWAL_PREFIX now using, instead, the
# ETH1_ADDRESS_WITHDRAWAL_PREFIX prefix indicating a BLS to execution change
# went through.
#
# Since it tracks head, it's possible reorgs trigger reporting the same
# validator indices multiple times; this is fine.
withState ( state ) :
anyIt ( vis , forkyState . data . validators [ it ] . has_eth1_withdrawal_credential )
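# The two helpers above bracket a head update: `getValidatorChangeStatuses`
# samples the tracked validators against the outgoing head state, and once the
# new head state is in place, `checkBlsToExecutionChanges` reports whether any
# of the sampled BLS-credentialed validators now carry execution withdrawal
# credentials - see the calls in `updateHead` below.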
2020-08-31 09:00:38 +00:00
proc updateHead * (
2023-03-02 16:13:35 +00:00
dag : ChainDAGRef , newHead : BlockRef , quarantine : var Quarantine ,
knownValidators : openArray [ ValidatorIndex ] ) =
2020-05-19 14:18:07 +00:00
## Update what we consider to be the current head, as given by the fork
## choice.
2021-03-09 14:36:17 +00:00
##
2020-05-19 14:18:07 +00:00
## The choice of head affects the choice of finalization point - the order
## of operations naturally becomes important here - after updating the head,
## blocks that were once considered potential candidates for a tree will
## now fall from grace, or no longer be considered resolved.
2020-08-26 15:23:34 +00:00
doAssert not newHead . isNil ( )
2022-03-21 14:18:05 +00:00
# Could happen if enough blocks get invalidated - this would corrupt the
# database. When the finalized checkpoint is empty, the slot may also be
# smaller.
doAssert newHead . slot > = dag . finalizedHead . slot or
newHead = = dag . finalizedHead . blck
2022-09-28 21:07:31 +00:00
2023-03-02 16:13:35 +00:00
let lastHead = dag . head
2022-03-21 14:18:05 +00:00
2020-07-16 13:16:51 +00:00
logScope :
newHead = shortLog ( newHead )
2022-03-21 14:18:05 +00:00
lastHead = shortLog ( lastHead )
2020-05-19 14:18:07 +00:00
2022-03-21 14:18:05 +00:00
if lastHead = = newHead :
2020-10-01 18:56:42 +00:00
trace " No head block update "
2020-05-19 14:18:07 +00:00
return
2022-03-21 14:18:05 +00:00
if newHead . parent . isNil :
# The new head should always have the finalizedHead as ancestor - thus,
# this should not happen except in a race condition where the selected
# `BlockRef` had its parent set to nil as happens during finalization -
# notably, resetting the head to be the finalizedHead is not allowed
error " Cannot update head to block without parent "
return
2020-05-19 14:18:07 +00:00
let
2022-03-16 07:20:40 +00:00
lastHeadStateRoot = getStateRoot ( dag . headState )
2022-07-06 10:33:02 +00:00
lastHeadMergeComplete = dag . headState . is_merge_transition_complete ( )
2022-12-21 12:30:24 +00:00
lastHeadKind = dag . headState . kind
2023-03-02 16:13:35 +00:00
lastKnownValidatorsChangeStatuses = getValidatorChangeStatuses (
dag . headState , knownValidators )
2020-05-19 14:18:07 +00:00
# Start off by making sure we have the right state - updateState will try
2020-11-02 17:34:23 +00:00
# to use existing in-memory states to make this smooth
var cache : StateCache
2022-03-16 07:20:40 +00:00
if not updateState (
dag , dag . headState , newHead . bid . atSlot ( ) , false , cache ) :
2022-01-05 18:38:04 +00:00
# Advancing the head state should never fail, given that the tail is
# implicitly finalised, the tail is an ancestor of the head and we always
# store the tail state in the database, as well as every epoch slot state in
# between
fatal " Unable to load head state during head update, database corrupt? " ,
lastHead = shortLog ( lastHead )
quit 1
2022-05-30 08:25:27 +00:00
2022-03-16 07:20:40 +00:00
dag . head = newHead
2020-05-19 14:18:07 +00:00
2022-07-06 10:33:02 +00:00
if dag . headState . is_merge_transition_complete ( ) and not
lastHeadMergeComplete and
dag . vanityLogs . onMergeTransitionBlock ! = nil :
2022-05-30 08:25:27 +00:00
dag . vanityLogs . onMergeTransitionBlock ( )
2022-12-21 12:30:24 +00:00
if dag . headState . kind > lastHeadKind :
case dag . headState . kind
2023-01-28 19:53:41 +00:00
of ConsensusFork . Phase0 .. ConsensusFork . Bellatrix :
2022-12-21 12:30:24 +00:00
discard
2023-01-28 19:53:41 +00:00
of ConsensusFork . Capella :
2022-12-21 12:30:24 +00:00
if dag . vanityLogs . onUpgradeToCapella ! = nil :
dag . vanityLogs . onUpgradeToCapella ( )
2023-03-04 13:35:39 +00:00
of ConsensusFork . Deneb :
2023-04-10 12:42:19 +00:00
if dag . vanityLogs . onUpgradeToDeneb ! = nil :
dag . vanityLogs . onUpgradeToDeneb ( )
2022-12-21 12:30:24 +00:00
2023-03-02 16:13:35 +00:00
if dag . vanityLogs . onKnownBlsToExecutionChange ! = nil and
checkBlsToExecutionChanges (
dag . headState , lastKnownValidatorsChangeStatuses ) :
dag . vanityLogs . onKnownBlsToExecutionChange ( )
2020-11-27 22:16:13 +00:00
dag . db . putHeadBlock ( newHead . root )
2022-03-16 07:20:40 +00:00
updateBeaconMetrics ( dag . headState , dag . head . bid , cache )
2022-01-31 07:36:29 +00:00
2022-03-16 07:20:40 +00:00
withState ( dag . headState ) :
2023-03-11 00:35:52 +00:00
when consensusFork > = ConsensusFork . Altair :
2022-08-26 22:47:40 +00:00
dag . headSyncCommittees = forkyState . data . get_sync_committee_cache ( cache )
2020-11-27 22:16:13 +00:00
  let
    finalized_checkpoint =
      getStateField(dag.headState, finalized_checkpoint)
    finalizedSlot =
      # finalized checkpoint may move back in the head state compared to what
      # we've seen in other forks - it does not move back in fork choice
      # however, so we'll use the last-known-finalized in that case
      max(finalized_checkpoint.epoch.start_slot(), dag.finalizedHead.slot)
    finalizedHead = newHead.atSlot(finalizedSlot)

  doAssert (not finalizedHead.blck.isNil),
    "Block graph should always lead to a finalized block"

  # Update light client data
  dag.processHeadChangeForLightClient()
  let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
  if not(isAncestor):
    notice "Updated head block with chain reorg",
      headParent = shortLog(newHead.parent),
      stateRoot = shortLog(getStateRoot(dag.headState)),
      justified = shortLog(getStateField(
        dag.headState, current_justified_checkpoint)),
      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)),
      isOptHead = not newHead.executionValid

    if not(isNil(dag.onReorgHappened)):
      let
        # TODO (cheatfate): Proper implementation required
        data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
                                    lastHead.root, newHead.root,
                                    lastHeadStateRoot,
                                    getStateRoot(dag.headState))
      dag.onReorgHappened(data)

    # A reasonable criterion for "reorganizations of the chain"
    quarantine.clearAfterReorg()

    beacon_reorgs_total_total.inc()
    beacon_reorgs_total.inc()
  else:
    debug "Updated head block",
      stateRoot = shortLog(getStateRoot(dag.headState)),
      justified = shortLog(getStateField(
        dag.headState, current_justified_checkpoint)),
      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint)),
      isOptHead = not newHead.executionValid

  if not(isNil(dag.onHeadChanged)):
    let
      depRoot = withState(dag.headState): forkyState.proposer_dependent_root
      prevDepRoot = withState(dag.headState):
        forkyState.attester_dependent_root
      epochTransition = (finalizedHead != dag.finalizedHead)
      # TODO (cheatfate): Proper implementation required
      data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
                                       getStateRoot(dag.headState),
                                       epochTransition, prevDepRoot,
                                       depRoot)
    dag.onHeadChanged(data)

  withState(dag.headState):
    # Every time the head changes, the "canonical" view of balances and other
    # state-related metrics change - notify the validator monitor.
    # Doing this update during head update ensures there's a reasonable number
    # of such updates happening - at most once per valid block.
    dag.validatorMonitor[].registerState(forkyState.data)

  if finalizedHead != dag.finalizedHead:
    debug "Reached new finalization checkpoint",
      stateRoot = shortLog(getStateRoot(dag.headState)),
      justified = shortLog(getStateField(
        dag.headState, current_justified_checkpoint)),
      finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))

    let oldFinalizedHead = dag.finalizedHead

    block:
      # Update `dag.finalizedBlocks` with all newly finalized blocks (those
      # newer than the previous finalized head), then update `dag.finalizedHead`
      var newFinalized: seq[BlockId]
      var tmp = finalizedHead.blck
      while not isNil(tmp) and tmp.slot >= dag.finalizedHead.slot:
        newFinalized.add(tmp.bid)
        if tmp != finalizedHead.blck:
          # The newly finalized block itself should remain in here so that fork
          # choice still can find it via root
          dag.forkBlocks.excl(KeyedBlockRef.init(tmp))

        let p = tmp.parent
        tmp.parent = nil # Reset all parent links to release memory
        tmp = p

      dag.finalizedHead = finalizedHead
      dag.db.updateFinalizedBlocks(newFinalized)

    let oldBlockHash = dag.loadExecutionBlockHash(oldFinalizedHead.blck)
    if oldBlockHash.isSome and oldBlockHash.unsafeGet.isZero:
      let newBlockHash = dag.loadExecutionBlockHash(dag.finalizedHead.blck)
      if newBlockHash.isSome and not newBlockHash.unsafeGet.isZero:
        if dag.vanityLogs.onFinalizedMergeTransitionBlock != nil:
          dag.vanityLogs.onFinalizedMergeTransitionBlock()

    # Pruning the block dag is required every time the finalized head changes
    # in order to clear out blocks that are no longer viable and should
    # therefore no longer be considered as part of the chain we're following
    dag.pruneBlocksDAG()

    # Update light client data
    dag.processFinalizationForLightClient(oldFinalizedHead)

    # Send notification about new finalization point via callback.
    if not(isNil(dag.onFinHappened)):
      let stateRoot =
        if dag.finalizedHead.slot == dag.head.slot: getStateRoot(dag.headState)
        elif dag.finalizedHead.slot + SLOTS_PER_HISTORICAL_ROOT > dag.head.slot:
          getStateField(dag.headState, state_roots).data[
            int(dag.finalizedHead.slot mod SLOTS_PER_HISTORICAL_ROOT)]
        else:
          Eth2Digest() # The thing that finalized was >8192 blocks old?
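      # Note on the lookup above: `state_roots` in the head state is a ring
      # buffer covering the most recent SLOTS_PER_HISTORICAL_ROOT (8192) slots,
      # indexed by `slot mod SLOTS_PER_HISTORICAL_ROOT` - e.g. with the head at
      # slot 10000 and finalization at slot 9984, the root sits at index
      # 9984 mod 8192 = 1792; if finalization lags the head by 8192 slots or
      # more, the root is no longer in the buffer and a zero digest is reported.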
      # TODO (cheatfate): Proper implementation required
      let data = FinalizationInfoObject.init(
        dag.finalizedHead.blck.root, stateRoot, dag.finalizedHead.slot.epoch)

      dag.onFinHappened(dag, data)

proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstring] =
  ## Lightweight check to see if it is likely that the given database has been
  ## initialized
  let
    tailBlockRoot = db.getTailBlock()
  if not tailBlockRoot.isSome():
    return err("Tail block root missing")

  let
    tailBlock = db.getBlockId(tailBlockRoot.get())
  if not tailBlock.isSome():
return err ( " Tail block information missing " )
2020-05-19 14:18:07 +00:00
2021-12-21 10:40:14 +00:00
ok ( )
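
# Usage sketch (hypothetical caller; `checkpointState` is assumed to be a
# trusted, epoch-aligned state obtained elsewhere): at startup, the check above
# can gate between resuming from an existing database and bootstrapping one:
#
#   if ChainDAGRef.isInitialized(db).isErr:
#     ChainDAGRef.preInit(db, checkpointState)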

proc preInit*(
    T: type ChainDAGRef, db: BeaconChainDB, state: ForkedHashedBeaconState) =
  ## Initialize a database using the given state, which potentially may be a
  ## non-genesis state.
  ##
  ## When used with a non-genesis state, the resulting database will not be
  ## compatible with pre-22.11 versions.
  logScope:
    stateRoot = $getStateRoot(state)
    stateSlot = getStateField(state, slot)
  doAssert getStateField(state, slot).is_epoch,
    "Can only initialize database from epoch states"

  withState(state):
    db.putState(forkyState)
    if forkyState.data.slot == GENESIS_SLOT:
      let blck = get_initial_beacon_block(forkyState)
      db.putBlock(blck)
      db.putGenesisBlock(blck.root)
      db.putHeadBlock(blck.root)
      db.putTailBlock(blck.root)
notice " Database initialized from genesis " ,
blockRoot = $ blck . root
2022-10-14 19:40:10 +00:00
else :
let blockRoot = forkyState . latest_block_root ( )
      # We write a summary but not the block contents - these will have to be
      # backfilled from the network
      db.putBeaconBlockSummary(blockRoot, BeaconBlockSummary(
        slot: forkyState.data.latest_block_header.slot,
        parent_root: forkyState.data.latest_block_header.parent_root
      ))
      db.putHeadBlock(blockRoot)
      db.putTailBlock(blockRoot)
      if db.getGenesisBlock().isSome():
        notice "Checkpoint written to database", blockRoot = $blockRoot
      else:
        notice "Database initialized from checkpoint", blockRoot = $blockRoot

proc getProposer*(
    dag: ChainDAGRef, head: BlockRef, slot: Slot): Opt[ValidatorIndex] =
  let
    epochRef = dag.getEpochRef(head.bid, slot.epoch(), false).valueOr:
      notice "Cannot load EpochRef for given head", head, slot, error
      return Opt.none(ValidatorIndex)

    slotInEpoch = slot.since_epoch_start()
  let proposer = epochRef.beacon_proposers[slotInEpoch]
  if proposer.isSome():
    if proposer.get().uint64 >= dag.db.immutableValidators.lenu64():
      # Sanity check - it should never happen that the key cache doesn't contain
      # a key for the selected proposer - that would mean that we somehow
      # created validators in the state without updating the cache!
      warn "Proposer key not found",
        keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
      return Opt.none(ValidatorIndex)

  proposer
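
# Usage sketch (hypothetical caller; `attachedValidators` is assumed to be a
# set of locally managed validator indices): a duty scheduler might check
# whether one of its validators is expected to propose in a given slot:
#
#   let proposer = dag.getProposer(dag.head, slot)
#   if proposer.isSome and proposer.get() in attachedValidators:
#     discard # build and publish a block for `slot`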

proc getProposalState*(
    dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache):
    Result[ref ForkedHashedBeaconState, cstring] =
  ## Return a state suitable for making proposals for the given head and slot -
  ## in particular, the state can be discarded after use and does not have a
  ## state root set

  # Start with the clearance state, since this one typically has been advanced
  # and thus has a hot hash tree cache
  let state = assignClone(dag.clearanceState)

  var
    info = ForkedEpochInfo()
  if not state[].can_advance_slots(head.root, slot):
    # The last state root will be computed as part of block production, so skip
    # it now
    if not dag.updateState(
        state[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil"),
        false, cache):
      error "Cannot get proposal state - skipping block production, database corrupt?",
        head = shortLog(head),
        slot
      return err("Cannot create proposal state")
  else:
    loadStateCache(dag, cache, head.bid, slot.epoch)

  if getStateField(state[], slot) < slot:
    process_slots(
      dag.cfg, state[], slot, cache, info,
      {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail")

  ok state
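
# Usage sketch (hypothetical block producer): the returned state is a clone of
# the clearance state advanced to `slot`, so it can be mutated freely while
# assembling a block body; the last state root is intentionally left unset:
#
#   var cache = StateCache()
#   let proposalState = dag.getProposalState(dag.head, slot, cache).valueOr:
#     return # cannot propose without a proposal state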

func aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex]): Result[CookedPubKey, cstring] =
  if validator_indices.len == 0:
    # Aggregation spec requires non-empty collection
    # - https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04
    # Consensus specs require at least one attesting index in attestation
    # - https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.6/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
    return err("aggregate: no attesting keys")

  let
    firstKey = dag.validatorKey(validator_indices[0]).valueOr:
      return err("aggregate: invalid validator index")

  var aggregateKey{.noinit.}: AggregatePublicKey

  aggregateKey.init(firstKey)
  for i in 1 ..< validator_indices.len:
    let key = dag.validatorKey(validator_indices[i]).valueOr:
      return err("aggregate: invalid validator index")
    aggregateKey.aggregate(key)

  ok(finish(aggregateKey))

func aggregateAll*(
    dag: ChainDAGRef,
    validator_indices: openArray[ValidatorIndex | uint64],
    bits: BitSeq | BitArray): Result[CookedPubKey, cstring] =
  if validator_indices.len() != bits.len():
    return err("aggregateAll: mismatch in bits length")

  var
    aggregateKey{.noinit.}: AggregatePublicKey
    inited = false

  for i in 0 ..< bits.len():
    if bits[i]:
      let key = dag.validatorKey(validator_indices[i]).valueOr:
        return err("aggregate: invalid validator index")

      if inited:
        aggregateKey.aggregate(key)
      else:
        aggregateKey = AggregatePublicKey.init(key)
        inited = true

  if not inited:
    err("aggregate: no attesting keys")
  else:
    ok(finish(aggregateKey))
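
# Usage sketch (hypothetical; `committee` and `attestation` are assumed): when
# checking an aggregate attestation, the committee's validator indices and the
# attestation's aggregation bits select which keys enter the aggregate:
#
#   let aggregateKey = dag.aggregateAll(
#       committee, attestation.aggregation_bits).valueOr:
#     return err("attestation: unable to aggregate keys")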

func needsBackfill*(dag: ChainDAGRef): bool =
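  ## Whether backfill is still needed - true while the oldest block the node
  ## has backfilled (`dag.backfill.slot`) is still newer than `dag.horizon`,
  ## the earliest slot for which this node aims to keep block history.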
  dag.backfill.slot > dag.horizon

proc rebuildIndex*(dag: ChainDAGRef) =
  ## After a checkpoint sync, we lack intermediate states to replay from - this
  ## function rebuilds them so that historical replay can take place again
  ## TODO the pruning of junk states could be moved to a separate function that
  ##      runs either on startup

  # First, we check what states we already have in the database - that allows
  # resuming the operation at any time
  let
    roots = dag.db.loadStateRoots()
    historicalRoots = getStateField(dag.headState, historical_roots).asSeq()
    historicalSummaries = dag.headState.historical_summaries.asSeq()

  var
    canonical = newSeq[Eth2Digest](
      (dag.finalizedHead.slot.epoch + EPOCHS_PER_STATE_SNAPSHOT - 1) div
        EPOCHS_PER_STATE_SNAPSHOT)
    # `junk` puts in place some infrastructure to prune unnecessary states - it
    # will be more useful in the future as a base for pruning
    junk: seq[((Slot, Eth2Digest), Eth2Digest)]
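
  # Layout note: `canonical[i]` is the root of the snapshot state expected at
  # the start slot of epoch `i * EPOCHS_PER_STATE_SNAPSHOT`; a zero root marks
  # a snapshot that is missing and will be recreated by replay below.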
  for k, v in roots:
    if k[0] >= dag.finalizedHead.slot:
      continue # skip newer stuff

    if k[0] < dag.backfill.slot:
      continue # skip stuff for which we have no blocks

    if not isFinalizedStateSnapshot(k[0]):
      # `tail` will move at the end of the process, so we won't need any
      # intermediate states
      junk.add((k, v))
      continue # skip non-snapshot slots

    if k[0] > 0:
      let bs = dag.getBlockIdAtSlot(k[0] - 1)
      if bs.isNone or bs.get().bid.root != k[1]:
        # remove things that are no longer a canonical part of the chain or
        # cannot be reached via a block
        junk.add((k, v))
        continue

    if not dag.db.containsState(dag.cfg.consensusForkAtEpoch(k[0].epoch), v):
      continue # If it's not in the database..

    canonical[k[0].epoch div EPOCHS_PER_STATE_SNAPSHOT] = v

  let
    state = (ref ForkedHashedBeaconState)()

  var
    cache: StateCache
    info: ForkedEpochInfo
    tailBid: Opt[BlockId]
    states: int

  # `canonical` holds all slots at which a state is expected to appear, using a
  # zero root whenever a particular state is missing - this way, if there's
  # partial progress or gaps, they will be dealt with correctly
  for i, state_root in canonical.mpairs():
    let
      slot = Epoch(i * EPOCHS_PER_STATE_SNAPSHOT).start_slot

    if slot < dag.backfill.slot:
      # TODO if we have era files, we could try to load blocks from them at
      #      this point
      # TODO if we don't do the above, we can of course compute the starting `i`
      continue

    if tailBid.isNone():
      if state_root.isZero:
        # If we can find an era file with this state, use it as an alternative
        # starting point - ignore failures for now
        if dag.era.getState(
            historicalRoots, historicalSummaries, slot, state[]).isOk():
          state_root = getStateRoot(state[])
          withState(state[]): dag.db.putState(forkyState)
          tailBid = Opt.some state[].latest_block_id()
      else:
        if not dag.db.getState(
            dag.cfg.consensusForkAtEpoch(slot.epoch), state_root, state[],
            noRollback):
          fatal "Cannot load state, database corrupt or created for a different network?",
            state_root, slot
          quit 1
        tailBid = Opt.some state[].latest_block_id()

      continue

    if i == 0 or canonical[i - 1].isZero:
      reset(tailBid) # No unbroken history!
      continue

    if not state_root.isZero:
      states += 1
      continue
    let
      startSlot = Epoch((i - 1) * EPOCHS_PER_STATE_SNAPSHOT).start_slot

    info "Recreating state snapshot",
      slot, startStateRoot = canonical[i - 1], startSlot

    if getStateRoot(state[]) != canonical[i - 1]:
      if not dag.db.getState(
          dag.cfg.consensusForkAtEpoch(startSlot.epoch), canonical[i - 1],
          state[], noRollback):
        error "Can't load start state, database corrupt?",
          startStateRoot = shortLog(canonical[i - 1]), slot = startSlot
        return

    for slot in startSlot ..< startSlot + (EPOCHS_PER_STATE_SNAPSHOT * SLOTS_PER_EPOCH):
      let bids = dag.getBlockIdAtSlot(slot).valueOr:
        warn "Block id missing, cannot continue - database corrupt?", slot
        return

      # The slot check is needed to avoid re-applying a block
      if bids.isProposed and getStateField(state[], latest_block_header).slot < bids.bid.slot:
        let res = dag.applyBlock(state[], bids.bid, cache, info)
        if res.isErr:
          error "Failed to apply block while building index",
            state_bid = shortLog(state[].latest_block_id()),
            error = res.error()
          return

      if slot.is_epoch:
        cache.prune(slot.epoch)

    process_slots(
      dag.cfg, state[], slot, cache, info,
      dag.updateFlags).expect("process_slots shouldn't fail when state slot is correct")

    withState(state[]):
      dag.db.putState(forkyState)
      dag.db.checkpoint()

      state_root = forkyState.root

  # Now that we've found a starting point and topped up with "intermediate"
  # states, we can update the tail to start at the starting point of the
  # first loadable state

  if tailBid.isSome():
    dag.tail = tailBid.get()
    dag.db.putTailBlock(dag.tail.root)

  if junk.len > 0:
    info "Dropping redundant states", states, redundant = junk.len

    for i in junk:
      dag.db.delStateRoot(i[0][1], i[0][0])
      dag.db.delState(dag.cfg.consensusForkAtEpoch(i[0][0].epoch), i[1])