# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}
{.used.}

import
  unittest2,
  taskpools,
  ../beacon_chain/el/merkle_minimal,
  ../beacon_chain/spec/datatypes/base,
  ../beacon_chain/spec/[beaconstate, forks, helpers, signatures, state_transition],
  ../beacon_chain/[beacon_chain_db],
  ../beacon_chain/consensus_object_pools/[
    attestation_pool, blockchain_dag, block_quarantine, block_clearance],
  ./testutil, ./testdbutil, ./testblockutil

from std/random import rand, randomize, sample
from std/sequtils import toSeq
from ../beacon_chain/spec/datatypes/capella import
  SignedBLSToExecutionChangeList
from ./testbcutil import addHeadBlock
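
# Render BlockRef via its compact short-log form in test output.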
func `$`(x: BlockRef): string = shortLog(x)
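
# No-op block-added callbacks for the addHeadBlock calls in these tests.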
const
  nilPhase0Callback = OnPhase0BlockAdded(nil)
  nilAltairCallback = OnAltairBlockAdded(nil)
  nilBellatrixCallback = OnBellatrixBlockAdded(nil)
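
# Test helper: prune state caches and fork choice data whenever the DAG
# reports that pruning is due.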
proc pruneAtFinalization(dag: ChainDAGRef) =
  if dag.needStateCachesAndForkChoicePruning():
    dag.pruneStateCachesDAG()
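
# Shorthand Result types for block-addition outcomes used in these tests.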
type
  AddHeadRes = Result[BlockRef, VerifierError]
  AddBackRes = Result[void, VerifierError]

suite "Block pool processing" & preset():
  setup:
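    # Each test case gets a fresh database, DAG and verifier, plus two phase0
    # blocks built on the head state: b1 (with attestations for slot 0) and
    # b2 (empty).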
    let rng = HmacDrbgContext.new()
    var
      db = makeTestDB(SLOTS_PER_EPOCH)
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
      taskpool = Taskpool.new()
      verifier = BatchVerifier.init(rng, taskpool)
      quarantine = Quarantine.init()
      state = newClone(dag.headState)
      cache = StateCache()
      info = ForkedEpochInfo()
      att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
      b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
      b2 = addTestBlock(state[], cache).phase0Data
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
test "basic ops":
    check:
      dag.getBlockRef(default Eth2Digest).isNone()

    let
      b0 = dag.getForkedBlock(dag.tail.root)
      bh = dag.getForkedBlock(dag.head.root)
      bh2 = dag.getForkedBlock(dag.head.bid)
    check:
      b0.isSome()
      bh.isSome()
      bh2.isSome()

      dag.getBlockRef(dag.finalizedHead.blck.root).get() ==
        dag.finalizedHead.blck
      dag.getBlockRef(dag.head.root).get() == dag.head

  test "Simple block add&get" & preset():
    let
      b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
      b1Get = dag.getForkedBlock(b1.root)

    check:
      b1Get.isSome()
      b1Get.get().root == b1.root
      b1Add[].root == b1Get.get().root
      dag.heads.len == 1
      dag.heads[0] == b1Add[]

    let
      b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
      b2Get = dag.getForkedBlock(b2.root)
      sr = dag.findShufflingRef(b1Add[].bid, b1Add[].slot.epoch)
      er = dag.findEpochRef(b1Add[].bid, b1Add[].slot.epoch)
      validators = getStateField(dag.headState, validators).lenu64()

    check:
      b2Get.isSome()
      b2Get.get().root == b2.root
      b2Add[].root == b2Get.get().root
      dag.heads.len == 1
      dag.heads[0] == b2Add[]
      dag.containsForkBlock(b2.root)
      dag.parent(b2Add[].bid).get() == b1Add[].bid
      # head not updated yet - getBlockIdAtSlot won't give those blocks
      dag.getBlockIdAtSlot(b2Add[].slot).get() ==
        BlockSlotId.init(dag.getBlockIdAtSlot(GENESIS_SLOT).get().bid, b2Add[].slot)

      sr.isSome()
      er.isSome()
      # er reuses shuffling ref instance
      er[].shufflingRef == sr[]
      # Same epoch - same epochRef
      er[] == dag.findEpochRef(b2Add[].bid, b2Add[].slot.epoch)[]
      # Different epoch that was never processed
      dag.findEpochRef(b1Add[].bid, b1Add[].slot.epoch + 1).isNone()
      # ... but we know the shuffling already!
      dag.findShufflingRef(b1Add[].bid, b1Add[].slot.epoch + 1).isSome()

      dag.validatorKey(0'u64).isSome()
      dag.validatorKey(validators - 1).isSome()
      dag.validatorKey(validators).isNone()

    # Skip one slot to get a gap
    check:
      process_slots(
        defaultRuntimeConfig, state[], getStateField(state[], slot) + 1, cache,
        info, {}).isOk()

    let
      b4 = addTestBlock(state[], cache).phase0Data
      b4Add = dag.addHeadBlock(verifier, b4, nilPhase0Callback)

    check:
      b4Add[].parent == b2Add[]

    dag.updateHead(b4Add[], quarantine, [])
    dag.pruneAtFinalization()

    check: # getBlockIdAtSlot operates on the head chain!
      dag.getBlockIdAtSlot(b2Add[].slot).get() ==
        BlockSlotId.init(b2Add[].bid, b2Add[].slot)
      dag.parentOrSlot(dag.getBlockIdAtSlot(b2Add[].slot).get()).get() ==
        BlockSlotId.init(b1Add[].bid, b2Add[].slot)
      dag.parentOrSlot(dag.getBlockIdAtSlot(b2Add[].slot + 1).get()).get() ==
        BlockSlotId.init(b2Add[].bid, b2Add[].slot)

    var blocks: array[3, BlockId]

    check:
      dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 0)) == 0
      blocks[0..<1] == [dag.tail]

      dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 1)) == 0
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
blocks[0..<2] == [dag.tail, b1Add[].bid]
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(0), 2, blocks.toOpenArray(0, 1)) == 0
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
blocks[0..<2] == [dag.tail, b2Add[].bid]
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(0), 3, blocks.toOpenArray(0, 1)) == 1
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
blocks[1..<2] == [dag.tail] # block 3 is missing!
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, 1)) == 0
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
blocks[0..<2] == [b2Add[].bid, b4Add[].bid] # block 3 is missing!
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-10-14 20:23:04 +00:00
|
|
|
# large skip step
|
|
|
|
dag.getBlockRange(Slot(0), uint64.high, blocks.toOpenArray(0, 2)) == 2
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
blocks[2..2] == [dag.tail]
|
2020-10-14 20:23:04 +00:00
|
|
|
|
|
|
|
# large skip step
|
|
|
|
dag.getBlockRange(Slot(2), uint64.high, blocks.toOpenArray(0, 1)) == 1
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
blocks[1..1] == [b2Add[].bid]
|
2020-10-14 20:23:04 +00:00
|
|
|
|
2020-05-05 09:18:44 +00:00
|
|
|
# empty length
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, -1)) == 0
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-05-05 09:18:44 +00:00
|
|
|
# No blocks in sight
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(5), 1, blocks.toOpenArray(0, 1)) == 2
|
2020-04-21 06:43:39 +00:00
|
|
|
|
2020-10-14 20:23:04 +00:00
|
|
|
# No blocks in sight
|
|
|
|
dag.getBlockRange(Slot(uint64.high), 1, blocks.toOpenArray(0, 1)) == 2
|
|
|
|
|
2020-05-05 09:18:44 +00:00
|
|
|
# No blocks in sight either due to gaps
|
2020-07-30 19:18:17 +00:00
|
|
|
dag.getBlockRange(Slot(3), 2, blocks.toOpenArray(0, 1)) == 2
|
2020-08-05 23:22:12 +00:00
|
|
|
blocks[2..<2].len == 0

    # A fork forces the clearance state to a point where it cannot be advanced
    let
      nextEpoch = dag.head.slot.epoch + 1
      nextEpochSlot = nextEpoch.start_slot()
      parentBsi = dag.head.parent.atSlot(nextEpochSlot).toBlockSlotId().get()
      stateCheckpoint = dag.stateCheckpoint(parentBsi)
      shufflingRef = dag.getShufflingRef(dag.head, nextEpoch, false).valueOr:
        raiseAssert "false"
      nextEpochProposers = withState(dag.headState):
        get_beacon_proposer_indices(
          forkyState.data, shufflingRef.shuffled_active_validator_indices,
          nextEpoch)

    check:
      # get_beacon_proposer_indices based on ShufflingRef matches EpochRef
      nextEpochProposers == dag.getEpochRef(
        dag.head, nextEpoch, true).get.beacon_proposers

      parentBsi.bid == dag.head.parent.bid
      parentBsi.slot == nextEpochSlot
      # Pre-heated caches
      dag.findShufflingRef(dag.head.parent.bid, dag.head.slot.epoch).isOk()
      dag.findShufflingRef(dag.head.parent.bid, nextEpoch).isOk()
      dag.getEpochRef(dag.head.parent, nextEpoch, true).isOk()

      # Getting an EpochRef should not result in states being stored
      db.getStateRoot(stateCheckpoint.bid.root, stateCheckpoint.slot).isErr()
      # this is required for the test to work - it's not a "public"
      # post-condition of getEpochRef
      getStateField(dag.epochRefState, slot) == nextEpochSlot

    assign(state[], dag.epochRefState)

    let bnext = addTestBlock(state[], cache).phase0Data
    discard dag.addHeadBlock(verifier, bnext, nilPhase0Callback)

    check:
      # Adding a block on top of the epochRef state, however, should result in
      # the state at the checkpoint being stored
      db.getStateRoot(stateCheckpoint.bid.root, stateCheckpoint.slot).isOk()

  test "Adding the same block twice returns a Duplicate error" & preset():
    let
      b10 = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
      b11 = dag.addHeadBlock(verifier, b1, nilPhase0Callback)

    check:
      b11 == AddHeadRes.err VerifierError.Duplicate
      not b10[].isNil

  test "updateHead updates head and headState" & preset():
    let
      b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)

    dag.updateHead(b1Add[], quarantine, [])
    dag.pruneAtFinalization()

    check:
      dag.head == b1Add[]
      getStateField(dag.headState, slot) == b1Add[].slot

  test "updateState sanity" & preset():
    let
      b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
      b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
      bs1 = BlockSlotId.init(b1Add[].bid, b1.message.slot)
      bs1_3 = BlockSlotId.init(b1Add[].bid, 3.Slot)
      bs2_3 = BlockSlotId.init(b2Add[].bid, 3.Slot)

    let tmpState = assignClone(dag.headState)
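
    # updateState rewinds or advances tmpState to the given BlockSlotId,
    # replaying blocks and empty slots as needed - the moves below step
    # forward across empty slots, back to the parent block, over to a
    # different block and finally back towards genesis.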

    # move to specific block
    var cache = StateCache()
    check:
      dag.updateState(tmpState[], bs1, false, cache, dag.updateFlags)
      tmpState[].latest_block_root == b1Add[].root
      getStateField(tmpState[], slot) == bs1.slot

    # Skip slots
    check:
      dag.updateState(tmpState[], bs1_3, false, cache, dag.updateFlags) # skip slots
      tmpState[].latest_block_root == b1Add[].root
      getStateField(tmpState[], slot) == bs1_3.slot

    # Move back slots, but not blocks
    check:
      dag.updateState(
        tmpState[], dag.parent(bs1_3.bid).expect("block").atSlot(), false,
        cache, dag.updateFlags)
      tmpState[].latest_block_root == b1Add[].parent.root
      getStateField(tmpState[], slot) == b1Add[].parent.slot

    # Move to different block and slot
    check:
      dag.updateState(tmpState[], bs2_3, false, cache, dag.updateFlags)
      tmpState[].latest_block_root == b2Add[].root
      getStateField(tmpState[], slot) == bs2_3.slot

    # Move back slot and block
    check:
      dag.updateState(tmpState[], bs1, false, cache, dag.updateFlags)
      tmpState[].latest_block_root == b1Add[].root
      getStateField(tmpState[], slot) == bs1.slot

    # Move back to genesis
    check:
      dag.updateState(
        tmpState[], dag.parent(bs1.bid).expect("block").atSlot(), false, cache,
        dag.updateFlags)
      tmpState[].latest_block_root == b1Add[].parent.root
      getStateField(tmpState[], slot) == b1Add[].parent.slot

suite "Block pool altair processing" & preset():
  setup:
    let rng = HmacDrbgContext.new()

    var
      cfg = defaultRuntimeConfig
    cfg.ALTAIR_FORK_EPOCH = Epoch(1)
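
    # With ALTAIR_FORK_EPOCH set to epoch 1, advancing the state to the start
    # of epoch 1 in the setup below switches it to the Altair fork before any
    # of the tests in this suite run.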

    var
      db = makeTestDB(SLOTS_PER_EPOCH)
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
      taskpool = Taskpool.new()
      verifier = BatchVerifier.init(rng, taskpool)
      quarantine = Quarantine.init()
      state = newClone(dag.headState)
      cache = StateCache()
      info = ForkedEpochInfo()

    # Advance to altair
    check:
      process_slots(
        cfg, state[], cfg.ALTAIR_FORK_EPOCH.start_slot(), cache,
        info, {}).isOk()

      state[].kind == ConsensusFork.Altair
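
    # The state is now an Altair state, so the test blocks produced below are
    # Altair blocks as well - hence the `.altairData` accessors.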

    var
      b1 = addTestBlock(state[], cache).altairData
      att1 = makeFullAttestations(state[], b1.root, b1.message.slot, cache)
      b2 = addTestBlock(state[], cache, attestations = att1).altairData

  test "Invalid signatures" & preset():
    let badSignature = get_slot_signature(
      Fork(), ZERO_HASH, 42.Slot,
      MockPrivKeys[ValidatorIndex(0)]).toValidatorSig()
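
    # badSignature is a well-formed BLS signature, but over unrelated data (a
    # slot signature for slot 42 against a default fork and zero root), so it
    # cannot verify in place of any of b2's actual signatures.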

    check:
      dag.addHeadBlock(verifier, b1, nilAltairCallback).isOk()
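
    # With the valid b1 accepted, each `block:` below takes the otherwise
    # valid b2 and corrupts a single signature before offering it to
    # addHeadBlock - here, the outer block signature is replaced.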
    block: # Main signature
      var b = b2
      b.signature = badSignature
      let
        bAdd = dag.addHeadBlock(verifier, b, nilAltairCallback)
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
check:
|
2022-11-10 17:40:27 +00:00
|
|
|
bAdd == AddHeadRes.err VerifierError.Invalid
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
|
|
|
|
block: # Randao reveal
|
|
|
|
var b = b2
|
|
|
|
b.message.body.randao_reveal = badSignature
|
|
|
|
let
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
bAdd = dag.addHeadBlock(verifier, b, nilAltairCallback)
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
check:
|
2022-11-10 17:40:27 +00:00
|
|
|
bAdd == AddHeadRes.err VerifierError.Invalid
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
|
|
|
|
block: # Attestations
|
|
|
|
var b = b2
|
|
|
|
b.message.body.attestations[0].signature = badSignature
|
|
|
|
let
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
bAdd = dag.addHeadBlock(verifier, b, nilAltairCallback)
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
check:
|
2022-11-10 17:40:27 +00:00
|
|
|
bAdd == AddHeadRes.err VerifierError.Invalid
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
|
|
|
|
block: # SyncAggregate empty
|
|
|
|
var b = b2
|
|
|
|
b.message.body.sync_aggregate.sync_committee_signature = badSignature
|
|
|
|
let
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
bAdd = dag.addHeadBlock(verifier, b, nilAltairCallback)
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
check:
|
2022-11-10 17:40:27 +00:00
|
|
|
bAdd == AddHeadRes.err VerifierError.Invalid
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
|
|
|
|
block: # SyncAggregate junk
|
|
|
|
var b = b2
|
|
|
|
b.message.body.sync_aggregate.sync_committee_signature = badSignature
|
|
|
|
b.message.body.sync_aggregate.sync_committee_bits[0] = true
|
|
|
|
|
|
|
|
let
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
bAdd = dag.addHeadBlock(verifier, b, nilAltairCallback)
|
Speed up altair block processing 2x (#3115)
* Speed up altair block processing >2x
Like #3089, this PR drastially speeds up historical REST queries and
other long state replays.
* cache sync committee validator indices
* use ~80mb less memory for validator pubkey mappings
* batch-verify sync aggregate signature (fixes #2985)
* document sync committee hack with head block vs sync message block
* add batch signature verification failure tests
Before:
```
../env.sh nim c -d:release -r ncli_db --db:mainnet_0/db bench --start-slot:-1000
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
5830.675, 0.000, 5830.675, 5830.675, 1, Initialize DB
0.481, 1.878, 0.215, 59.167, 981, Load block from database
8422.566, 0.000, 8422.566, 8422.566, 1, Load state from database
6.996, 1.678, 0.042, 14.385, 969, Advance slot, non-epoch
93.217, 8.318, 84.192, 122.209, 32, Advance slot, epoch
20.513, 23.665, 11.510, 201.561, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
After:
```
7081.422, 0.000, 7081.422, 7081.422, 1, Initialize DB
0.553, 2.122, 0.175, 66.692, 981, Load block from database
5439.446, 0.000, 5439.446, 5439.446, 1, Load state from database
6.829, 1.575, 0.043, 12.156, 969, Advance slot, non-epoch
94.716, 2.749, 88.395, 100.026, 32, Advance slot, epoch
11.636, 23.766, 4.889, 205.250, 981, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database load
0.000, 0.000, 0.000, 0.000, 0, Database store
```
* add comment
2021-11-24 12:43:50 +00:00
|
|
|
check:
|
2022-11-10 17:40:27 +00:00
|
|
|
bAdd == AddHeadRes.err VerifierError.Invalid
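
    # The five cases above share one pattern; a hypothetical helper (shown
    # only as a commented sketch, not used by this suite) could express it as:
    #
    #   template expectInvalid(mutate: untyped) =
    #     var b {.inject.} = b2
    #     mutate
    #     check:
    #       dag.addHeadBlock(verifier, b, nilAltairCallback) ==
    #         AddHeadRes.err VerifierError.Invalid
    #
    #   expectInvalid: b.signature = badSignature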

suite "chain DAG finalization tests" & preset():
  setup:
    let rng = HmacDrbgContext.new()
    var
      db = makeTestDB(SLOTS_PER_EPOCH)
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
      taskpool = Taskpool.new()
      verifier = BatchVerifier.init(rng, taskpool)
      quarantine = Quarantine.init()
      cache = StateCache()
      info = ForkedEpochInfo()
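
  # `setup` runs before each test in this suite, providing a fresh test
  # database, DAG and quarantine; `verifier` batch-verifies block signatures
  # on the shared taskpool.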

  test "prune heads on finalization" & preset():
    # Create a fork that will not be taken
    var
      blck = makeTestBlock(dag.headState, cache).phase0Data
      tmpState = assignClone(dag.headState)
    check:
      process_slots(
        defaultRuntimeConfig, tmpState[],
        getStateField(tmpState[], slot) + (5 * SLOTS_PER_EPOCH).uint64,
        cache, info, {}).isOk()

    let lateBlock = addTestBlock(tmpState[], cache).phase0Data
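    # `lateBlock` extends the fork branch five epochs ahead; it is added to
    # the DAG only further below, after the canonical chain has finalized
    # past that point.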

    block:
      let status = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
      check: status.isOk()

    assign(tmpState[], dag.headState)

    # skip slots so we can test gappy getBlockIdAtSlot
    check process_slots(
      defaultRuntimeConfig, tmpState[],
      getStateField(tmpState[], slot) + 2.uint64,
      cache, info, {}).isOk()
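
    # Produce six epochs of blocks with full attestations so that the chain
    # justifies and finalizes, pruning the fork created above.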
    for i in 0 ..< (SLOTS_PER_EPOCH * 6):
      if i == 1:
        # There are 2 heads now because of the fork at slot 1
        check:
          dag.heads.len == 2

      blck = addTestBlock(
        tmpState[], cache,
        attestations = makeFullAttestations(
          tmpState[], dag.head.root, getStateField(tmpState[], slot), cache,
          {})).phase0Data
      let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
      check: added.isOk()
      dag.updateHead(added[], quarantine, [])
      dag.pruneAtFinalization()
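
    # After six epochs the chain has finalized: only the canonical head
    # remains, and blocks older than the finalized checkpoint are reachable
    # by slot (BlockId) only, no longer via BlockRef.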
    check:
      dag.heads.len() == 1
      dag.getBlockIdAtSlot(0.Slot).get().bid.slot == 0.Slot
      dag.getBlockIdAtSlot(2.Slot).get() ==
        BlockSlotId.init(dag.getBlockIdAtSlot(1.Slot).get().bid, 2.Slot)
      dag.getBlockIdAtSlot(dag.head.slot).get() == BlockSlotId.init(
        dag.head.bid, dag.head.slot)
      dag.getBlockIdAtSlot(dag.head.slot + 1).get() == BlockSlotId.init(
        dag.head.bid, dag.head.slot + 1)
      not dag.containsForkBlock(dag.getBlockIdAtSlot(5.Slot).get().bid.root)
      dag.containsForkBlock(dag.finalizedHead.blck.root)

      dag.getBlockRef(dag.getBlockIdAtSlot(0.Slot).get().bid.root).isNone() # Finalized - no BlockRef
      dag.getBlockRef(dag.finalizedHead.blck.root).isSome()

      isNil dag.finalizedHead.blck.parent

    check:
      dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()
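
    # Walk the chain from head back to genesis via `parent` to make sure
    # pruning left the linear BlockId history intact.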
    block:
      var cur = dag.head.bid
      while true:
        let parent = dag.parent(cur)
        if cur.slot > 0:
          check:
            parent.isSome and parent.get().slot < cur.slot
          cur = parent.get()
        else:
          check:
            parent.isErr()
          break
      check: cur.slot == 0
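
    # Same walk, but over BlockSlotId using `parentOrSlot`, which also steps
    # through the empty slots between blocks.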
    block:
      var cur = dag.head.bid.atSlot()
      while true:
        let parent = dag.parentOrSlot(cur)
        if cur.slot > 0:
          check:
            parent.isSome and (parent.get().slot < cur.slot or parent.get().bid != cur.bid)
          cur = parent.get()
        else:
          check:
            parent.isErr()
          break
      check: cur.slot == 0

    let
      finalER = dag.getEpochRef(
        dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false)

    # The EpochRef for the finalized block is needed for eth1 voting, so we
    # should never drop it!
    check:
      not finalER.isErr()
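
    # Nothing older than the finalized epoch should remain in the EpochRef
    # cache.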
    block:
      for er in dag.epochRefs.entries:
        check: er.value == nil or er.value.epoch >= dag.finalizedHead.slot.epoch

    block:
      let tmpStateData = assignClone(dag.headState)

      # Check that cached data is available after updateState - since we
      # just processed the head the relevant epochrefs should not have been
      # evicted yet
      cache = StateCache()
      check: updateState(
        dag, tmpStateData[],
        dag.head.atSlot(dag.head.slot).toBlockSlotId().expect("not nil"),
        false, cache, dag.updateFlags)

      check:
        dag.head.slot.epoch in cache.shuffled_active_validator_indices
        (dag.head.slot.epoch - 1) in cache.shuffled_active_validator_indices

        dag.head.slot in cache.beacon_proposer_indices

    block:
      # The late block is a block whose parent was finalized long ago and thus
      # is no longer a viable head candidate
      let status = dag.addHeadBlock(verifier, lateBlock, nilPhase0Callback)
      # The branch it builds on was finalized away, so the block is rejected
      # as an unviable fork
      check: status == AddHeadRes.err VerifierError.UnviableFork
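
    # State roots for the head and finalized checkpoints should still be in
    # the database, while checkpoint states older than the finalized one have
    # been pruned.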
    block:
      let
        finalizedCheckpoint = dag.stateCheckpoint(dag.finalizedHead.toBlockSlotId().get())
        headCheckpoint = dag.stateCheckpoint(dag.head.bid.atSlot())
        prunedCheckpoint = dag.stateCheckpoint(dag.parent(dag.finalizedHead.blck.bid).get().atSlot())
      check:
        db.getStateRoot(headCheckpoint.bid.root, headCheckpoint.slot).isSome
        db.getStateRoot(finalizedCheckpoint.bid.root, finalizedCheckpoint.slot).isSome
        db.getStateRoot(prunedCheckpoint.bid.root, prunedCheckpoint.slot).isNone

    # Roll back head block (e.g., because it was declared INVALID)
    let parentRoot = dag.head.parent.root
    dag.updateHead(dag.head.parent, quarantine, [])
    check: dag.head.root == parentRoot
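
    # Reload a second ChainDAG from the same database - the rolled-back head
    # should be what a restarted node sees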
    let
      validatorMonitor2 = newClone(ValidatorMonitor.init())
      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    # check that the state reloaded from database resembles what we had before
    check:
      dag2.tail.root == dag.tail.root
      dag2.head.root == dag.head.root
      dag2.head.root == parentRoot
      dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
      dag2.finalizedHead.slot == dag.finalizedHead.slot
      getStateRoot(dag2.headState) == getStateRoot(dag.headState)

    # No canonical block data should be pruned by the removal of the fork
    for i in Slot(0)..dag2.head.slot:
      let bids = dag.getBlockIdAtSlot(i).expect("found it")
      if bids.isProposed:
        check: dag2.getForkedBlock(bids.bid).isSome

    # The unviable block should have been pruned however
    check: dag2.getForkedBlock(lateBlock.root).isNone
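
  # Builds one epoch of blocks while snapshotting the state just before the
  # final block, then proposes a competing block from that snapshot so that
  # the current head ends up orphaned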
  test "orphaned epoch block" & preset():
    let prestate = (ref ForkedHashedBeaconState)(kind: ConsensusFork.Phase0)
    for i in 0 ..< SLOTS_PER_EPOCH:
      if i == SLOTS_PER_EPOCH - 1:
        assign(prestate[], dag.headState)

      let blck = makeTestBlock(dag.headState, cache).phase0Data
      let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
      check: added.isOk()
      dag.updateHead(added[], quarantine, [])
      dag.pruneAtFinalization()

    check:
      dag.heads.len() == 1

    # The loop creates multiple branches, which StateCache isn't suitable for
    cache = StateCache()

    doAssert process_slots(
      defaultRuntimeConfig, prestate[], getStateField(prestate[], slot) + 1,
      cache, info, {}).isOk()

    # create another block, orphaning the head
    let blck = makeTestBlock(prestate[], cache).phase0Data

    # Add block, but don't update head
    let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
    check: added.isOk()

    var
      validatorMonitor2 = newClone(ValidatorMonitor.init())
      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    # check that we can apply the block after the orphaning
    let added2 = dag2.addHeadBlock(verifier, blck, nilPhase0Callback)
    check: added2.isOk()
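
  # Stops block production two slots short of an epoch boundary, then adds a
  # block a few slots past it, so the epoch transition is computed across a
  # gap of empty slots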
  test "init with gaps" & preset():
    for blck in makeTestBlocks(
        dag.headState, cache, int(SLOTS_PER_EPOCH * 6 - 2), attested = true):
      let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
      check: added.isOk()
      dag.updateHead(added[], quarantine, [])
      dag.pruneAtFinalization()

    # Advance past epoch so that the epoch transition is gapped
    check:
      process_slots(
        defaultRuntimeConfig, dag.headState, Slot(SLOTS_PER_EPOCH * 6 + 2),
        cache, info, {}).isOk()

    let blck = makeTestBlock(
      dag.headState, cache,
      attestations = makeFullAttestations(
        dag.headState, dag.head.root, getStateField(dag.headState, slot),
        cache, {})).phase0Data

    let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
    check: added.isOk()
    dag.updateHead(added[], quarantine, [])
    dag.pruneAtFinalization()
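
    # Walk the remaining (unfinalized) BlockRef chain from head towards the
    # finalized head, recreating the state at each proposed block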
    block:
      # Check that we can rewind to every block from head to finalized
      var
        cur = dag.head
        tmpStateData = assignClone(dag.headState)
      while cur != nil: # Go all the way to dag.finalizedHead
        assign(tmpStateData[], dag.headState)
        check:
          dag.updateState(tmpStateData[], cur.bid.atSlot(), false, cache,
            dag.updateFlags)
          dag.getForkedBlock(cur.bid).get().phase0Data.message.state_root ==
            getStateRoot(tmpStateData[])
          getStateRoot(tmpStateData[]) == hash_tree_root(
            tmpStateData[].phase0Data.data)
        cur = cur.parent

    let
      validatorMonitor2 = newClone(ValidatorMonitor.init())
      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    # check that the state reloaded from database resembles what we had before
    check:
      dag2.tail.root == dag.tail.root
      dag2.head.root == dag.head.root
      dag2.finalizedHead.blck.root == dag.finalizedHead.blck.root
      dag2.finalizedHead.slot == dag.finalizedHead.slot
      getStateRoot(dag2.headState) == getStateRoot(dag.headState)

  test "shutdown during finalization" & preset():
    var testPassed: bool

    # Configure a hook that is called during finalization while the
    # database has been partially written, to test behaviour if the
    # beacon node is exited while the database is inconsistent.
    proc onHeadChanged(data: HeadChangeInfoObject) =
      if data.epoch_transition:
        # Check test assumption: Head block was written before this callback
        let headBlock = dag.db.getHeadBlock().expect("Valid DB")
        doAssert headBlock == data.block_root, "Head was written before CB"

        # Check test assumption: New finalized blocks were not written yet
        let
          stateFinalizedSlot =
            dag.headState.getStateField(finalized_checkpoint).epoch.start_slot
          dbFinalizedSlot =
            dag.db.finalizedBlocks.high.expect("Valid DB")
        doAssert stateFinalizedSlot > dbFinalizedSlot, "Finalized not written"

        # If the beacon node were to exit _now_, this is what the DB looks like.
        # Validate that we can initialize a new DAG from this database.
        let validatorMonitor2 = newClone(ValidatorMonitor.init())
        discard ChainDAGRef.init(
          defaultRuntimeConfig, db, validatorMonitor2, {})
        testPassed = true

    dag.setHeadCb(onHeadChanged)

    for blck in makeTestBlocks(
        dag.headState, cache, int(SLOTS_PER_EPOCH * 4), attested = true):
      let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
      check: added.isOk
      dag.updateHead(added[], quarantine, [])
      dag.pruneAtFinalization()

    check testPassed
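
# Databases written by older Nimbus releases used a kvstore-only layout
# without the separate immutable-validator-key table - these tests make sure
# such databases can still be opened and extended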
suite "Old database versions" & preset():
  setup:
    let
      rng = HmacDrbgContext.new()
      genState = newClone(initialize_hashed_beacon_state_from_eth1(
        defaultRuntimeConfig, ZERO_HASH, 0,
        makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
        {skipBlsValidation}))
      genBlock = get_initial_beacon_block(genState[])
    var
      taskpool = Taskpool.new()
      verifier = BatchVerifier.init(rng, taskpool)
      quarantine = Quarantine.init()
test "pre-1.1.0":
|
|
|
|
# only kvstore, no immutable validator keys
|
2022-11-28 23:21:58 +00:00
|
|
|
let
|
|
|
|
sq = SqStoreRef.init("", "test", inMemory = true).expect(
|
|
|
|
"working database (out of memory?)")
|
|
|
|
v0 = BeaconChainDBV0.new(sq, readOnly = false)
|
|
|
|
db = BeaconChainDB.new(sq)
|
2021-08-05 08:26:10 +00:00
|
|
|
|
|
|
|
# preInit a database to a v1.0.12 state
|
2022-11-28 23:21:58 +00:00
|
|
|
v0.putStateV0(genState[].root, genState[].data)
|
|
|
|
v0.putBlockV0(genBlock)
|
|
|
|
|
2021-11-18 12:02:43 +00:00
|
|
|
db.putStateRoot(
|
2022-03-16 07:20:40 +00:00
|
|
|
genState[].latest_block_root, genState[].data.slot, genState[].root)
|
2021-08-05 08:26:10 +00:00
|
|
|
db.putTailBlock(genBlock.root)
|
|
|
|
db.putHeadBlock(genBlock.root)
|
2021-11-05 07:34:34 +00:00
|
|
|
db.putGenesisBlock(genBlock.root)
|
2021-08-05 08:26:10 +00:00
|
|
|
|
|
|
|
var
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db,validatorMonitor, {})
|
2022-03-16 07:20:40 +00:00
|
|
|
state = newClone(dag.headState)
|
2021-08-05 08:26:10 +00:00
|
|
|
cache = StateCache()
|
|
|
|
att0 = makeFullAttestations(state[], dag.tail.root, 0.Slot, cache)
|
2021-11-18 12:02:43 +00:00
|
|
|
b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
|
2021-08-05 08:26:10 +00:00
|
|
|
|
|
|
|
check:
|
|
|
|
b1Add.isOk()
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
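
# Two runtime configs that differ only in ALTAIR_FORK_EPOCH share a single
# database: the phase0 config never forks, the altair config forks at epoch 2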
suite "Diverging hardforks":
|
|
|
|
setup:
|
2023-06-19 22:43:50 +00:00
|
|
|
let rng = HmacDrbgContext.new()
|
|
|
|
|
2021-09-08 03:46:33 +00:00
|
|
|
var
|
|
|
|
phase0RuntimeConfig = defaultRuntimeConfig
|
|
|
|
altairRuntimeConfig = defaultRuntimeConfig
|
|
|
|
|
|
|
|
phase0RuntimeConfig.ALTAIR_FORK_EPOCH = FAR_FUTURE_EPOCH
|
|
|
|
altairRuntimeConfig.ALTAIR_FORK_EPOCH = 2.Epoch
|
|
|
|
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, phase0RuntimeConfig, db, validatorMonitor, {})
|
2023-08-03 08:36:45 +00:00
|
|
|
taskpool = Taskpool.new()
|
|
|
|
verifier = BatchVerifier.init(rng, taskpool)
|
2021-12-06 09:49:01 +00:00
|
|
|
quarantine = newClone(Quarantine.init())
|
2021-09-08 03:46:33 +00:00
|
|
|
cache = StateCache()
|
2021-10-13 14:24:36 +00:00
|
|
|
info = ForkedEpochInfo()
|
2022-03-16 07:20:40 +00:00
|
|
|
tmpState = assignClone(dag.headState)
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
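
  # Both tests below write blocks using the phase0-only config, then
  # re-initialise the DAG over the same database with the altair config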
test "Tail block only in common":
|
|
|
|
check:
|
|
|
|
process_slots(
|
|
|
|
phase0RuntimeConfig, tmpState[],
|
|
|
|
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
2022-01-17 11:19:58 +00:00
|
|
|
cache, info, {}).isOk()
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
# Because the first block is after the Altair transition, the only block in
|
|
|
|
# common is the tail block
|
|
|
|
var
|
2021-11-18 12:02:43 +00:00
|
|
|
b1 = addTestBlock(tmpState[], cache).phase0Data
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
check b1Add.isOk()
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(b1Add[], quarantine[], [])
|
2021-09-08 03:46:33 +00:00
|
|
|
|
2021-12-20 19:20:31 +00:00
|
|
|
let validatorMonitorAltair = newClone(ValidatorMonitor.init())
|
|
|
|
|
2022-02-20 20:13:06 +00:00
|
|
|
let dagAltair = init(
|
2021-12-20 19:20:31 +00:00
|
|
|
ChainDAGRef, altairRuntimeConfig, db, validatorMonitorAltair, {})
|
2021-09-08 03:46:33 +00:00
|
|
|
discard AttestationPool.init(dagAltair, quarantine)
|
|
|
|
|
|
|
|
test "Non-tail block in common":
|
|
|
|
check:
|
|
|
|
process_slots(
|
|
|
|
phase0RuntimeConfig, tmpState[],
|
|
|
|
getStateField(tmpState[], slot) + SLOTS_PER_EPOCH.uint64,
|
2022-01-17 11:19:58 +00:00
|
|
|
cache, info, {}).isOk()
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
# There's a block in the shared-correct phase0 hardfork, before epoch 2
|
|
|
|
var
|
2021-11-18 12:02:43 +00:00
|
|
|
b1 = addTestBlock(tmpState[], cache).phase0Data
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
check:
|
|
|
|
b1Add.isOk()
|
|
|
|
process_slots(
|
|
|
|
phase0RuntimeConfig, tmpState[],
|
|
|
|
getStateField(tmpState[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
2022-01-17 11:19:58 +00:00
|
|
|
cache, info, {}).isOk()
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
var
|
2021-11-18 12:02:43 +00:00
|
|
|
b2 = addTestBlock(tmpState[], cache).phase0Data
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
|
2021-09-08 03:46:33 +00:00
|
|
|
|
|
|
|
check b2Add.isOk()
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(b2Add[], quarantine[], [])
|
2021-09-08 03:46:33 +00:00
|
|
|
|
2021-12-20 19:20:31 +00:00
|
|
|
let validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
|
2022-02-20 20:13:06 +00:00
|
|
|
let dagAltair = init(
|
2021-12-20 19:20:31 +00:00
|
|
|
ChainDAGRef, altairRuntimeConfig, db, validatorMonitor, {})
|
2021-09-08 03:46:33 +00:00
|
|
|
discard AttestationPool.init(dagAltair, quarantine)
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
|
|
|
|
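
# The backfill tests start a DAG from a checkpoint (tail) state two epochs
# past genesis without storing the corresponding blocks, leaving history to
# be filled in backwards afterwards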
suite "Backfill":
|
|
|
|
setup:
|
|
|
|
let
|
|
|
|
genState = (ref ForkedHashedBeaconState)(
|
2023-01-28 19:53:41 +00:00
|
|
|
kind: ConsensusFork.Phase0,
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
phase0Data: initialize_hashed_beacon_state_from_eth1(
|
2022-06-18 04:57:37 +00:00
|
|
|
defaultRuntimeConfig, ZERO_HASH, 0,
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
|
|
|
|
{skipBlsValidation}))
|
|
|
|
tailState = assignClone(genState[])
|
|
|
|
|
|
|
|
blocks = block:
|
|
|
|
var blocks: seq[ForkedSignedBeaconBlock]
|
|
|
|
var cache: StateCache
|
2022-01-05 18:38:04 +00:00
|
|
|
for i in 0..<SLOTS_PER_EPOCH * 2:
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
blocks.add addTestBlock(tailState[], cache)
|
|
|
|
blocks
|
|
|
|
|
|
|
|
let
|
|
|
|
db = BeaconChainDB.new("", inMemory = true)
|
|
|
|
|
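
  # Initialise from genesis and checkpoint states only (no blocks) and check
  # what can be resolved before any backfilling has taken place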
  test "Backfill to genesis":
    let
      tailBlock = blocks[^1]
      genBlock = get_initial_beacon_block(genState[])

    ChainDAGRef.preInit(db, genState[])
    ChainDAGRef.preInit(db, tailState[])

    let
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})

    var cache = StateCache()
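
    # The tail block id is known from the checkpoint state, but its body is
    # not in the database yet - backfill is expected to start at the tail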
|
|
|
check:
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
dag.getBlockRef(tailBlock.root).get().bid == dag.tail
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
dag.getBlockRef(blocks[^2].root).isNone()
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
|
Prune `BlockRef` on finalization (#3513)
Up til now, the block dag has been using `BlockRef`, a structure adapted
for a full DAG, to represent all of chain history. This is a correct and
simple design, but does not exploit the linearity of the chain once
parts of it finalize.
By pruning the in-memory `BlockRef` structure at finalization, we save,
at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory
landing us at a steady state of ~750mb normal memory usage for a
validating node.
Above all though, we prevent memory usage from growing proportionally
with the length of the chain, something that would not be sustainable
over time - instead, the steady state memory usage is roughly
determined by the validator set size which grows much more slowly. With
these changes, the core should remain sustainable memory-wise post-merge
all the way to withdrawals (when the validator set is expected to grow).
In-memory indices are still used for the "hot" unfinalized portion of
the chain - this ensure that consensus performance remains unchanged.
What changes is that for historical access, we use a db-based linear
slot index which is cache-and-disk-friendly, keeping the cost for
accessing historical data at a similar level as before, achieving the
savings at no percievable cost to functionality or performance.
A nice collateral benefit is the almost-instant startup since we no
longer load any large indicies at dag init.
The cost of this functionality instead can be found in the complexity of
having to deal with two ways of traversing the chain - by `BlockRef` and
by slot.
* use `BlockId` instead of `BlockRef` where finalized / historical data
may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`
instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this need revisiting :)
2022-03-17 17:42:56 +00:00
|
|
|
      dag.getBlockId(tailBlock.root).get() == dag.tail
      dag.getBlockId(blocks[^2].root).isNone()

      dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
      dag.getBlockIdAtSlot(dag.tail.slot - 1).get().bid ==
        blocks[^2].toBlockId() # recovered from tailState
      dag.getBlockIdAtSlot(Slot(0)).isSome() # genesis stored in db
      dag.getBlockIdAtSlot(Slot(1)).isSome() # recovered from tailState

      # No EpochRef for pre-tail epochs
      dag.getEpochRef(dag.tail, dag.tail.slot.epoch - 1, true).isErr()

      # Should get EpochRef for the tail however
      dag.getEpochRef(dag.tail, dag.tail.slot.epoch, true).isOk()
      dag.getEpochRef(dag.tail, dag.tail.slot.epoch + 1, true).isOk()

      # Should not get EpochRef for random block
      dag.getEpochRef(
        BlockId(root: blocks[^2].root, slot: dag.tail.slot), # incorrect slot
        dag.tail.slot.epoch, true).isErr()

      dag.getEpochRef(dag.tail, dag.tail.slot.epoch + 1, true).isOk()

      dag.getFinalizedEpochRef() != nil

      # Checkpoint block is unavailable, and should be backfilled first
      not dag.containsBlock(dag.tail)
      dag.backfill == BeaconBlockSummary(
        slot: dag.tail.slot + 1,
        parent_root: dag.tail.root)

      # Check that we can propose right from the checkpoint state
      dag.getProposalState(dag.head, dag.head.slot + 1, cache).isOk()
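
    # Tamper with the most recent block by giving it the signature of the
    # previous block - backfill verification should reject it as Invalid.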
    var badBlock = blocks[^1].phase0Data
    badBlock.signature = blocks[^2].phase0Data.signature
    check:
      dag.addBackfillBlock(badBlock) == AddBackRes.err VerifierError.Invalid
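
    # Backfill proceeds backwards from the tail: blocks whose parent has not
    # been backfilled yet are rejected with MissingParent.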
    check:
      dag.addBackfillBlock(blocks[^3].phase0Data) ==
        AddBackRes.err VerifierError.MissingParent
      dag.addBackfillBlock(genBlock.phase0Data.asSigned()) ==
        AddBackRes.err VerifierError.MissingParent
      dag.addBackfillBlock(blocks[^2].phase0Data) ==
        AddBackRes.err VerifierError.MissingParent
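
      # The tail (checkpoint) block itself is the first block accepted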
      dag.addBackfillBlock(tailBlock.phase0Data).isOk()
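
    # With the tail block in place, its parent can be backfilled; the block
    # then becomes reachable by root and by slot, and `dag.backfill` tracks it.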
    check:
      dag.addBackfillBlock(blocks[^2].phase0Data).isOk()

      dag.getBlockRef(tailBlock.root).get().bid == dag.tail
      dag.getBlockRef(blocks[^2].root).isNone()

      dag.getBlockId(tailBlock.root).get() == dag.tail
      dag.getBlockId(blocks[^2].root).get().root == blocks[^2].root

      dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
      dag.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
        blocks[^2].toBlockId().atSlot()
      dag.getBlockIdAtSlot(dag.tail.slot - 2).get() ==
        blocks[^3].toBlockId().atSlot() # recovered from tailState

      dag.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
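
    # Each additional parent extends the by-slot index one more slot backwards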
    check:
      dag.addBackfillBlock(blocks[^3].phase0Data).isOk()

      dag.getBlockIdAtSlot(dag.tail.slot - 2).get() ==
        blocks[^3].toBlockId().atSlot()
      dag.getBlockIdAtSlot(dag.tail.slot - 3).get() ==
        blocks[^4].toBlockId().atSlot() # recovered from tailState
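
    # Backfill the remaining blocks all the way back towards genesis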
    for i in 3..<blocks.len:
      check: dag.addBackfillBlock(blocks[blocks.len - i - 1].phase0Data).isOk()
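
    # Once backfill has reached genesis, adding the genesis block again is a
    # Duplicate and the backfill pointer rests at GENESIS_SLOT.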
    check:
      dag.addBackfillBlock(genBlock.phase0Data.asSigned) ==
        AddBackRes.err VerifierError.Duplicate

      dag.backfill.slot == GENESIS_SLOT
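
    # Rebuilding the index should leave every backfilled block reachable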
    dag.rebuildIndex()

    check:
      dag.getFinalizedEpochRef() != nil

    for i in 0..<blocks.len:
      check dag.containsBlock(blocks[i].toBlockId())
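
  # Restarting from the same database must restore the backfill progress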
test "Reload backfill position":
|
Backfill support for ChainDAG (#3171)
In the ChainDAG, 3 block pointers are kept: genesis, tail and head. This
PR adds one more block pointer: the backfill block which represents the
block that has been backfilled so far.
When doing a checkpoint sync, a random block is given as starting point
- this is the tail block, and we require that the tail block has a
corresponding state.
When backfilling, we end up with blocks without corresponding states,
hence we cannot use `tail` as a backfill pointer - there is no state.
Nonetheless, we need to keep track of where we are in the backfill
process between restarts, such that we can answer GetBeaconBlocksByRange
requests.
This PR adds the basic support for backfill handling - it needs to be
integrated with backfill sync, and the REST API needs to be adjusted to
take advantage of the new backfilled blocks when responding to certain
requests.
Future work will also enable moving the tail in either direction:
* pruning means moving the tail forward in time and removing states
* backwards means recreating past states from genesis, such that
intermediate states are recreated step by step all the way to the tail -
at that point, tail, genesis and backfill will match up.
* backfilling is done when backfill != genesis - later, this will be the
WSS checkpoint instead
2021-12-13 13:36:06 +00:00
|
|
|
let
|
|
|
|
tailBlock = blocks[^1]
|
|
|
|
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
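
    # Seed the database with both the genesis state and the checkpoint state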
    ChainDAGRef.preInit(db, genState[])
    ChainDAGRef.preInit(db, tailState[])

    let
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
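
    # Backfill a couple of blocks; `dag.backfill` should follow the most
    # recently accepted block.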
    check:
      dag.addBackfillBlock(blocks[^1].phase0Data).isOk()
      dag.backfill == blocks[^1].phase0Data.message.toBeaconBlockSummary()

      dag.addBackfillBlock(blocks[^2].phase0Data).isOk()
      dag.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
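
    # Re-open the same database with a fresh ChainDAG instance - the backfill
    # position and the backfilled blocks should be picked up from disk.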
    let
      validatorMonitor2 = newClone(ValidatorMonitor.init())

      dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})

    check:
      dag2.getFinalizedEpochRef() != nil

      dag2.getBlockRef(tailBlock.root).get().root == dag.tail.root
      dag2.getBlockRef(blocks[^2].root).isNone()

      dag2.getBlockIdAtSlot(dag.tail.slot).get().bid.root == dag.tail.root

      dag2.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
        blocks[^2].toBlockId().atSlot()
      dag2.getBlockIdAtSlot(dag.tail.slot - 2).get() ==
        blocks[^3].toBlockId().atSlot() # recovered from tailState

      dag2.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
test "Init without genesis / block":
|
2024-01-21 06:55:03 +00:00
|
|
|
let genBlock = get_initial_beacon_block(genState[])
|
2022-10-14 19:40:10 +00:00
|
|
|
|
|
|
|
ChainDAGRef.preInit(db, tailState[])
|
|
|
|
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.getFinalizedEpochRef() != nil
|
|
|
|
|
2024-02-09 19:44:54 +00:00
|
|
|
# Try importing blocks too early
|
|
|
|
for i in 0..<blocks.len - 1:
|
|
|
|
check dag.addBackfillBlock(blocks[i].phase0Data) ==
|
|
|
|
AddBackRes.err VerifierError.MissingParent
|
|
|
|
|
2022-10-14 19:40:10 +00:00
|
|
|
for i in 0..<blocks.len:
|
|
|
|
check: dag.addBackfillBlock(
|
|
|
|
blocks[blocks.len - i - 1].phase0Data).isOk()
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.addBackfillBlock(genBlock.phase0Data.asSigned).isOk()
|
|
|
|
dag.addBackfillBlock(
|
2022-11-10 17:40:27 +00:00
|
|
|
genBlock.phase0Data.asSigned) == AddBackRes.err VerifierError.Duplicate
|
2022-10-14 19:40:10 +00:00
|
|
|
|
2023-06-19 22:43:50 +00:00
|
|
|
let
|
|
|
|
rng = HmacDrbgContext.new()
|
|
|
|
taskpool = Taskpool.new()
|
2022-10-14 19:40:10 +00:00
|
|
|
var
|
|
|
|
cache: StateCache
|
2023-08-03 08:36:45 +00:00
|
|
|
verifier = BatchVerifier.init(rng, taskpool)
|
2022-10-14 19:40:10 +00:00
|
|
|
quarantine = newClone(Quarantine.init())
|
|
|
|
|
|
|
|
let
|
|
|
|
next = addTestBlock(tailState[], cache).phase0Data
|
|
|
|
nextAdd = dag.addHeadBlock(verifier, next, nilPhase0Callback).get()
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(nextAdd, quarantine[], [])
|
2022-10-14 19:40:10 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
validatorMonitor2 = newClone(ValidatorMonitor.init())
|
|
|
|
|
|
|
|
dag2 = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor2, {})
|
|
|
|
check:
|
|
|
|
dag2.head.root == next.root
|
|
|
|
|
2024-02-09 19:44:54 +00:00
|
|
|
test "Restart after each block":
|
|
|
|
ChainDAGRef.preInit(db, tailState[])
|
|
|
|
|
|
|
|
for i in 1..blocks.len:
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
|
|
|
|
check dag.backfill == (
|
|
|
|
if i > 1:
|
|
|
|
blocks[^(i - 1)].phase0Data.message.toBeaconBlockSummary()
|
|
|
|
else:
|
|
|
|
BeaconBlockSummary(
|
|
|
|
slot: blocks[^1].phase0Data.message.slot + 1,
|
|
|
|
parent_root: blocks[^1].phase0Data.root))
|
|
|
|
|
|
|
|
for j in 1..blocks.len:
|
|
|
|
if j < i:
|
|
|
|
check dag.addBackfillBlock(blocks[^j].phase0Data) ==
|
|
|
|
AddBackRes.err VerifierError.Duplicate
|
|
|
|
elif j > i:
|
|
|
|
check dag.addBackfillBlock(blocks[^j].phase0Data) ==
|
|
|
|
AddBackRes.err VerifierError.MissingParent
|
|
|
|
else:
|
|
|
|
discard
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.addBackfillBlock(blocks[^i].phase0Data).isOk()
|
|
|
|
dag.backfill == blocks[^i].phase0Data.message.toBeaconBlockSummary()
|
|
|
|
|
|
|
|
block:
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
genBlock = get_initial_beacon_block(genState[])
|
|
|
|
check:
|
|
|
|
dag.addBackfillBlock(genBlock.phase0Data.asSigned()).isOk()
|
|
|
|
dag.backfill == default(BeaconBlockSummary)
|
|
|
|
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
check dag.backfill == default(BeaconBlockSummary)
|
|
|
|
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch-aligned non-empty slot, then downloading
the state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer the finalized state as the main reference - this
allows simpler access control and caching on the server side - in
particular, this should help checkpoint-syncing from sources that have a
fast `finalized` state download (like infura and teku) but are slow when
accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
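The tests below check that a node started from a state alone records a backfill target of `tail.slot + 1` whose `parent_root` is the tail root, i.e. the checkpoint block itself is the first thing that needs to be backfilled. A tiny hedged sketch of that expectation (hypothetical names, not the real API):

# Illustration only: what the initial backfill target looks like when only a
# state is available (simplified stand-in types, not the ChainDAG fields).
type BackfillTarget = object
  slot: uint64
  parentRoot: string

proc initialBackfillTarget(tailSlot: uint64, tailRoot: string): BackfillTarget =
  ## With only a state (no tail block body), the first block we still need is
  ## the tail block itself, so the backfill pointer starts one slot past it,
  ## with the tail root as the expected parent of the next accepted block.
  BackfillTarget(slot: tailSlot + 1, parentRoot: tailRoot)

when isMainModule:
  let t = initialBackfillTarget(64, "tail-root")
  doAssert t.slot == 65 and t.parentRoot == "tail-root"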
suite "Starting states":
|
|
|
|
setup:
|
|
|
|
let
|
|
|
|
genState = (ref ForkedHashedBeaconState)(
|
2023-01-28 19:53:41 +00:00
|
|
|
kind: ConsensusFork.Phase0,
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
phase0Data: initialize_hashed_beacon_state_from_eth1(
|
|
|
|
defaultRuntimeConfig, ZERO_HASH, 0,
|
|
|
|
makeInitialDeposits(SLOTS_PER_EPOCH.uint64, flags = {skipBlsValidation}),
|
|
|
|
{skipBlsValidation}))
|
|
|
|
tailState = assignClone(genState[])
|
|
|
|
db = BeaconChainDB.new("", inMemory = true)
|
|
|
|
quarantine = newClone(Quarantine.init())
|
|
|
|
|
|
|
|
test "Starting state without block":
|
|
|
|
var
|
|
|
|
cache: StateCache
|
|
|
|
info: ForkedEpochInfo
|
|
|
|
let
|
|
|
|
genBlock = get_initial_beacon_block(genState[])
|
|
|
|
blocks = block:
|
|
|
|
var blocks: seq[ForkedSignedBeaconBlock]
|
|
|
|
while getStateField(tailState[], slot).uint64 + 1 < SLOTS_PER_EPOCH:
|
|
|
|
blocks.add addTestBlock(tailState[], cache)
|
|
|
|
blocks
|
|
|
|
tailBlock = blocks[^1]
|
|
|
|
|
|
|
|
check process_slots(
|
|
|
|
defaultRuntimeConfig, tailState[], Slot(SLOTS_PER_EPOCH), cache, info,
|
|
|
|
{}).isOk()
|
|
|
|
|
|
|
|
ChainDAGRef.preInit(db, tailState[])
|
|
|
|
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
|
|
|
|
|
|
|
|
# check that we can update head to itself
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(dag.head, quarantine[], [])
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
|
|
|
|
check:
|
|
|
|
dag.finalizedHead.toBlockSlotId()[] == BlockSlotId(
|
|
|
|
bid: dag.tail, slot: (dag.tail.slot.epoch+1).start_slot)
|
|
|
|
dag.getBlockRef(tailBlock.root).get().bid == dag.tail
|
|
|
|
dag.getBlockRef(blocks[^2].root).isNone()
|
|
|
|
|
|
|
|
dag.getBlockId(tailBlock.root).get() == dag.tail
|
|
|
|
dag.getBlockId(blocks[^2].root).isNone()
|
|
|
|
|
2024-02-09 10:13:00 +00:00
|
|
|
dag.getBlockIdAtSlot(Slot(0)).isSome() # recovered from tailState
|
|
|
|
dag.getBlockIdAtSlot(Slot(1)).isSome() # recovered from tailState
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
|
|
|
|
# Should get EpochRef for the tail however
|
|
|
|
# dag.getEpochRef(dag.tail, dag.tail.slot.epoch, true).isOk()
|
|
|
|
dag.getEpochRef(dag.tail, dag.tail.slot.epoch + 1, true).isOk()
|
|
|
|
|
|
|
|
# Should not get EpochRef for random block
|
|
|
|
dag.getEpochRef(
|
2024-02-09 10:13:00 +00:00
|
|
|
BlockId(root: blocks[^2].root, slot: dag.tail.slot), # incorrect slot
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
dag.tail.slot.epoch, true).isErr()
|
|
|
|
|
|
|
|
dag.getEpochRef(dag.tail, dag.tail.slot.epoch + 1, true).isOk()
|
|
|
|
|
|
|
|
dag.getFinalizedEpochRef() != nil
|
|
|
|
|
2024-02-09 19:44:54 +00:00
|
|
|
# Checkpoint block is unavailable, and should be backfileld first
|
|
|
|
not dag.containsBlock(dag.tail)
|
|
|
|
dag.backfill == BeaconBlockSummary(
|
|
|
|
slot: dag.tail.slot + 1,
|
|
|
|
parent_root: dag.tail.root)
|
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
|
|
|
|
|
|
|
# Check that we can propose right from the checkpoint state
|
|
|
|
dag.getProposalState(dag.head, dag.head.slot + 1, cache).isOk()
|
|
|
|
|
2024-02-09 19:44:54 +00:00
|
|
|
    var badBlock = blocks[^1].phase0Data
    badBlock.signature = blocks[^2].phase0Data.signature
    check:
      dag.addBackfillBlock(badBlock) == AddBackRes.err VerifierError.Invalid

    check:
      dag.addBackfillBlock(blocks[^3].phase0Data) ==
        AddBackRes.err VerifierError.MissingParent
      dag.addBackfillBlock(genBlock.phase0Data.asSigned()) ==
        AddBackRes.err VerifierError.MissingParent

      dag.addBackfillBlock(blocks[^2].phase0Data) ==
        AddBackRes.err VerifierError.MissingParent
      dag.addBackfillBlock(tailBlock.phase0Data).isOk()

    check:
      dag.addBackfillBlock(blocks[^2].phase0Data).isOk()

      dag.getBlockRef(tailBlock.root).get().bid == dag.tail
      dag.getBlockRef(blocks[^2].root).isNone()

      dag.getBlockId(tailBlock.root).get() == dag.tail
      dag.getBlockId(blocks[^2].root).get().root == blocks[^2].root

      dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
      dag.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
        blocks[^2].toBlockId().atSlot()
      dag.getBlockIdAtSlot(dag.tail.slot - 2).get() ==
        blocks[^3].toBlockId().atSlot() # recovered from tailState

      dag.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()

    check:
      dag.addBackfillBlock(blocks[^3].phase0Data).isOk()

      dag.getBlockIdAtSlot(dag.tail.slot - 2).get() ==
        blocks[^3].toBlockId().atSlot()
      dag.getBlockIdAtSlot(dag.tail.slot - 3).get() ==
        blocks[^4].toBlockId().atSlot() # recovered from tailState

    for i in 3..<blocks.len:
      check: dag.addBackfillBlock(blocks[blocks.len - i - 1].phase0Data).isOk()

    check:
      dag.addBackfillBlock(genBlock.phase0Data.asSigned).isOk()

      dag.backfill.slot == GENESIS_SLOT

    check:
      dag.getFinalizedEpochRef() != nil
suite "Latest valid hash" & preset():
|
|
|
|
setup:
|
2023-06-19 22:43:50 +00:00
|
|
|
let rng = HmacDrbgContext.new()
|
|
|
|
|
2022-09-27 12:11:47 +00:00
|
|
|
var runtimeConfig = defaultRuntimeConfig
|
|
|
|
runtimeConfig.ALTAIR_FORK_EPOCH = 1.Epoch
|
|
|
|
runtimeConfig.BELLATRIX_FORK_EPOCH = 2.Epoch
|
|
|
|
|
|
|
|
var
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, runtimeConfig, db, validatorMonitor, {})
|
2023-08-03 08:36:45 +00:00
|
|
|
taskpool = Taskpool.new()
|
|
|
|
verifier = BatchVerifier.init(rng, taskpool)
|
2022-09-27 12:11:47 +00:00
|
|
|
quarantine = newClone(Quarantine.init())
|
|
|
|
cache = StateCache()
|
|
|
|
info = ForkedEpochInfo()
|
|
|
|
state = newClone(dag.headState)
|
|
|
|
|
|
|
|
test "LVH searching":
|
|
|
|
# Reach Bellatrix, where execution payloads exist
|
|
|
|
check process_slots(
|
|
|
|
runtimeConfig, state[],
|
|
|
|
getStateField(state[], slot) + (3 * SLOTS_PER_EPOCH).uint64,
|
|
|
|
cache, info, {}).isOk()
|
|
|
|
|
|
|
|
var
|
|
|
|
b1 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
|
|
|
|
b1Add = dag.addHeadBlock(verifier, b1, nilBellatrixCallback)
|
|
|
|
b2 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
|
|
|
|
b2Add = dag.addHeadBlock(verifier, b2, nilBellatrixCallback)
|
|
|
|
b3 = addTestBlock(state[], cache, cfg = runtimeConfig).bellatrixData
|
|
|
|
b3Add = dag.addHeadBlock(verifier, b3, nilBellatrixCallback)
|
|
|
|
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(b3Add[], quarantine[], [])
|
2022-09-28 21:07:31 +00:00
|
|
|
check: dag.head.root == b3.root
|
|
|
|
|
|
|
|
# Ensure that head can go backwards in case of head being marked invalid
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(b2Add[], quarantine[], [])
|
2022-09-28 21:07:31 +00:00
|
|
|
check: dag.head.root == b2.root
|
|
|
|
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(b1Add[], quarantine[], [])
|
2022-09-28 21:07:31 +00:00
|
|
|
check: dag.head.root == b1.root
|
2022-09-27 12:11:47 +00:00
|
|
|
|
|
|
|
const fallbackEarliestInvalid =
|
|
|
|
Eth2Digest.fromHex("0x0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")
|
|
|
|
check:
|
|
|
|
# Represents where LVH is two behind the invalid-marked block (because
|
|
|
|
# first param is parent). It searches using LVH (i.e. execution hash),
|
|
|
|
# but returns CL block hash, because that's what fork choice and other
|
|
|
|
# Nimbus components mostly use as a coordinate system. Since b1 is set
|
|
|
|
# to be valid here by being the LVH, it means that b2 must be invalid.
|
|
|
|
dag.getEarliestInvalidBlockRoot(
|
|
|
|
b2Add[].root, b1.message.body.execution_payload.block_hash,
|
|
|
|
fallbackEarliestInvalid) == b2Add[].root
|
|
|
|
|
|
|
|
# This simulates calling it based on b3 (child of b2), where there's no
|
|
|
|
# gap in detecting the invalid blocks. Because the API, due to testcase
|
|
|
|
# design, does not assume the block being tested is in the DAG, there's
|
|
|
|
# a manually specified fallback (CL) block root to use, because it does
|
|
|
|
# not have access to this information otherwise, because the very first
|
|
|
|
# newest block in the chain it's examining is already valid.
|
|
|
|
dag.getEarliestInvalidBlockRoot(
|
|
|
|
b2Add[].root, b2.message.body.execution_payload.block_hash,
|
|
|
|
fallbackEarliestInvalid) == fallbackEarliestInvalid
|
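A minimal model of the backwards search described in the comments above, under simplifying assumptions (string roots and hashes, a table instead of the DAG; `earliestInvalid` is a hypothetical name, not the Nimbus signature): walk CL parent links until a block whose execution hash matches the latest valid hash, report its child as the earliest invalid block, and fall back to a caller-supplied root when the starting block is already valid.

# Illustration only: toy version of the LVH-based search, not ChainDAG code.
import std/tables

type Block = object
  parent: string      # CL parent root
  execHash: string    # execution payload hash

proc earliestInvalid(chain: Table[string, Block],
                     start, lvh, fallback: string): string =
  ## The child of the block whose execution hash equals the latest-valid-hash
  ## is the earliest invalid block; if `start` itself is valid, use `fallback`.
  if chain.getOrDefault(start).execHash == lvh:
    return fallback
  var child = start
  while child in chain:
    let parent = chain[child].parent
    if chain.getOrDefault(parent).execHash == lvh:
      return child
    child = parent
  child

when isMainModule:
  var chain = initTable[string, Block]()
  chain["b1"] = Block(parent: "b0", execHash: "e1")
  chain["b2"] = Block(parent: "b1", execHash: "e2")
  # LVH = e1 (b1 valid) => b2, the child of b1, is the earliest invalid block.
  doAssert earliestInvalid(chain, "b2", "e1", "fallback") == "b2"
  # Starting block already valid => fall back to the caller-provided root.
  doAssert earliestInvalid(chain, "b2", "e2", "fallback") == "fallback"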
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented, meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
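The suite below pins `MIN_EPOCHS_FOR_BLOCK_REQUESTS` to 4 and expects the tail to end up just before an `EPOCHS_PER_STATE_SNAPSHOT` boundary. As a hedged illustration of that kind of retention arithmetic (simplified constants and rounding, not the exact Nimbus pruning rule):

# Illustration only: simplified retention-horizon arithmetic.
const
  slotsPerEpoch = 32'u64
  epochsPerStateSnapshot = 4'u64     # assumption for illustration
  minEpochsForBlockRequests = 4'u64  # matches the value pinned by the suite

proc pruneCutoffSlot(headSlot: uint64): uint64 =
  ## Keep at least minEpochsForBlockRequests of history, then align the new
  ## tail to the previous state-snapshot boundary so a stored state exists
  ## from which the remaining history can be replayed.
  let headEpoch = headSlot div slotsPerEpoch
  if headEpoch < minEpochsForBlockRequests:
    return 0
  let cutoffEpoch = (headEpoch - minEpochsForBlockRequests) div
                    epochsPerStateSnapshot * epochsPerStateSnapshot
  cutoffEpoch * slotsPerEpoch

when isMainModule:
  # With 8 epochs of history, 4 epochs must stay available, so the tail can
  # move to the epoch-4 snapshot boundary at the earliest.
  doAssert pruneCutoffSlot(8 * slotsPerEpoch) == 4 * slotsPerEpoch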
suite "Pruning":
|
|
|
|
setup:
|
|
|
|
let
|
2023-06-19 22:43:50 +00:00
|
|
|
rng = HmacDrbgContext.new()
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
cfg = block:
|
|
|
|
var res = defaultRuntimeConfig
|
|
|
|
res.MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 4
|
|
|
|
res.CHURN_LIMIT_QUOTIENT = 1
|
2023-11-10 15:04:55 +00:00
|
|
|
res.MIN_EPOCHS_FOR_BLOCK_REQUESTS = res.safeMinEpochsForBlockRequests()
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
doAssert res.MIN_EPOCHS_FOR_BLOCK_REQUESTS == 4
|
|
|
|
res
|
|
|
|
db = makeTestDB(SLOTS_PER_EPOCH)
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
|
|
|
|
tmpState = assignClone(dag.headState)
|
|
|
|
|
|
|
|
var
|
2023-08-03 08:36:45 +00:00
|
|
|
taskpool = Taskpool.new()
|
|
|
|
verifier = BatchVerifier.init(rng, taskpool)
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
quarantine = Quarantine.init()
|
|
|
|
cache = StateCache()
|
|
|
|
blocks = @[dag.head]
|
|
|
|
|
|
|
|
for i in 0 ..< (SLOTS_PER_EPOCH * (EPOCHS_PER_STATE_SNAPSHOT + cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS)):
|
|
|
|
let blck = addTestBlock(
|
|
|
|
tmpState[], cache,
|
|
|
|
attestations = makeFullAttestations(
|
|
|
|
tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {})).phase0Data
|
|
|
|
let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
|
|
|
|
check: added.isOk()
|
|
|
|
blocks.add(added[])
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(added[], quarantine, [])
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
dag.pruneAtFinalization()
|
|
|
|
|
|
|
|
test "prune states":
|
|
|
|
dag.pruneHistory()
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.tail.slot == Epoch(EPOCHS_PER_STATE_SNAPSHOT).start_slot - 1
|
|
|
|
db.containsBlock(blocks[0].root)
|
|
|
|
db.containsBlock(blocks[1].root)
|
|
|
|
|
|
|
|
# Add a block
|
|
|
|
for i in 0..2:
|
|
|
|
let blck = addTestBlock(
|
|
|
|
tmpState[], cache,
|
|
|
|
attestations = makeFullAttestations(
|
|
|
|
tmpState[], dag.head.root, getStateField(tmpState[], slot), cache, {})).phase0Data
|
|
|
|
let added = dag.addHeadBlock(verifier, blck, nilPhase0Callback)
|
|
|
|
check: added.isOk()
|
2023-03-02 16:13:35 +00:00
|
|
|
dag.updateHead(added[], quarantine, [])
|
History pruning (fixes #4419) (#4445)
Introduce (optional) pruning of historical data - a pruned node will
continue to answer queries for historical data up to
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, or roughly 5 months, capping
typical database usage at around 60-70gb.
To enable pruning, add `--history=prune` to the command line - on the
first start, old data will be cleared (which may take a while) - after
that, data is pruned continuously.
When pruning an existing database, the database will not shrink -
instead, the freed space is recycled as the node continues to run - to
free up space, perform a trusted node sync with a fresh database.
When switching on archive mode in a pruned node, history is retained
from that point onwards.
History pruning is scheduled to be enabled by default in a future
release.
In this PR, `minimal` mode from #4419 is not implemented meaning
retention periods for states and blocks are always the same - depending
on user demand, a future PR may implement `minimal` as well.
2023-01-07 10:02:15 +00:00
|
|
|
dag.pruneAtFinalization()
|
|
|
|
|
|
|
|
dag.pruneHistory()
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.tail.slot == Epoch(EPOCHS_PER_STATE_SNAPSHOT).start_slot - 1
|
|
|
|
not db.containsBlock(blocks[1].root)
|
2023-05-15 15:41:30 +00:00
|
|
|
|
2024-02-09 10:13:00 +00:00
|
|
|
suite "State history":
|
|
|
|
test "getBlockIdAtSlot":
|
|
|
|
const numValidators = SLOTS_PER_EPOCH
|
|
|
|
let
|
|
|
|
cfg = defaultRuntimeConfig
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = ChainDAGRef.init(
|
|
|
|
cfg, makeTestDB(numValidators, cfg = cfg),
|
|
|
|
validatorMonitor, {})
|
|
|
|
quarantine = newClone(Quarantine.init())
|
|
|
|
rng = HmacDrbgContext.new()
|
|
|
|
taskpool = Taskpool.new()
|
|
|
|
var verifier = BatchVerifier.init(rng, taskpool)
|
|
|
|
|
|
|
|
var
|
|
|
|
cache: StateCache
|
|
|
|
info: ForkedEpochInfo
|
|
|
|
res: Result[void, cstring]
|
|
|
|
template state: untyped = dag.headState.phase0Data
|
|
|
|
|
|
|
|
let gen = get_initial_beacon_block(dag.headState).toBlockId()
|
|
|
|
check:
|
|
|
|
state.getBlockIdAtSlot(0.Slot) ==
|
|
|
|
Opt.some BlockSlotId.init(gen, 0.Slot)
|
|
|
|
state.getBlockIdAtSlot(1.Slot).isNone
|
|
|
|
|
|
|
|
# Miss 5 slots
|
|
|
|
res = process_slots(cfg, dag.headState, 5.Slot, cache, info, flags = {})
|
|
|
|
check res.isOk
|
|
|
|
for i in 0.Slot .. 5.Slot:
|
2024-05-22 11:56:37 +00:00
|
|
|
check state.getBlockIdAtSlot(i) == Opt.some BlockSlotId.init(gen, i)
|
2024-02-09 10:13:00 +00:00
|
|
|
check state.getBlockIdAtSlot(6.Slot).isNone
|
|
|
|
|
|
|
|
# Fill 5 slots
|
|
|
|
var bids: seq[BlockId]
|
|
|
|
for i in 0 ..< 5:
|
|
|
|
let blck = dag.headState.addTestBlock(cache, cfg = cfg)
|
|
|
|
bids.add blck.toBlockId()
|
|
|
|
let added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
|
|
|
|
check added.isOk()
|
|
|
|
dag.updateHead(added[], quarantine[], [])
|
|
|
|
for i in 0.Slot .. 5.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(gen, i)
|
|
|
|
for i in 6.Slot .. 10.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[(i - 6).int], i)
|
|
|
|
check state.getBlockIdAtSlot(11.Slot).isNone
|
|
|
|
|
|
|
|
# Jump to SLOTS_PER_HISTORICAL_ROOT
|
|
|
|
let periodSlot = SLOTS_PER_HISTORICAL_ROOT.Slot
|
|
|
|
res = process_slots(cfg, dag.headState, periodSlot, cache, info, flags = {})
|
|
|
|
for i in 0.Slot .. 5.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(gen, i)
|
|
|
|
for i in 6.Slot .. 10.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[(i - 6).int], i)
|
|
|
|
check:
|
|
|
|
state.getBlockIdAtSlot(11.Slot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], 11.Slot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], periodSlot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot + 1).isNone
|
|
|
|
|
|
|
|
# Create a block at periodSlot + 1
|
|
|
|
let
|
|
|
|
blck = dag.headState.addTestBlock(cache, cfg = cfg)
|
|
|
|
added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
|
|
|
|
check added.isOk()
|
|
|
|
dag.updateHead(added[], quarantine[], [])
|
|
|
|
for i in 0.Slot .. 5.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i).isNone
|
|
|
|
for i in 6.Slot .. 10.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[(i - 6).int], i)
|
|
|
|
check:
|
|
|
|
state.getBlockIdAtSlot(11.Slot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], 11.Slot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], periodSlot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot + 1) ==
|
|
|
|
Opt.some BlockSlotId.init(blck.toBlockId(), periodSlot + 1)
|
|
|
|
state.getBlockIdAtSlot(periodSlot + 2).isNone
|
|
|
|
|
|
|
|
# Go to periodSlot + 5
|
|
|
|
let plusFive = periodSlot + 5
|
|
|
|
res = process_slots(cfg, dag.headState, plusFive, cache, info, flags = {})
|
|
|
|
for i in 0.Slot .. 5.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i).isNone
|
|
|
|
for i in 6.Slot .. 10.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[(i - 6).int], i)
|
|
|
|
check:
|
|
|
|
state.getBlockIdAtSlot(11.Slot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], 11.Slot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], periodSlot)
|
|
|
|
for i in periodSlot + 1 .. plusFive:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(blck.toBlockId(), i)
|
|
|
|
check state.getBlockIdAtSlot(plusFive + 1).isNone
|
|
|
|
|
|
|
|
# Go to periodSlot + 6
|
|
|
|
let plusSix = periodSlot + 6
|
|
|
|
res = process_slots(cfg, dag.headState, plusSix, cache, info, flags = {})
|
|
|
|
for i in 0.Slot .. 6.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i).isNone
|
|
|
|
for i in 7.Slot .. 10.Slot:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[(i - 6).int], i)
|
|
|
|
check:
|
|
|
|
state.getBlockIdAtSlot(11.Slot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], 11.Slot)
|
|
|
|
state.getBlockIdAtSlot(periodSlot) ==
|
|
|
|
Opt.some BlockSlotId.init(bids[^1], periodSlot)
|
|
|
|
for i in periodSlot + 1 .. plusSix:
|
|
|
|
check state.getBlockIdAtSlot(i) ==
|
|
|
|
Opt.some BlockSlotId.init(blck.toBlockId(), i)
|
|
|
|
check state.getBlockIdAtSlot(plusSix + 1).isNone
|
|
|
|
|
2023-07-18 15:37:53 +00:00
|
|
|
suite "Ancestry":
|
|
|
|
test "ancestorSlot":
|
|
|
|
const numValidators = SLOTS_PER_EPOCH
|
|
|
|
let
|
|
|
|
cfg = defaultRuntimeConfig
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = ChainDAGRef.init(
|
|
|
|
cfg, makeTestDB(numValidators, cfg = cfg),
|
|
|
|
validatorMonitor, {})
|
|
|
|
quarantine = newClone(Quarantine.init())
|
|
|
|
rng = HmacDrbgContext.new()
|
|
|
|
taskpool = Taskpool.new()
|
|
|
|
|
|
|
|
type Node = tuple[blck: BlockRef, state: ref phase0.HashedBeaconState]
|
|
|
|
template bid(n: Node): BlockId = n.blck.bid
|
|
|
|
|
2023-08-03 08:36:45 +00:00
|
|
|
var verifier = BatchVerifier.init(rng, taskpool)
|
2023-07-18 15:37:53 +00:00
|
|
|
proc addBlock(parent: Node, slot: Slot): Node =
|
|
|
|
dag.updateHead(parent.blck, quarantine[], [])
|
|
|
|
|
|
|
|
var
|
|
|
|
cache: StateCache
|
|
|
|
info: ForkedEpochInfo
|
|
|
|
let res = process_slots(cfg, dag.headState, slot, cache, info, flags = {})
|
|
|
|
check res.isOk
|
|
|
|
|
|
|
|
let
|
|
|
|
blck = dag.headState.addTestBlock(cache, nextSlot = false, cfg = cfg)
|
2023-09-13 17:57:54 +00:00
|
|
|
added = dag.addHeadBlock(verifier, blck.phase0Data, nilPhase0Callback)
|
2023-07-18 15:37:53 +00:00
|
|
|
check added.isOk()
|
|
|
|
dag.updateHead(added[], quarantine[], [])
|
|
|
|
(blck: dag.head, state: newClone(dag.headState.phase0Data))
|
|
|
|
|
|
|
|
# s0
|
|
|
|
# / \
|
|
|
|
# s1 s3
|
|
|
|
# / \
|
|
|
|
# s2 s6
|
|
|
|
# / \ \
|
|
|
|
# s4 s5 s7
|
|
|
|
# \
|
|
|
|
# s8
|
|
|
|
# \
|
|
|
|
# s9
|
|
|
|
let
|
|
|
|
sg = (blck: dag.head, state: newClone(dag.headState.phase0Data))
|
|
|
|
s0 = sg.addBlock(Slot(10))
|
|
|
|
s1 = s0.addBlock(Slot(11))
|
|
|
|
s2 = s1.addBlock(Slot(12))
|
|
|
|
s3 = s0.addBlock(Slot(13))
|
|
|
|
s4 = s2.addBlock(Slot(14))
|
|
|
|
s5 = s2.addBlock(Slot(15))
|
|
|
|
s6 = s3.addBlock(Slot(16))
|
|
|
|
s7 = s6.addBlock(Slot(17))
|
|
|
|
s8 = s4.addBlock(Slot(18))
|
|
|
|
s9 = s8.addBlock(Slot(19))
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.ancestorSlot(s0.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s1.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s2.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s4.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s5.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s8.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s0.state[], s9.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
|
|
|
check dag.ancestorSlot(s0.state[], b.bid, Slot(11)) == Opt.none(Slot)
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.ancestorSlot(s1.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s2.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s4.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s5.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s8.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s1.state[], s9.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
for b in [s0, s3, s6, s7]:
|
|
|
|
check dag.ancestorSlot(s1.state[], b.bid, Slot(11)) == Opt.none(Slot)
|
|
|
|
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
|
|
|
check dag.ancestorSlot(s1.state[], b.bid, Slot(12)) == Opt.none(Slot)
|
|
|
|
|
|
|
|
check:
|
|
|
|
dag.ancestorSlot(s2.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s2.bid, Slot(10)) == Opt.some(s2.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s4.bid, Slot(10)) == Opt.some(s2.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s5.bid, Slot(10)) == Opt.some(s2.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s8.bid, Slot(10)) == Opt.some(s2.bid.slot)
|
|
|
|
dag.ancestorSlot(s2.state[], s9.bid, Slot(10)) == Opt.some(s2.bid.slot)
|
|
|
|
for b in [s0, s3, s6, s7]:
|
|
|
|
check dag.ancestorSlot(s2.state[], b.bid, Slot(11)) == Opt.none(Slot)
|
|
|
|
for b in [s0, s1, s3, s6, s7]:
|
|
|
|
check dag.ancestorSlot(s2.state[], b.bid, Slot(12)) == Opt.none(Slot)
|
|
|
|
for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
|
|
|
|
check dag.ancestorSlot(s2.state[], b.bid, Slot(13)) == Opt.none(Slot)
|
|
|
|
|
|
|
|
    check:
      dag.ancestorSlot(s3.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s1.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s2.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s3.bid, Slot(10)) == Opt.some(s3.bid.slot)
      dag.ancestorSlot(s3.state[], s4.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s5.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s6.bid, Slot(10)) == Opt.some(s3.bid.slot)
      dag.ancestorSlot(s3.state[], s7.bid, Slot(10)) == Opt.some(s3.bid.slot)
      dag.ancestorSlot(s3.state[], s8.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s3.state[], s9.bid, Slot(10)) == Opt.some(s0.bid.slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s3.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s3.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s3.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s3.state[], b.bid, Slot(14)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s4.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s4.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
      dag.ancestorSlot(s4.state[], s2.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s4.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s4.state[], s4.bid, Slot(10)) == Opt.some(s4.bid.slot)
      dag.ancestorSlot(s4.state[], s5.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s4.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s4.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s4.state[], s8.bid, Slot(10)) == Opt.some(s4.bid.slot)
      dag.ancestorSlot(s4.state[], s9.bid, Slot(10)) == Opt.some(s4.bid.slot)
    for b in [s0, s3, s6, s7]:
      check dag.ancestorSlot(s4.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s3, s6, s7]:
      check dag.ancestorSlot(s4.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s4.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s4.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s4.state[], b.bid, Slot(15)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s5.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s5.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
      dag.ancestorSlot(s5.state[], s2.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s5.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s5.state[], s4.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s5.state[], s5.bid, Slot(10)) == Opt.some(s5.bid.slot)
      dag.ancestorSlot(s5.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s5.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s5.state[], s8.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s5.state[], s9.bid, Slot(10)) == Opt.some(s2.bid.slot)
    for b in [s0, s3, s6, s7]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s3, s6, s7]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s6, s7, s8, s9]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(15)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s5.state[], b.bid, Slot(16)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s6.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s1.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s2.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s3.bid, Slot(10)) == Opt.some(s3.bid.slot)
      dag.ancestorSlot(s6.state[], s4.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s5.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s6.bid, Slot(10)) == Opt.some(s6.bid.slot)
      dag.ancestorSlot(s6.state[], s7.bid, Slot(10)) == Opt.some(s6.bid.slot)
      dag.ancestorSlot(s6.state[], s8.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s6.state[], s9.bid, Slot(10)) == Opt.some(s0.bid.slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(15)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(16)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s6.state[], b.bid, Slot(17)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s7.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s1.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s2.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s3.bid, Slot(10)) == Opt.some(s3.bid.slot)
      dag.ancestorSlot(s7.state[], s4.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s5.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s6.bid, Slot(10)) == Opt.some(s6.bid.slot)
      dag.ancestorSlot(s7.state[], s7.bid, Slot(10)) == Opt.some(s7.bid.slot)
      dag.ancestorSlot(s7.state[], s8.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s7.state[], s9.bid, Slot(10)) == Opt.some(s0.bid.slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(15)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(16)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(17)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s7.state[], b.bid, Slot(18)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s8.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s8.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
      dag.ancestorSlot(s8.state[], s2.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s8.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s8.state[], s4.bid, Slot(10)) == Opt.some(s4.bid.slot)
      dag.ancestorSlot(s8.state[], s5.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s8.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s8.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s8.state[], s8.bid, Slot(10)) == Opt.some(s8.bid.slot)
      dag.ancestorSlot(s8.state[], s9.bid, Slot(10)) == Opt.some(s8.bid.slot)
    for b in [s0, s3, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s3, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(15)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(16)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(17)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(18)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s8.state[], b.bid, Slot(19)) == Opt.none(Slot)

    check:
      dag.ancestorSlot(s9.state[], s0.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s9.state[], s1.bid, Slot(10)) == Opt.some(s1.bid.slot)
      dag.ancestorSlot(s9.state[], s2.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s9.state[], s3.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s9.state[], s4.bid, Slot(10)) == Opt.some(s4.bid.slot)
      dag.ancestorSlot(s9.state[], s5.bid, Slot(10)) == Opt.some(s2.bid.slot)
      dag.ancestorSlot(s9.state[], s6.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s9.state[], s7.bid, Slot(10)) == Opt.some(s0.bid.slot)
      dag.ancestorSlot(s9.state[], s8.bid, Slot(10)) == Opt.some(s8.bid.slot)
      dag.ancestorSlot(s9.state[], s9.bid, Slot(10)) == Opt.some(s9.bid.slot)
    for b in [s0, s3, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(11)) == Opt.none(Slot)
    for b in [s0, s1, s3, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(12)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(13)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(14)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(15)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(16)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(17)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(18)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(19)) == Opt.none(Slot)
    for b in [s0, s1, s2, s3, s4, s5, s6, s7, s8, s9]:
      check dag.ancestorSlot(s9.state[], b.bid, Slot(20)) == Opt.none(Slot)

template runShufflingTests(cfg: RuntimeConfig, numRandomTests: int) =
  const
    numValidators = SLOTS_PER_EPOCH
    targetNumValidators = 20 * SLOTS_PER_EPOCH * MAX_DEPOSITS
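  # Only `numValidators` are present at genesis; the remaining deposits are
  # included in the test blocks below (`allDeposits`), so the validator set
  # keeps growing and the branches end up with differing deposit histories.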
  var deposits = newSeqOfCap[Deposit](targetNumValidators)
  for depositIndex in 0 ..< targetNumValidators:
    deposits.add Deposit(data: makeDeposit(depositIndex.int, cfg = cfg))
  let
    eth1Data = Eth1Data(
      deposit_root: deposits.attachMerkleProofs(),
      deposit_count: deposits.lenu64)
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(
      cfg, makeTestDB(
        numValidators, eth1Data = Opt.some(eth1Data),
        flags = {}, cfg = cfg),
      validatorMonitor, {})
    quarantine = newClone(Quarantine.init())
    rng = HmacDrbgContext.new()
    taskpool = Taskpool.new()

  var
    verifier = BatchVerifier.init(rng, taskpool)
    graffiti: GraffitiBytes
  proc addBlocks(blocks: uint64, attested: bool, cache: var StateCache) =
    inc distinctBase(graffiti)[0] # Avoid duplicate blocks across branches
    for forkedBlck in makeTestBlocks(
        dag.headState, cache, blocks.int, eth1_data = eth1Data,
        attested = attested, allDeposits = deposits,
        graffiti = graffiti, cfg = cfg):
      let added = withBlck(forkedBlck):
        const nilCallback = (consensusFork.OnBlockAddedCallback)(nil)
        dag.addHeadBlock(verifier, forkyBlck, nilCallback)
      check added.isOk()
      dag.updateHead(added[], quarantine[], [])

  var states: seq[ref ForkedHashedBeaconState]

  # Genesis state
  states.add newClone(dag.headState)

  # Create a segment and cache the post state (0.75 epochs + empty slots)
  proc createSegment(attested: bool, delaySlots = 0.uint64) =
    var cache: StateCache

    # Add some empty slots to have different deposit history
    if delaySlots > 0:
      var info: ForkedEpochInfo
      check cfg.process_slots(
        dag.headState,
        getStateField(dag.headState, slot) + delaySlots,
        cache, info, flags = {}).isOk

    # Add 0.75 epochs
    addBlocks((SLOTS_PER_EPOCH * 3) div 4, attested = attested, cache)
    states.add newClone(dag.headState)

  # Linear part of history (3.75 epochs)
  for _ in 0 ..< 5:
    createSegment(attested = true)

  # Start branching (6 epochs + up to 0.5 epoch)
  func numDelaySlots(branchId: int): uint64 =
    branchId.uint64 * SLOTS_PER_EPOCH div 8
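  # For example, with a mainnet-style SLOTS_PER_EPOCH of 32, branch 0 adds no
  # delay while branch 1 prepends 32 div 8 = 4 empty slots to each of its
  # segments.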
  for a in 0 ..< 2:
    let oldHead = dag.head
    createSegment(attested = false, delaySlots = a.numDelaySlots)
    for b in 0 ..< 2:
      let oldHead = dag.head
      createSegment(attested = false, delaySlots = b.numDelaySlots)
      for _ in 0 ..< 3:
        createSegment(attested = false, delaySlots = a.numDelaySlots)
        createSegment(attested = false, delaySlots = b.numDelaySlots)
      dag.updateHead(oldHead, quarantine[], [])
    dag.updateHead(oldHead, quarantine[], [])
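  # This yields two branches off the linear history, each consisting of a stem
  # segment and two sub-branches of seven further segments, i.e. roughly
  # 6 epochs per path plus the per-branch delay slots.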

  # Cover entire range of epochs plus some extra
  const maxEpochOfInterest = compute_activation_exit_epoch(11.Epoch) + 2
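  # With the spec's MAX_SEED_LOOKAHEAD of 4 this works out to
  # 11 + 1 + 4 + 2 = epoch 18.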

  template checkShuffling(
      epochRef: Result[EpochRef, cstring],
      computedShufflingRefParam: Opt[ShufflingRef]) =
    ## Check that computed shuffling matches the one from `EpochRef`.
    block:
      let computedShufflingRef = computedShufflingRefParam
      if computedShufflingRef.isSome:
        check computedShufflingRef.get[] == epochRef.get.shufflingRef[]
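      # A `none` value here means the shuffling could not be computed for the
      # given block/epoch, in which case the comparison is skipped.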

  test "Accelerated shuffling computation":
    randomize()
    let forkBlocks = dag.forkBlocks.toSeq()
    for _ in 0 ..< numRandomTests: # Each test runs against _all_ cached states
      let
        blck = sample(forkBlocks).data
        epoch = rand(GENESIS_EPOCH .. maxEpochOfInterest)
      checkpoint "blck: " & $shortLog(blck) & " / epoch: " & $shortLog(epoch)

      let epochRef = dag.getEpochRef(blck, epoch, true)
      check epochRef.isOk

      let dependentBsi = dag.atSlot(blck.bid, epoch.attester_dependent_slot)
      check dependentBsi.isSome
      let
        memoryMix = dag.computeRandaoMixFromMemory(
          dependentBsi.get.bid, epoch.lowSlotForAttesterShuffling)
        databaseMix = dag.computeRandaoMixFromDatabase(
          dependentBsi.get.bid, epoch.lowSlotForAttesterShuffling)

      # If shuffling is computable from DAG, check its correctness
      epochRef.checkShuffling dag.computeShufflingRefFromMemory(blck, epoch)

      # Shuffling should be correct when starting from any cached state
      for state in states:
        withState(state[]):
          let
            stateEpoch = forkyState.data.get_current_epoch
            blckEpoch = blck.bid.slot.epoch
            minEpoch = min(stateEpoch, blckEpoch)
            shufflingRef = dag.computeShufflingRef(forkyState, blck, epoch)
            mix = dag.computeRandaoMix(forkyState,
              dependentBsi.get.bid, epoch.lowSlotForAttesterShuffling)
          if compute_activation_exit_epoch(minEpoch) <= epoch or
              dag.ancestorSlot(
                forkyState, dependentBsi.get.bid,
                epoch.lowSlotForAttesterShuffling).isNone:
            check:
              shufflingRef.isNone
              mix.isNone
          else:
            check shufflingRef.isSome
            epochRef.checkShuffling shufflingRef
            check:
              mix.isSome
              memoryMix.isNone or mix == memoryMix
              databaseMix.isNone or mix == databaseMix
            epochRef.checkShuffling Opt.some ShufflingRef(
              epoch: epoch,
              attester_dependent_root: dependentBsi.get.bid.root,
              shuffled_active_validator_indices: forkyState.data
                .get_shuffled_active_validator_indices(epoch, mix.get))
test "Accelerated shuffling computation (with epochRefState jump)":
|
|
|
|
# Test cases where `epochRefState` is set to a very old block
|
|
|
|
# that is advanced by several epochs to a recent slot.
|
|
|
|
#
|
|
|
|
# This is not dependent on the multilayer branching of the "Shufflings"
|
|
|
|
# suite, but a function of getEpochRef extending epochRefState towards
|
|
|
|
# a slot which it is essentially hallucinating a state, because it is
|
|
|
|
# not accounting for the blocks with deposits. As it takes non-trivial
|
|
|
|
# time to set up the "Shufflings" suite, we reuse its more complex DAG.
|
|
|
|
#
|
|
|
|
# The purely random fuzzing/tests have difficulty triggering this, because
|
|
|
|
# this needs to happen across a wide portion of the sampled range so that:
|
|
|
|
# (1) it checks a maximally early slot, both to create the gaps needed for
|
|
|
|
# (2) and (3), and to keep both blocks on the same forks, with maximal
|
|
|
|
# likelihood;
|
|
|
|
# (2) calls getEpochRef with a late enough epoch to trigger the
|
|
|
|
# hallucination of relevance (>= epoch 4 typically works); and
|
|
|
|
# (3) there then have to be enough slots between the last added block and
|
|
|
|
# the next state which will be sampled so that the validators can get
|
|
|
|
# active, after some spec 5 epoch delay. This pushes the lowest epoch
|
|
|
|
# possible to not much less than 8 which is already near the high end
|
|
|
|
# of the epoch sampling. Too early an epoch and it is within range of
|
|
|
|
# the headState check which gets it first, so the epochStateRef isn't
|
|
|
|
# exercised.
|
|
|
|
|
|
|
|
let forkBlocks = dag.forkBlocks.toSeq()
|
|
|
|
|
|
|
|
proc findKeyedBlck(m: Slot): int =
|
|
|
|
# Avoid depending on implementation details of how `forkBlocks` is ordered
|
|
|
|
for idx, fb in forkBlocks:
|
|
|
|
if fb.data.slot == m:
|
|
|
|
return idx
|
|
|
|
raiseAssert "Unreachable"
|
|
|
|
|
|
|
|
# The epoch for the first block can range from at least 4 to 10
|
|
|
|
for (blockIdx, epoch) in [
|
|
|
|
(findKeyedBlck(64.Slot), 10.Epoch),
|
|
|
|
(findKeyedBlck(255.Slot), 8.Epoch)]:
|
|
|
|
let
|
|
|
|
blck = forkBlocks[blockIdx].data
|
|
|
|
epochRef = dag.getEpochRef(blck, epoch, true)
|
|
|
|
doAssert epochRef.isOk
|
|
|
|
|
|
|
|
# If shuffling is computable from DAG, check its correctness
|
|
|
|
epochRef.checkShuffling dag.computeShufflingRefFromMemory(blck, epoch)
|
|
|
|
|
2023-07-15 20:16:56 +00:00
|
|
|
suite "Shufflings":
|
|
|
|
let cfg = defaultRuntimeConfig
|
|
|
|
runShufflingTests(cfg, numRandomTests = 150)
|
|
|
|
|
|
|
|
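# Same shuffling tests, but with Altair and Bellatrix enabled from genesis,
# i.e. a chain that is "merged" from the start.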
suite "Shufflings (merged)":
|
|
|
|
let cfg = block:
|
|
|
|
var cfg = defaultRuntimeConfig
|
|
|
|
cfg.ALTAIR_FORK_EPOCH = GENESIS_EPOCH
|
|
|
|
cfg.BELLATRIX_FORK_EPOCH = GENESIS_EPOCH
|
|
|
|
cfg
|
|
|
|
runShufflingTests(cfg, numRandomTests = 50)
|