# beacon_chain
# Copyright (c) 2020-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
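
# `ncli_db` is a command-line tool for inspecting, benchmarking and
# manipulating the beacon chain database and era files - see `DbCmd` below for
# the available sub-commands.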
import
  std/[os, stats, strformat, tables],
  snappy,
  chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
  ../beacon_chain/networking/network_metadata,
  ../beacon_chain/[beacon_chain_db, era_db],
  ../beacon_chain/consensus_object_pools/[blockchain_dag],
  ../beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
  ../beacon_chain/spec/[
    beaconstate, state_transition, state_transition_epoch, validator,
    ssz_codec],
  ../beacon_chain/sszdump,
  ../research/simutils,
  ./e2store, ./ncli_common, ./validator_db_aggregator

when defined(posix):
  import system/ansi_c

type Timers = enum
  tInit = "Initialize DB"
  tLoadBlock = "Load block from database"
  tLoadState = "Load state from database"
  tAdvanceSlot = "Advance slot, non-epoch"
  tAdvanceEpoch = "Advance slot, epoch"
  tApplyBlock = "Apply block, no slot processing"
  tDbLoad = "Database load"
  tDbStore = "Database store"

type
  DbCmd* {.pure.} = enum
    bench = "Run a replay benchmark for block and epoch processing"
    dumpState = "Extract a state from the database as-is - only works for states that have been explicitly stored"
    putState = "Store a given BeaconState in the database"
    dumpBlock = "Extract a (trusted) SignedBeaconBlock from the database"
    putBlock = "Store a given SignedBeaconBlock in the database, potentially updating some of the pointers"
    rewindState = "Extract any state from the database based on a given block and slot, replaying if needed"
    verifyEra = "Verify a single era file"
    exportEra = "Export historical data to era store in current directory"
    importEra = "Import era files to the database"
    validatorPerf
    validatorDb = "Create or update attestation performance database"

  DbConf = object
    databaseDir* {.
      defaultValue: "db"
      desc: "Directory where `nbc.sqlite` is stored"
      name: "db".}: InputDir

    eraDir* {.
      defaultValue: "era"
      desc: "Directory where era files are read from"
      name: "era-dir".}: string

    eth2Network* {.
      desc: "The Eth2 network preset to use"
      name: "network".}: Option[string]

    case cmd* {.
      command
      desc: ""
      .}: DbCmd

    of DbCmd.bench:
      benchSlot* {.
        defaultValue: 0
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      benchSlots* {.
        defaultValue: 50000
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
      storeBlocks* {.
        defaultValue: false
        desc: "Store each read block back into a separate database".}: bool
      storeStates* {.
        defaultValue: false
        name: "store-states"
        desc: "Store a state each epoch into a separate database".}: bool
      printTimes* {.
        defaultValue: true
        name: "print-times"
        desc: "Print csv of block processing time".}: bool
      resetCache* {.
        defaultValue: false
        name: "reset-cache"
        desc: "Process each block with a fresh cache".}: bool

    of DbCmd.dumpState:
      stateRoot* {.
        argument
        name: "state-root"
        desc: "State root(s) to save".}: seq[string]

    of DbCmd.putState:
      stateFile {.
        argument
        name: "file"
        desc: "Files to import".}: seq[string]

    of DbCmd.dumpBlock:
      blockRootx* {.
        argument
        name: "block-root"
        desc: "Block root(s) to save".}: seq[string]

    of DbCmd.putBlock:
      blckFile {.
        argument
        name: "file"
        desc: "Files to import".}: seq[string]
      setHead {.
        defaultValue: false
        name: "set-head"
        desc: "Update head to this block"}: bool
      setTail {.
        defaultValue: false
        name: "set-tail"
        desc: "Update tail to this block"}: bool
      setGenesis {.
        defaultValue: false
        name: "set-genesis"
        desc: "Update genesis to this block"}: bool

    of DbCmd.rewindState:
      blockRoot* {.
        argument
        name: "block-root"
        desc: "Block root".}: string
      slot* {.
        argument
        desc: "Slot".}: uint64

    of DbCmd.verifyEra:
      eraFile* {.
        desc: "Era file name".}: string

    of DbCmd.exportEra:
      era* {.
        defaultValue: 0
        desc: "The era number to write".}: uint64
      eraCount* {.
        defaultValue: 0
        name: "count"
        desc: "Number of eras to write (0=all)".}: uint64

    of DbCmd.importEra:
      eraFiles* {.
        argument
        name: "file"
        desc: "The name of the era file(s) to import".}: seq[string]

    of DbCmd.validatorPerf:
      perfSlot* {.
        defaultValue: -128 * SLOTS_PER_EPOCH.int64
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      perfSlots* {.
        defaultValue: 0
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64

    of DbCmd.validatorDb:
      outDir* {.
        name: "out-dir"
        abbr: "o"
        desc: "Output directory".}: string
      startEpoch* {.
        name: "start-epoch"
        abbr: "s"
        desc: "Epoch from which to start recording statistics. " &
              "By default one past the last epoch in the output directory".}: Option[uint]
      endEpoch* {.
        name: "end-epoch"
        abbr: "e"
        desc: "The last epoch for which to record statistics. " &
              "By default the last epoch in the input database".}: Option[uint]
      resolution {.
        defaultValue: 225,
        name: "resolution"
        abbr: "r"
        desc: "How many epochs to be aggregated in a single compacted file".}: uint
      writeAggregatedFiles {.
        name: "aggregated"
        defaultValue: true
        abbr: "a"
        desc: "Whether to write aggregated files for a range of epochs with a given resolution".}: bool
      writeUnaggregatedFiles {.
        name: "unaggregated"
        defaultValue: true
        abbr: "u"
        desc: "Whether to write unaggregated file for each epoch".}: bool

var shouldShutDown = false

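# Compute the [start, ends) slot range to process: a negative `startSlot` is
# interpreted as an offset backwards from the current head, and `count == 0`
# means "all the way to the head".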
func getSlotRange(dag: ChainDAGRef, startSlot: int64, count: uint64): (Slot, Slot) =
  let
    start =
      if startSlot >= 0: Slot(startSlot)
      elif uint64(-startSlot) >= dag.head.slot: Slot(0)
      else: dag.head.slot - uint64(-startSlot)
    ends =
      if count == 0: dag.head.slot + 1
      else: start + count
  (start, ends)

from ../beacon_chain/spec/datatypes/capella import
  HashedBeaconState, TrustedSignedBeaconBlock

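# Replay blocks from the database on top of the state at the start of the
# selected slot range, timing database init, block/state loading, slot/epoch
# processing and block application (see `Timers` above).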
proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
  var timers: array[Timers, RunningStat]

  echo "Opening database..."
  let
    db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
    dbBenchmark = BeaconChainDB.new("benchmark")
  defer:
    db.close()
    dbBenchmark.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized: ", v.error()
    quit 1

  echo "Initializing block pool..."
  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = withTimerRet(timers[tInit]):
      ChainDAGRef.init(cfg, db, validatorMonitor, {}, conf.eraDir)

  var
    (start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
    blockRefs = dag.getBlockRange(max(start, Slot 1), ends)
    blocks: (
      seq[phase0.TrustedSignedBeaconBlock],
      seq[altair.TrustedSignedBeaconBlock],
      seq[bellatrix.TrustedSignedBeaconBlock],
      seq[capella.TrustedSignedBeaconBlock],
      seq[deneb.TrustedSignedBeaconBlock])

  echo &"Loaded head slot {dag.head.slot}, selected {blockRefs.len} blocks"
  doAssert blockRefs.len() > 0, "Must select at least one block"

  for b in 0..<blockRefs.len:
    let blck = blockRefs[blockRefs.len - b - 1]

    withTimer(timers[tLoadBlock]):
      case cfg.consensusForkAtEpoch(blck.slot.epoch)
      of ConsensusFork.Phase0:
        blocks[0].add dag.db.getBlock(
          blck.root, phase0.TrustedSignedBeaconBlock).get()
      of ConsensusFork.Altair:
        blocks[1].add dag.db.getBlock(
          blck.root, altair.TrustedSignedBeaconBlock).get()
      of ConsensusFork.Bellatrix:
        blocks[2].add dag.db.getBlock(
          blck.root, bellatrix.TrustedSignedBeaconBlock).get()
      of ConsensusFork.Capella:
        blocks[3].add dag.db.getBlock(
          blck.root, capella.TrustedSignedBeaconBlock).get()
      of ConsensusFork.Deneb:
        blocks[4].add dag.db.getBlock(
          blck.root, deneb.TrustedSignedBeaconBlock).get()

  let stateData = newClone(dag.headState)

  var
    cache = StateCache()
    info = ForkedEpochInfo()
    loadedState = (
      (ref phase0.HashedBeaconState)(),
      (ref altair.HashedBeaconState)(),
      (ref bellatrix.HashedBeaconState)(),
      (ref capella.HashedBeaconState)(),
      (ref deneb.HashedBeaconState)())

  withTimer(timers[tLoadState]):
    doAssert dag.updateState(
      stateData[],
      dag.atSlot(blockRefs[^1], blockRefs[^1].slot - 1).expect("not nil"),
      false, cache)

  template processBlocks(blocks: auto) =
    for b in blocks.mitems():
      if shouldShutDown: quit QuitSuccess
      while getStateField(stateData[], slot) < b.message.slot:
        let isEpoch = (getStateField(stateData[], slot) + 1).is_epoch()
        withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
          process_slots(
            dag.cfg, stateData[], getStateField(stateData[], slot) + 1, cache,
            info, {}).expect("Slot processing can't fail with correct inputs")

      var start = Moment.now()
      withTimer(timers[tApplyBlock]):
        if conf.resetCache:
          cache = StateCache()
        let res = state_transition_block(
          dag.cfg, stateData[], b, cache, {}, noRollback)
        if res.isErr():
          dump("./", b)
          echo "State transition failed (!) ", res.error()
          quit 1
      if conf.printTimes:
        echo b.message.slot, ",", toHex(b.root.data), ",", nanoseconds(Moment.now() - start)
      if conf.storeBlocks:
        withTimer(timers[tDbStore]):
          dbBenchmark.putBlock(b)

      withState(stateData[]):
        if forkyState.data.slot.is_epoch and conf.storeStates:
          if forkyState.data.slot.epoch < 2:
            dbBenchmark.putState(forkyState.root, forkyState.data)
            dbBenchmark.checkpoint()
          else:
            withTimer(timers[tDbStore]):
              dbBenchmark.putState(forkyState.root, forkyState.data)
              dbBenchmark.checkpoint()

            withTimer(timers[tDbLoad]):
              case consensusFork
              of ConsensusFork.Phase0:
                doAssert dbBenchmark.getState(
                  forkyState.root, loadedState[0][].data, noRollback)
              of ConsensusFork.Altair:
                doAssert dbBenchmark.getState(
                  forkyState.root, loadedState[1][].data, noRollback)
              of ConsensusFork.Bellatrix:
                doAssert dbBenchmark.getState(
                  forkyState.root, loadedState[2][].data, noRollback)
              of ConsensusFork.Capella:
                doAssert dbBenchmark.getState(
                  forkyState.root, loadedState[3][].data, noRollback)
              of ConsensusFork.Deneb:
                doAssert dbBenchmark.getState(
                  forkyState.root, loadedState[4][].data, noRollback)

            if forkyState.data.slot.epoch mod 16 == 0:
              let loadedRoot = case consensusFork
                of ConsensusFork.Phase0: hash_tree_root(loadedState[0][].data)
                of ConsensusFork.Altair: hash_tree_root(loadedState[1][].data)
                of ConsensusFork.Bellatrix: hash_tree_root(loadedState[2][].data)
                of ConsensusFork.Capella: hash_tree_root(loadedState[3][].data)
                of ConsensusFork.Deneb: hash_tree_root(loadedState[4][].data)
              doAssert hash_tree_root(forkyState.data) == loadedRoot

  processBlocks(blocks[0])
  processBlocks(blocks[1])
  processBlocks(blocks[2])
  processBlocks(blocks[3])
  processBlocks(blocks[4])

  printTimers(false, timers)

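# Dump the states identified by `--state-root` to the current directory as SSZ,
# trying each supported fork in turn.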
proc cmdDumpState(conf: DbConf) =
  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer: db.close()

  let
    phase0State = (ref phase0.HashedBeaconState)()
    altairState = (ref altair.HashedBeaconState)()
    bellatrixState = (ref bellatrix.HashedBeaconState)()
    capellaState = (ref capella.HashedBeaconState)()

  for stateRoot in conf.stateRoot:
    if shouldShutDown: quit QuitSuccess
    template doit(state: untyped) =
      try:
        state.root = Eth2Digest.fromHex(stateRoot)
        if db.getState(state.root, state.data, noRollback):
          dump("./", state)
          continue
      except CatchableError as e:
        echo "Couldn't load ", state.root, ": ", e.msg

    doit(phase0State[])
    doit(altairState[])
    doit(bellatrixState[])
    doit(capellaState[])

    echo "Couldn't load ", stateRoot

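# Import SSZ-encoded BeaconState files into the database.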
proc cmdPutState(conf: DbConf, cfg: RuntimeConfig) =
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  for file in conf.stateFile:
    if shouldShutDown: quit QuitSuccess
    let state = newClone(readSszForkedHashedBeaconState(
      cfg, readAllBytes(file).tryGet()))
    withState(state[]):
      db.putState(forkyState)

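# Dump the (trusted) blocks identified by `--block-root` to the current
# directory as SSZ, trying each supported fork in turn.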
proc cmdDumpBlock(conf: DbConf) =
  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer: db.close()

  for blockRoot in conf.blockRootx:
    if shouldShutDown: quit QuitSuccess
    try:
      let root = Eth2Digest.fromHex(blockRoot)
      if (let blck = db.getBlock(
          root, phase0.TrustedSignedBeaconBlock); blck.isSome):
        dump("./", blck.get())
      elif (let blck = db.getBlock(
          root, altair.TrustedSignedBeaconBlock); blck.isSome):
        dump("./", blck.get())
      elif (let blck = db.getBlock(root, bellatrix.TrustedSignedBeaconBlock); blck.isSome):
        dump("./", blck.get())
      elif (let blck = db.getBlock(root, capella.TrustedSignedBeaconBlock); blck.isSome):
        dump("./", blck.get())
      else:
        echo "Couldn't load ", blockRoot
    except CatchableError as e:
      echo "Couldn't load ", blockRoot, ": ", e.msg

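# Import SSZ-encoded SignedBeaconBlock files into the database, optionally
# updating the head, tail and genesis pointers to the imported block.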
proc cmdPutBlock(conf: DbConf, cfg: RuntimeConfig) =
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  for file in conf.blckFile:
    if shouldShutDown: quit QuitSuccess

    let blck = readSszForkedSignedBeaconBlock(
      cfg, readAllBytes(file).tryGet())

    withBlck(blck.asTrusted()):
      db.putBlock(blck)
      if conf.setHead:
        db.putHeadBlock(blck.root)
      if conf.setTail:
        db.putTailBlock(blck.root)
      if conf.setGenesis:
        db.putGenesisBlock(blck.root)

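# Replay the chain up to the given block and slot, then dump the resulting
# state to the current directory.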
proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
  echo "Opening database..."
  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer: db.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized: ", v.error()
    quit 1

  echo "Initializing block pool..."
  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, conf.eraDir)

  let bid = dag.getBlockId(fromHex(Eth2Digest, conf.blockRoot)).valueOr:
    echo "Block not found in database"
    return

  let tmpState = assignClone(dag.headState)
  dag.withUpdatedState(
      tmpState[], dag.atSlot(bid, Slot(conf.slot)).expect("block found")) do:
    echo "Writing state..."
    withState(updatedState):
      dump("./", forkyState)
  do: raiseAssert "withUpdatedState failed"

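# Map `slot` to a `BlockSlotId` on the chain of `bid`: the genesis block for
# slot 0, otherwise the most recent block at or before `slot - 1` paired with
# `slot` itself.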
func atCanonicalSlot(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] =
  if slot == 0:
    dag.getBlockIdAtSlot(GENESIS_SLOT)
  else:
    ok BlockSlotId.init((? dag.atSlot(bid, slot - 1)).bid, slot)
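
# Open a single era file, verify its contents against the given network
# configuration and print the resulting era root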
proc cmdVerifyEra(conf: DbConf, cfg: RuntimeConfig) =
  let
    f = EraFile.open(conf.eraFile).valueOr:
      echo error
      quit 1
    root = f.verify(cfg).valueOr:
      echo error
      quit 1

  echo root
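
# Export the finalized history in the database as era files, one era at a
# time, skipping eras for which a file already exists on disk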
proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer: db.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    fatal "Database not initialized", error = v.error()
    quit 1

  type Timers = enum
    tState
    tBlocks

  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, conf.eraDir)

  let tmpState = assignClone(dag.headState)

  var
    tmp: seq[byte]
    timers: array[Timers, RunningStat]

  var
    era = Era(conf.era)
    missingHistory = false

  while conf.eraCount == 0 or era < Era(conf.era) + conf.eraCount:
    defer: era += 1
    if shouldShutDown:
      break

    # Era files hold the blocks for the "previous" era, and the first state in
    # the era itself
    let
      firstSlot =
        if era == 0: none(Slot)
        else: some((era - 1).start_slot)
      endSlot = era.start_slot

    if endSlot > dag.head.slot:
      notice "Written all complete eras", era, endSlot, head = dag.head
      break
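
    # The era root - derived from the genesis validators root and the
    # historical roots / summaries covering this era - becomes part of the
    # era file name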
    let
      eraRoot = withState(dag.headState):
        eraRoot(
          forkyState.data.genesis_validators_root,
          forkyState.data.historical_roots.asSeq,
          dag.headState.historical_summaries().asSeq,
          era).expect("have era root since we checked slot")
      name = eraFileName(cfg, era, eraRoot)

    if isFile(name):
      debug "Era file already exists", era, name
      era += 1
      continue

    # Check if we reasonably could write the era file given what's in the
    # database - we perform this check after checking for existing era files
    # since the database might have been pruned up to the "existing" era files!
    if endSlot < dag.tail.slot and era != 0:
      notice "Skipping era, state history not available",
        era, tail = shortLog(dag.tail)
      missingHistory = true
      continue

    let
      eraBid = dag.atSlot(dag.head.bid, endSlot).valueOr:
        notice "Skipping era, blocks not available", era, name
        missingHistory = true
        continue

    withTimer(timers[tState]):
      var cache: StateCache
      if not updateState(dag, tmpState[], eraBid, false, cache):
        notice "Skipping era, state history not available", era, name
        missingHistory = true
        continue
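
    # Write to a temporary file and only rename it into place once the era
    # group has been written completely, so that a partially written file is
    # never mistaken for a finished era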
    info "Writing", name
    let tmpName = name & ".tmp"
    var completed = false
    block writeFileBlock:
      let e2 = openFile(tmpName, {OpenFlags.Write, OpenFlags.Create, OpenFlags.Truncate}).get()
      defer: discard closeFile(e2)

      var group = EraGroup.init(e2, firstSlot).get()
      if firstSlot.isSome():
        withTimer(timers[tBlocks]):
          var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
          for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
            if not dag.getBlockSZ(blocks[i], tmp):
              break writeFileBlock
            group.update(e2, blocks[i].slot, tmp).get()

      withState(tmpState[]):
        group.finish(e2, forkyState.data).get()
      completed = true

    if completed:
      try:
        moveFile(tmpName, name)
      except IOError as e:
        warn "Failed to rename era file to its final name",
          name, tmpName, error = e.msg
    else:
      if (let e = io2.removeFile(tmpName); e.isErr):
        warn "Failed to clean up incomplete era file", tmpName, error = e.error

  if missingHistory:
    notice "Some era files were not written due to missing state history - see https://nimbus.guide/trusted-node-sync.html#recreate-historical-state-access-indices for more information"

  printTimers(true, timers)
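
# Import blocks from the given era files into the database - states are
# skipped and can be recreated later by reindexing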
proc cmdImportEra(conf: DbConf, cfg: RuntimeConfig) =
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  type Timers = enum
    tBlock
    tState

  var
    blocks = 0
    states = 0
    others = 0
    timers: array[Timers, RunningStat]

  var data: seq[byte]
  for file in conf.eraFiles:
    if shouldShutDown: quit QuitSuccess

    let f = openFile(file, {OpenFlags.Read}).valueOr:
      warn "Can't open", file
      continue
    defer: discard closeFile(f)

    while true:
      let header = readRecord(f, data).valueOr:
        break

      if header.typ == SnappyBeaconBlock:
        withTimer(timers[tBlock]):
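          # Era files store blocks as framed snappy - decompress the frame
          # before SSZ-decoding the block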
          let uncompressed = decodeFramed(data)
          let blck = try: readSszForkedSignedBeaconBlock(cfg, uncompressed)
          except CatchableError as exc:
            error "Invalid snappy block", msg = exc.msg, file
            continue

          withBlck(blck.asTrusted()):
            db.putBlock(blck)
        blocks += 1
      elif header.typ == SnappyBeaconState:
        info "Skipping beacon state (use reindexing to recreate state snapshots)"
        states += 1
      else:
        info "Skipping record", typ = toHex(header.typ)
        others += 1

  notice "Done", blocks, states, others
  printTimers(true, timers)

type
  # Validator performance metrics tool based on
  # https://github.com/paulhauner/lighthouse/blob/etl/lcli/src/etl/validator_performance.rs
  # Credits to Paul Hauner
  ValidatorPerformance = object
    attestation_hits: uint64
    attestation_misses: uint64
    head_attestation_hits: uint64
    head_attestation_misses: uint64
    target_attestation_hits: uint64
    target_attestation_misses: uint64
    first_slot_head_attester_when_first_slot_empty: uint64
    first_slot_head_attester_when_first_slot_not_empty: uint64
    delays: Table[uint64, uint64]
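
# Replay a range of blocks and collect per-validator attestation performance
# (hits, misses and inclusion delays), printed as CSV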
proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
  echo "Opening database..."
  let
    db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer:
    db.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized: ", v.error()
    quit 1

  echo "# Initializing block pool..."
  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, conf.eraDir)

  var
    (start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots)
    blockRefs = dag.getBlockRange(start, ends)
    perfs = newSeq[ValidatorPerformance](
      getStateField(dag.headState, validators).len())
    cache = StateCache()
    info = ForkedEpochInfo()
    blck: phase0.TrustedSignedBeaconBlock

  doAssert blockRefs.len() > 0, "Must select at least one block"

  echo "# Analyzing performance for epochs ",
    blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch

  let state = newClone(dag.headState)
  doAssert dag.updateState(
    state[],
    dag.atSlot(blockRefs[^1], blockRefs[^1].slot - 1).expect("block found"),
    false, cache)
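
  # Tally the attestation performance of the epoch that just finished, using
  # the per-validator flags collected during epoch processing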
  proc processEpoch() =
    let
      prev_epoch_target_slot =
        state[].get_previous_epoch().start_slot()
      penultimate_epoch_end_slot =
        if prev_epoch_target_slot == 0: Slot(0)
        else: prev_epoch_target_slot - 1
      first_slot_empty =
        state[].get_block_root_at_slot(prev_epoch_target_slot) ==
        state[].get_block_root_at_slot(penultimate_epoch_end_slot)

    let first_slot_attesters = block:
      let committees_per_slot = state[].get_committee_count_per_slot(
        prev_epoch_target_slot.epoch, cache)
      var indices = HashSet[ValidatorIndex]()
      for committee_index in get_committee_indices(committees_per_slot):
        for validator_index in state[].get_beacon_committee(
            prev_epoch_target_slot, committee_index, cache):
          indices.incl(validator_index)
      indices

    case info.kind
    of EpochInfoFork.Phase0:
      template info: untyped = info.phase0Data
      for i, s in info.validators:
        let perf = addr perfs[i]
        if RewardFlags.isActiveInPreviousEpoch in s.flags:
          if s.is_previous_epoch_attester.isSome():
            perf.attestation_hits += 1;

            if RewardFlags.isPreviousEpochHeadAttester in s.flags:
              perf.head_attestation_hits += 1
            else:
              perf.head_attestation_misses += 1

            if RewardFlags.isPreviousEpochTargetAttester in s.flags:
              perf.target_attestation_hits += 1
            else:
              perf.target_attestation_misses += 1

            if i.ValidatorIndex in first_slot_attesters:
              if first_slot_empty:
                perf.first_slot_head_attester_when_first_slot_empty += 1
              else:
                perf.first_slot_head_attester_when_first_slot_not_empty += 1

            if s.is_previous_epoch_attester.isSome():
              perf.delays.mgetOrPut(
                s.is_previous_epoch_attester.get().delay, 0'u64) += 1
          else:
            perf.attestation_misses += 1;
    of EpochInfoFork.Altair:
      echo "TODO altair"

  if shouldShutDown: quit QuitSuccess

  for bi in 0..<blockRefs.len:
    blck = db.getBlock(
      blockRefs[blockRefs.len - bi - 1].root,
      phase0.TrustedSignedBeaconBlock).get()

    while getStateField(state[], slot) < blck.message.slot:
      let
        nextSlot = getStateField(state[], slot) + 1
        flags =
          if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
          else: {}

      process_slots(
        dag.cfg, state[], nextSlot, cache, info, flags).expect(
          "Slot processing can't fail with correct inputs")

      if getStateField(state[], slot).is_epoch():
        processEpoch()

    let res = state_transition_block(
      dag.cfg, state[], blck, cache, {}, noRollback)
    if res.isErr:
      echo "State transition failed (!) ", res.error()
      quit 1

  # Capture rewards of empty slots as well
  while getStateField(state[], slot) < ends:
    process_slots(
      dag.cfg, state[], getStateField(state[], slot) + 1, cache,
      info, {}).expect("Slot processing can't fail with correct inputs")

    if getStateField(state[], slot).is_epoch():
      processEpoch()

  echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
  for i, perf in perfs:
    var
      count = 0'u64
      sum = 0'u64
    for delay, n in perf.delays:
      count += n
      sum += delay * n
    echo i, ",",
      perf.attestation_hits, ",",
      perf.attestation_misses, ",",
      perf.head_attestation_hits, ",",
      perf.head_attestation_misses, ",",
      perf.target_attestation_hits, ",",
      perf.target_attestation_misses, ",",
      if count == 0: 0.0
      else: sum.float / count.float, ",",
      perf.first_slot_head_attester_when_first_slot_empty, ",",
      perf.first_slot_head_attester_when_first_slot_not_empty
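
# Helpers for the validator reward database: a raw table keyed by validator
# index plus a view that renders pubkeys as hex strings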
proc createValidatorsRawTable(db: SqStoreRef) =
  db.exec("""
    CREATE TABLE IF NOT EXISTS validators_raw(
      validator_index INTEGER PRIMARY KEY,
      pubkey BLOB NOT NULL UNIQUE
    );
  """).expect("DB")

proc createValidatorsView(db: SqStoreRef) =
  db.exec("""
    CREATE VIEW IF NOT EXISTS validators AS
    SELECT
      validator_index,
      '0x' || lower(hex(pubkey)) as pubkey
    FROM validators_raw;
  """).expect("DB")

proc createInsertValidatorProc(db: SqStoreRef): auto =
  db.prepareStmt("""
    INSERT OR IGNORE INTO validators_raw(
      validator_index,
      pubkey)
    VALUES(?, ?);""",
    (int64, array[48, byte]), void).expect("DB")

proc collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
  withState(forkedState):
    balances = seq[uint64](forkyState.data.balances.data)
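
# The expected balance change of a validator over an epoch is the sum of its
# reward and penalty components plus deposits, minus the inactivity penalty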
proc calculateDelta(info: RewardsAndPenalties): int64 =
  info.source_outcome +
  info.target_outcome +
  info.head_outcome +
  info.inclusion_delay_outcome +
  info.sync_committee_outcome +
  info.proposer_outcome +
  info.slashing_outcome -
  info.inactivity_penalty.int64 +
  info.deposits.int64

proc printComponents(info: RewardsAndPenalties) =
  echo "Components:"
  echo "Source outcome: ", info.source_outcome
  echo "Target outcome: ", info.target_outcome
  echo "Head outcome: ", info.head_outcome
  echo "Inclusion delay outcome: ", info.inclusion_delay_outcome
  echo "Sync committee outcome: ", info.sync_committee_outcome
  echo "Proposer outcome: ", info.proposer_outcome
  echo "Slashing outcome: ", info.slashing_outcome
  echo "Inactivity penalty: ", info.inactivity_penalty
  echo "Deposits: ", info.deposits

proc checkBalance(validatorIndex: int64,
                  validator: RewardStatus | ParticipationInfo,
                  currentEpochBalance, previousEpochBalance: int64,
                  validatorInfo: RewardsAndPenalties) =
  let delta = validatorInfo.calculateDelta
  if currentEpochBalance == previousEpochBalance + delta:
    return
  echo "Validator: ", validatorIndex
  echo "Is eligible: ", is_eligible_validator(validator)
  echo "Current epoch balance: ", currentEpochBalance
  echo "Previous epoch balance: ", previousEpochBalance
  echo "State delta: ", currentEpochBalance - previousEpochBalance
  echo "Computed delta: ", delta
  printComponents(validatorInfo)
  raiseAssert("Validator's previous epoch balance plus computed validator's " &
    "delta is not equal to the validator's current epoch balance.")

proc getDbValidatorsCount(db: SqStoreRef): int64 =
  var res: int64
  discard db.exec("SELECT count(*) FROM validators", ()) do (r: int64):
    res = r
  return res
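
# Run the given body inside a single SQLite transaction - batching the
# per-validator inserts avoids paying the commit cost for every row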
template inTransaction(db: SqStoreRef, dbName: string, body: untyped) =
  try:
    db.exec("BEGIN TRANSACTION;").expect(dbName)
    body
  finally:
    db.exec("END TRANSACTION;").expect(dbName)

proc insertValidators(db: SqStoreRef, state: ForkedHashedBeaconState,
                      startIndex, endIndex: int64) =
  var insertValidator {.global.}: SqliteStmt[
    (int64, array[48, byte]), void]
  once: insertValidator = db.createInsertValidatorProc
  withState(state):
    db.inTransaction("DB"):
      for i in startIndex..<endIndex:
        insertValidator.exec(
          (i, forkyState.data.validators[i].pubkey.toRaw)).expect("DB")

proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
  # Create a database with performance information for every epoch
  info "Opening database..."
  let db = BeaconChainDB.new(conf.databaseDir.string, readOnly = true)
  defer: db.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized"
    quit 1

  echo "Initializing block pool..."
  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, conf.eraDir)

  let outDb = SqStoreRef.init(conf.outDir, "validatorDb").expect("DB")
  defer: outDb.close()

  outDb.createValidatorsRawTable
  outDb.createValidatorsView

  let
    unaggregatedFilesOutputDir = conf.outDir / "unaggregated"
    aggregatedFilesOutputDir = conf.outDir / "aggregated"
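    # Unless an explicit start epoch is given, resume from the epoch after the
    # last one already present in the output files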
    startEpoch =
      if conf.startEpoch.isSome:
        Epoch(conf.startEpoch.get)
      else:
        let unaggregatedFilesNextEpoch = getUnaggregatedFilesLastEpoch(
          unaggregatedFilesOutputDir) + 1
        let aggregatedFilesNextEpoch = getAggregatedFilesLastEpoch(
          aggregatedFilesOutputDir) + 1
        if conf.writeUnaggregatedFiles and conf.writeAggregatedFiles:
          min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
        elif conf.writeUnaggregatedFiles:
          unaggregatedFilesNextEpoch
        elif conf.writeAggregatedFiles:
          aggregatedFilesNextEpoch
        else:
          min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
    endEpoch =
      if conf.endEpoch.isSome:
        Epoch(conf.endEpoch.get)
      else:
        dag.finalizedHead.slot.epoch # Avoid dealing with changes

  if startEpoch > endEpoch:
    fatal "Start epoch cannot be bigger than end epoch.",
          startEpoch = startEpoch, endEpoch = endEpoch
    quit QuitFailure

  info "Analyzing performance for epochs.",
       startEpoch = startEpoch, endEpoch = endEpoch

  let
    startSlot = startEpoch.start_slot
    endSlot = endEpoch.start_slot + SLOTS_PER_EPOCH
    blockRefs = dag.getBlockRange(startSlot, endSlot)
if not unaggregatedFilesOutputDir . dirExists :
unaggregatedFilesOutputDir . createDir
if not aggregatedFilesOutputDir . dirExists :
aggregatedFilesOutputDir . createDir
2021-12-29 02:50:49 +00:00
let tmpState = newClone ( dag . headState )
var cache = StateCache ( )
2022-01-17 12:58:33 +00:00
let slot = if startSlot > 0 : startSlot - 1 else : 0 . Slot
2021-12-29 02:50:49 +00:00
if blockRefs . len > 0 :
    discard dag.updateState(
      tmpState[], dag.atSlot(blockRefs[^1], slot).expect("block"), false, cache)
  else:
    discard dag.updateState(
      tmpState[], dag.getBlockIdAtSlot(slot).expect("block"), false, cache)

  let savedValidatorsCount = outDb.getDbValidatorsCount
  var validatorsCount = getStateField(tmpState[], validators).len
  outDb.insertValidators(tmpState[], savedValidatorsCount, validatorsCount)

  var previousEpochBalances: seq[uint64]
  collectBalances(previousEpochBalances, tmpState[])

  var forkedInfo = ForkedEpochInfo()
  var rewardsAndPenalties: seq[RewardsAndPenalties]
  rewardsAndPenalties.setLen(validatorsCount)

  var auxiliaryState: AuxiliaryState
  auxiliaryState.copyParticipationFlags(tmpState[])

  var aggregator = ValidatorDbAggregator.init(
    aggregatedFilesOutputDir, conf.resolution, endEpoch)
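
  # Called once the state has crossed an epoch boundary: verifies that each
  # validator's balance matches the previous balance plus the computed delta,
  # then hands every row to the CSV writer and/or the aggregator.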
  proc processEpoch() =
    let epoch = getStateField(tmpState[], slot).epoch
    info "Processing epoch ...", epoch = epoch

    var csvLines = newStringOfCap(1000000)

    withState(tmpState[]):
      withEpochInfo(forkedInfo):
        doAssert forkyState.data.balances.len == info.validators.len
        doAssert forkyState.data.balances.len == previousEpochBalances.len
        doAssert forkyState.data.balances.len == rewardsAndPenalties.len

        for index, validator in info.validators:
          template rp: untyped = rewardsAndPenalties[index]

          checkBalance(
            index, validator, forkyState.data.balances.item(index).int64,
            previousEpochBalances[index].int64, rp)

          when infoFork == EpochInfoFork.Phase0:
            rp.inclusion_delay = block:
              let notSlashed = (RewardFlags.isSlashed notin validator.flags)
              if notSlashed and validator.is_previous_epoch_attester.isSome():
                some(validator.is_previous_epoch_attester.get().delay.uint64)
              else:
                none(uint64)

          if conf.writeUnaggregatedFiles:
            csvLines.add rp.serializeToCsv

          if conf.writeAggregatedFiles:
            aggregator.addValidatorData(index, rp)

    if conf.writeUnaggregatedFiles:
      let fileName = getFilePathForEpoch(epoch, unaggregatedFilesOutputDir)
      var res = io2.removeFile(fileName)
      doAssert res.isOk
      res = io2.writeFile(fileName, snappy.encode(csvLines.toBytes))
      doAssert res.isOk

    if conf.writeAggregatedFiles:
      aggregator.advanceEpochs(epoch, shouldShutDown)

    if shouldShutDown: quit QuitSuccess
    collectBalances(previousEpochBalances, tmpState[])
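
  # Advances `tmpState` slot by slot up to `ends`. Just before an epoch
  # boundary the epoch-level rewards are collected on a copy of the state,
  # and right after crossing it `processEpoch` emits the results and the
  # per-epoch bookkeeping is reset.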
  proc processSlots(ends: Slot, endsFlags: UpdateFlags) =
    var currentSlot = getStateField(tmpState[], slot)
    while currentSlot < ends:
      let nextSlot = currentSlot + 1
      let flags = if nextSlot == ends: endsFlags else: {}

      if nextSlot.is_epoch:
        withState(tmpState[]):
          var stateData = newClone(forkyState.data)
          rewardsAndPenalties.collectEpochRewardsAndPenalties(
            stateData[], cache, cfg, flags)

      let res = process_slots(cfg, tmpState[], nextSlot, cache, forkedInfo, flags)
      doAssert res.isOk, "Slot processing can't fail with correct inputs"

      currentSlot = nextSlot

      if currentSlot.is_epoch:
        processEpoch()
        rewardsAndPenalties.setLen(0)
        rewardsAndPenalties.setLen(validatorsCount)
        auxiliaryState.copyParticipationFlags(tmpState[])
        clear cache
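
  # Replay blocks oldest-first (note the reverse indexing into `blockRefs`):
  # advance empty slots up to each block's slot, apply the block, and grow the
  # tracking structures whenever the block introduced new validators.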
  for bi in 0 ..< blockRefs.len:
    let forkedBlock = dag.getForkedBlock(blockRefs[blockRefs.len - bi - 1]).get()
    withBlck(forkedBlock):
      processSlots(blck.message.slot, {skipLastStateRootCalculation})

      rewardsAndPenalties.collectBlockRewardsAndPenalties(
        tmpState[], forkedBlock, auxiliaryState, cache, cfg)

      let res = state_transition_block(
        cfg, tmpState[], blck, cache, {}, noRollback)
      if res.isErr:
        fatal "State transition failed (!)"
        quit QuitFailure

      let newValidatorsCount = getStateField(tmpState[], validators).len
      if newValidatorsCount > validatorsCount:
        # Resize the structures in case a new validator has appeared after
        # the state_transition_block procedure call ...
        rewardsAndPenalties.setLen(newValidatorsCount)
        previousEpochBalances.setLen(newValidatorsCount)
        # ... and add the new validators to the database.
        outDb.insertValidators(
          tmpState[], validatorsCount, newValidatorsCount)
        validatorsCount = newValidatorsCount

  # Capture rewards of empty slots as well, including the epoch that got
  # finalized
  processSlots(endSlot, {})
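
# Both SIGINT and SIGTERM only set `shouldShutDown`; the flag is checked at
# epoch boundaries in `processEpoch`, allowing the tool to exit cleanly
# between epochs.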
proc controlCHook {.noconv.} =
  notice "Shutting down after having received SIGINT."
  shouldShutDown = true

proc exitOnSigterm(signal: cint) {.noconv.} =
  notice "Shutting down after having received SIGTERM."
  shouldShutDown = true
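
# Entry point: install the shutdown handlers, load the command-line
# configuration and dispatch to the selected sub-command.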
when isMainModule:
  setControlCHook(controlCHook)
  when defined(posix):
    c_signal(SIGTERM, exitOnSigterm)

  var
    conf = DbConf.load()
    cfg = getRuntimeConfig(conf.eth2Network)

  case conf.cmd
  of DbCmd.bench:
    cmdBench(conf, cfg)
  of DbCmd.dumpState:
    cmdDumpState(conf)
  of DbCmd.putState:
    cmdPutState(conf, cfg)
  of DbCmd.dumpBlock:
    cmdDumpBlock(conf)
  of DbCmd.putBlock:
    cmdPutBlock(conf, cfg)
  of DbCmd.rewindState:
    cmdRewindState(conf, cfg)
  of DbCmd.verifyEra:
    cmdVerifyEra(conf, cfg)
  of DbCmd.exportEra:
    cmdExportEra(conf, cfg)
  of DbCmd.importEra:
    cmdImportEra(conf, cfg)
  of DbCmd.validatorPerf:
    cmdValidatorPerf(conf, cfg)
  of DbCmd.validatorDb:
    cmdValidatorDb(conf, cfg)