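# `ncli_db` is a command-line tool for inspecting and manipulating the Nimbus
# beacon chain database: benchmarking block replay, dumping and importing
# states and blocks, pruning, rewinding to a given block/slot and writing
# era files.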
import
  std/[os, stats, strformat, tables],
  snappy, snappy/framing,
  chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
  ../beacon_chain/networking/network_metadata,
  ../beacon_chain/[beacon_chain_db],
  ../beacon_chain/consensus_object_pools/[blockchain_dag],
  ../beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
  ../beacon_chain/spec/[
    beaconstate, state_transition, state_transition_epoch, validator,
    ssz_codec],
  ../beacon_chain/sszdump,
  ../research/simutils,
  ./e2store, ./ncli_common, ./validator_db_aggregator

when defined(posix):
  import system/ansi_c

type Timers = enum
  tInit = "Initialize DB"
  tLoadBlock = "Load block from database"
  tLoadState = "Load state from database"
  tAdvanceSlot = "Advance slot, non-epoch"
  tAdvanceEpoch = "Advance slot, epoch"
  tApplyBlock = "Apply block, no slot processing"
  tDbLoad = "Database load"
  tDbStore = "Database store"

type
  DbCmd* {.pure.} = enum
    bench = "Run a replay benchmark for block and epoch processing"
    dumpState = "Extract a state from the database as-is - only works for states that have been explicitly stored"
    putState = "Store a given BeaconState in the database"
    dumpBlock = "Extract a (trusted) SignedBeaconBlock from the database"
    putBlock = "Store a given SignedBeaconBlock in the database, potentially updating some of the pointers"
    pruneDatabase
    rewindState = "Extract any state from the database based on a given block and slot, replaying if needed"
    exportEra = "Write an experimental era file"
    importEra = "Import era files to the database"
    validatorPerf
    validatorDb = "Create or update attestation performance database"

  # TODO:
  # This should probably allow specifying a run-time preset
DbConf = object
    databaseDir* {.
      defaultValue: ""
      desc: "Directory where `nbc.sqlite` is stored"
      name: "db" }: InputDir

    eth2Network* {.
      desc: "The Eth2 network preset to use"
      name: "network" }: Option[string]

    case cmd* {.
      command
      desc: ""
      .}: DbCmd

of DbCmd.bench:
      benchSlot* {.
        defaultValue: 0
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      benchSlots* {.
        defaultValue: 50000
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64
      storeBlocks* {.
        defaultValue: false
        desc: "Store each read block back into a separate database".}: bool
      storeStates* {.
        defaultValue: false
        name: "store-states"
        desc: "Store a state each epoch into a separate database".}: bool
      printTimes* {.
        defaultValue: true
        name: "print-times"
        desc: "Print csv of block processing time".}: bool
      resetCache* {.
        defaultValue: false
        name: "reset-cache"
        desc: "Process each block with a fresh cache".}: bool

of DbCmd.dumpState:
      stateRoot* {.
        argument
        name: "state-root"
        desc: "State root(s) to save".}: seq[string]

of DbCmd.putState:
      stateFile {.
        argument
        name: "file"
        desc: "Files to import".}: seq[string]

of DbCmd.dumpBlock:
      blockRootx* {.
        argument
        name: "block-root"
        desc: "Block root(s) to save".}: seq[string]

of DbCmd.putBlock:
      blckFile {.
        argument
        name: "file"
        desc: "Files to import".}: seq[string]
      setHead {.
        defaultValue: false
        name: "set-head"
        desc: "Update head to this block"}: bool
      setTail {.
        defaultValue: false
        name: "set-tail"
        desc: "Update tail to this block"}: bool
      setGenesis {.
        defaultValue: false
        name: "set-genesis"
        desc: "Update genesis to this block"}: bool

of DbCmd.pruneDatabase:
      dryRun* {.
        defaultValue: false
        name: "dry-run"
        desc: "Don't write to the database copy; only simulate actions; default false".}: bool
      keepOldStates* {.
        defaultValue: true
        name: "keep-old"
        desc: "Keep pre-finalization states; default true".}: bool
      verbose* {.
        defaultValue: false
        desc: "Enables verbose output; default false".}: bool

of DbCmd.rewindState:
      blockRoot* {.
        argument
        name: "block-root"
        desc: "Block root".}: string

      slot* {.
        argument
        desc: "Slot".}: uint64

of DbCmd.exportEra:
      era* {.
        defaultValue: 0
        desc: "The era number to write".}: uint64
      eraCount* {.
        defaultValue: 1
        name: "count"
        desc: "Number of eras to write".}: uint64

of DbCmd.importEra:
      eraFiles* {.
        argument
        name: "file"
        desc: "The name of the era file(s) to import".}: seq[string]

of DbCmd.validatorPerf:
      perfSlot* {.
        defaultValue: -128 * SLOTS_PER_EPOCH.int64
        name: "start-slot"
        desc: "Starting slot, negative = backwards from head".}: int64
      perfSlots* {.
        defaultValue: 0
        name: "slots"
        desc: "Number of slots to run benchmark for, 0 = all the way to head".}: uint64

of DbCmd.validatorDb:
      outDir* {.
        name: "out-dir"
        abbr: "o"
        desc: "Output directory".}: string
      startEpoch* {.
        name: "start-epoch"
        abbr: "s"
        desc: "Epoch from which to start recording statistics. " &
              "By default one past the last epoch in the output directory".}: Option[uint]
      endEpoch* {.
        name: "end-epoch"
        abbr: "e"
        desc: "The last epoch for which to record statistics. " &
              "By default the last epoch in the input database".}: Option[uint]
      resolution {.
        defaultValue: 225,
        name: "resolution"
        abbr: "r"
        desc: "How many epochs to aggregate into a single compacted file" .}: uint
      writeAggregatedFiles {.
        name: "aggregated"
        defaultValue: true
        abbr: "a"
        desc: "Whether to write aggregated files for a range of epochs with a given resolution" .}: bool
      writeUnaggregatedFiles {.
        name: "unaggregated"
        defaultValue: true
        abbr: "u"
        desc: "Whether to write an unaggregated file for each epoch" .}: bool

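# Set when a shutdown has been requested; the long-running commands below
# check it periodically so they can exit cleanly mid-operation.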
var shouldShutDown = false
func getSlotRange(dag: ChainDAGRef, startSlot: int64, count: uint64): (Slot, Slot) =
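  ## Resolve a possibly negative start slot (counted back from the DAG head)
  ## and a slot count into an absolute (start, end) pair with an exclusive
  ## end; a count of 0 selects everything up to and including the head slot.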
  let
    start =
      if startSlot >= 0: Slot(startSlot)
      elif uint64(-startSlot) >= dag.head.slot: Slot(0)
      else: Slot(dag.head.slot - uint64(-startSlot))
    ends =
      if count == 0: dag.head.slot + 1
      else: start + count
  (start, ends)

proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
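  ## Replay a range of blocks from the database on top of the matching
  ## starting state, timing database loads, slot/epoch processing and block
  ## application separately, and optionally storing the replayed blocks and
  ## states into a separate "benchmark" database.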
  var timers: array[Timers, RunningStat]

  echo "Opening database..."
  let
    db = BeaconChainDB.new(conf.databaseDir.string,)
    dbBenchmark = BeaconChainDB.new("benchmark")
  defer:
    db.close()
    dbBenchmark.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized: ", v.error()
    quit 1

  echo "Initializing block pool..."
  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = withTimerRet(timers[tInit]):
      ChainDAGRef.init(cfg, db, validatorMonitor, {})

  var
    (start, ends) = dag.getSlotRange(conf.benchSlot, conf.benchSlots)
    blockRefs = dag.getBlockRange(start, ends)
    blocks: (
      seq[phase0.TrustedSignedBeaconBlock],
      seq[altair.TrustedSignedBeaconBlock],
      seq[bellatrix.TrustedSignedBeaconBlock])

  echo &"Loaded head slot {dag.head.slot}, selected {blockRefs.len} blocks"
  doAssert blockRefs.len() > 0, "Must select at least one block"

  for b in 0 ..< blockRefs.len:
    let blck = blockRefs[blockRefs.len - b - 1]
    withTimer(timers[tLoadBlock]):
      case cfg.blockForkAtEpoch(blck.slot.epoch)
      of BeaconBlockFork.Phase0:
        blocks[0].add dag.db.getPhase0Block(blck.root).get()
      of BeaconBlockFork.Altair:
        blocks[1].add dag.db.getAltairBlock(blck.root).get()
      of BeaconBlockFork.Bellatrix:
        blocks[2].add dag.db.getMergeBlock(blck.root).get()

  let stateData = newClone(dag.headState)

  var
    cache = StateCache()
    info = ForkedEpochInfo()
    loadedState = (
      (ref phase0.HashedBeaconState)(),
      (ref altair.HashedBeaconState)(),
      (ref bellatrix.HashedBeaconState)())

  withTimer(timers[tLoadState]):
    doAssert dag.updateStateData(
      stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)

template processBlocks(blocks: auto) =
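    # Replay the given blocks on top of stateData: advance slots (epoch and
    # non-epoch transitions timed separately), apply each block without slot
    # processing, and optionally store blocks and epoch states into the
    # benchmark database, reloading stored states to verify their roots.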
    for b in blocks.mitems():
      if shouldShutDown: quit QuitSuccess
      while getStateField(stateData[].data, slot) < b.message.slot:
        let isEpoch = (getStateField(stateData[].data, slot) + 1).is_epoch()
        withTimer(timers[if isEpoch: tAdvanceEpoch else: tAdvanceSlot]):
          process_slots(
            dag.cfg, stateData[].data, getStateField(stateData[].data, slot) + 1, cache,
            info, {}).expect("Slot processing can't fail with correct inputs")

      var start = Moment.now()
      withTimer(timers[tApplyBlock]):
        if conf.resetCache:
          cache = StateCache()
        let res = state_transition_block(
          dag.cfg, stateData[].data, b, cache, {}, noRollback)
        if res.isErr():
          dump("./", b)
          echo "State transition failed (!) ", res.error()
          quit 1
      if conf.printTimes:
        echo b.message.slot, ",", toHex(b.root.data), ",", nanoseconds(Moment.now() - start)
      if conf.storeBlocks:
        withTimer(timers[tDbStore]):
          dbBenchmark.putBlock(b)

      withState(stateData[].data):
        if state.data.slot.is_epoch and conf.storeStates:
          if state.data.slot.epoch < 2:
            dbBenchmark.putState(state.root, state.data)
            dbBenchmark.checkpoint()
          else:
            withTimer(timers[tDbStore]):
              dbBenchmark.putState(state.root, state.data)
              dbBenchmark.checkpoint()

            withTimer(timers[tDbLoad]):
              case stateFork
              of BeaconStateFork.Phase0:
                doAssert dbBenchmark.getState(
                  state.root, loadedState[0][].data, noRollback)
              of BeaconStateFork.Altair:
                doAssert dbBenchmark.getState(
                  state.root, loadedState[1][].data, noRollback)
              of BeaconStateFork.Bellatrix:
                doAssert dbBenchmark.getState(
                  state.root, loadedState[2][].data, noRollback)

            if state.data.slot.epoch mod 16 == 0:
              let loadedRoot = case stateFork
                of BeaconStateFork.Phase0: hash_tree_root(loadedState[0][].data)
                of BeaconStateFork.Altair: hash_tree_root(loadedState[1][].data)
                of BeaconStateFork.Bellatrix: hash_tree_root(loadedState[2][].data)
              doAssert hash_tree_root(state.data) == loadedRoot

  processBlocks(blocks[0])
  processBlocks(blocks[1])
  processBlocks(blocks[2])

  printTimers(false, timers)

proc cmdDumpState(conf: DbConf) =
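  ## Dump the states matching the given state roots to SSZ files in the
  ## current directory.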
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  let
    phase0State = (ref phase0.HashedBeaconState)()
    altairState = (ref altair.HashedBeaconState)()
    bellatrixState = (ref bellatrix.HashedBeaconState)()

  for stateRoot in conf.stateRoot:
    if shouldShutDown: quit QuitSuccess
    template doit(state: untyped) =
      try:
        state.root = Eth2Digest.fromHex(stateRoot)

        if db.getState(state.root, state.data, noRollback):
          dump("./", state)
          continue
      except CatchableError as e:
        echo "Couldn't load ", state.root, ": ", e.msg

    doit(phase0State[])
    doit(altairState[])
    doit(bellatrixState[])

    echo "Couldn't load ", stateRoot

proc cmdPutState(conf: DbConf, cfg: RuntimeConfig) =
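  ## Load SSZ-encoded states from the given files and store them in the
  ## database.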
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  for file in conf.stateFile:
    if shouldShutDown: quit QuitSuccess
    let state = newClone(readSszForkedHashedBeaconState(
      cfg, readAllBytes(file).tryGet()))
    withState(state[]):
      db.putState(state)

proc cmdDumpBlock(conf: DbConf) =
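  ## Dump the blocks matching the given block roots to SSZ files in the
  ## current directory.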
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  for blockRoot in conf.blockRootx:
    if shouldShutDown: quit QuitSuccess
    try:
      let root = Eth2Digest.fromHex(blockRoot)
      if (let blck = db.getPhase0Block(root); blck.isSome):
        dump("./", blck.get())
      elif (let blck = db.getAltairBlock(root); blck.isSome):
        dump("./", blck.get())
      elif (let blck = db.getMergeBlock(root); blck.isSome):
        dump("./", blck.get())
      else:
        echo "Couldn't load ", blockRoot
    except CatchableError as e:
      echo "Couldn't load ", blockRoot, ": ", e.msg

proc cmdPutBlock(conf: DbConf, cfg: RuntimeConfig) =
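  ## Load SSZ-encoded blocks from the given files, store them in the database
  ## and optionally update the head/tail/genesis pointers to point at them.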
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  for file in conf.blckFile:
    if shouldShutDown: quit QuitSuccess

    let blck = readSszForkedSignedBeaconBlock(
      cfg, readAllBytes(file).tryGet())

    withBlck(blck.asTrusted()):
      db.putBlock(blck)
      if conf.setHead:
        db.putHeadBlock(blck.root)
      if conf.setTail:
        db.putTailBlock(blck.root)
      if conf.setGenesis:
        db.putGenesisBlock(blck.root)

proc copyPrunedDatabase(
    db: BeaconChainDB, copyDb: BeaconChainDB,
    dryRun, verbose, keepOldStates: bool) =
## Create a pruned copy of the beacon chain database
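  ##
  ## Blocks on the canonical chain from head back to tail are copied, while
  ## states are only copied at epoch boundaries (and, unless `keepOldStates`
  ## is set, only from the finalized epoch onwards); `dryRun` skips all
  ## writes to the copy.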
  let
    headBlock = db.getHeadBlock()
    tailBlock = db.getTailBlock()
    genesisBlock = db.getGenesisBlock()

  doAssert db.getPhase0Block(headBlock.get).isOk
  doAssert db.getPhase0Block(tailBlock.get).isOk
  doAssert db.getPhase0Block(genesisBlock.get).isOk

  var
    beaconState = (ref phase0.HashedBeaconState)()
    finalizedEpoch: Epoch # default value of 0 is conservative/safe
    prevBlockSlot = db.getPhase0Block(db.getHeadBlock().get).get.message.slot

  let
    headEpoch = db.getPhase0Block(headBlock.get).get.message.slot.epoch
    tailStateRoot = db.getPhase0Block(tailBlock.get).get.message.state_root

  # Tail states are specially addressed; no stateroot intermediary
  if not db.getState(tailStateRoot, beaconState[].data, noRollback):
    doAssert false, "could not load tail state"
  beaconState[].root = tailStateRoot

  if not dry_run:
    copyDb.putStateRoot(
      beaconState[].latest_block_root(), beaconState[].data.slot,
      beaconState[].root)
    copyDb.putState(beaconState[].root, beaconState[].data)
    copyDb.putBlock(db.getPhase0Block(genesisBlock.get).get)

  for signedBlock in getAncestors(db, headBlock.get):
    if shouldShutDown: quit QuitSuccess
    if not dry_run:
      copyDb.putBlock(signedBlock)
      copyDb.checkpoint()
    if verbose:
      echo "copied block at slot ", signedBlock.message.slot

    for slot in countdown(prevBlockSlot, signedBlock.message.slot + 1):
      if slot mod SLOTS_PER_EPOCH != 0 or
          ((not keepOldStates) and slot.epoch < finalizedEpoch):
        continue

      # Could also only copy these states, head and finalized, plus tail state
      let stateRequired = slot.epoch in [finalizedEpoch, headEpoch]

      let sr = db.getStateRoot(signedBlock.root, slot)
      if sr.isErr:
        if stateRequired:
          echo "skipping state root required for slot ",
            slot, " with root ", signedBlock.root
        continue

      if not db.getState(sr.get, beaconState[].data, noRollback):
        # Don't copy dangling stateroot pointers
        if stateRequired:
          doAssert false, "state root and state required"
        continue
      beaconState[].root = sr.get()

      finalizedEpoch = max(
        finalizedEpoch, beaconState[].data.finalized_checkpoint.epoch)

      if not dry_run:
        copyDb.putStateRoot(
          beaconState[].latest_block_root(), beaconState[].data.slot,
          beaconState[].root)
        copyDb.putState(beaconState[].root, beaconState[].data)
      if verbose:
        echo "copied state at slot ", slot, " from block at ", shortLog(signedBlock.message.slot)

    prevBlockSlot = signedBlock.message.slot

  if not dry_run:
    copyDb.putHeadBlock(headBlock.get)
    copyDb.putTailBlock(tailBlock.get)
    copyDb.putGenesisBlock(genesisBlock.get)

proc cmdPrune(conf: DbConf) =
  let
    db = BeaconChainDB.new(conf.databaseDir.string)
    # TODO: add the destination as a CLI parameter
    copyDb = BeaconChainDB.new("pruned_db")

  defer:
    db.close()
    copyDb.close()

  db.copyPrunedDatabase(copyDb, conf.dryRun, conf.verbose, conf.keepOldStates)

proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
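  ## Replay to the state at the given block root and slot and dump it to an
  ## SSZ file in the current directory.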
  echo "Opening database..."
  let db = BeaconChainDB.new(conf.databaseDir.string)
  defer: db.close()

  if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
    echo "Database not initialized: ", v.error()
    quit 1

  echo "Initializing block pool..."

  let
    validatorMonitor = newClone(ValidatorMonitor.init())
    dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})

  let blckRef = dag.getBlockRef(fromHex(Eth2Digest, conf.blockRoot)).valueOr:
    echo "Block not found in database"
    return

  let tmpState = assignClone(dag.headState)
  dag.withUpdatedState(tmpState[], blckRef.atSlot(Slot(conf.slot))) do:
    echo "Writing state..."
    withState(stateData.data):
      dump("./", state)
  do: raiseAssert "withUpdatedState failed"

func atCanonicalSlot(blck: BlockRef, slot: Slot): BlockSlot =
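  ## Return the BlockSlot at `slot` anchored to the latest block applied
  ## strictly before `slot`, i.e. a block proposed at `slot` itself is not
  ## included.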
  if slot == 0:
    blck.atSlot(slot)
  else:
    blck.atSlot(slot - 1).blck.atSlot(slot)

proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
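  ## Write an era file for each era in the requested range (see `./e2store`
  ## for the file format).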
|
|
|
|
let db = BeaconChainDB.new(conf.databaseDir.string)
|
e2store: add era format (#2382)
Era files contain 8192 blocks and a state corresponding to the length of
the array holding block roots in the state, meaning that each block is
verifiable using the pubkeys and block roots from the state. Of course,
one would need to know the root of the state as well, which is available
in the first block of the _next_ file - or known from outside.
This PR also adds an implementation to write e2s, e2i and era files, as
well as a python script to inspect them.
All in all, the format is very similar to what goes on in the network
requests meaning it can trivially serve as a backing format for serving
said requests.
Mainnet, up to the first 671k slots, take up 3.5gb - in each era file,
the BeaconState contributes about 9mb at current validator set sizes, up
from ~3mb in the early blocks, for a grand total of ~558mb for the 82 eras
tested - this overhead could potentially be calculated but one would lose
the ability to verify individual blocks (eras could still be verified using
historical roots).
```
-rw-rw-r--. 1 arnetheduck arnetheduck 16 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 1,8M 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 18M 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2s
...
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 68M 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 61K 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 62M 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2s
```
2021-03-15 10:31:39 +00:00
|
|
|
defer: db.close()
|
|
|
|
|
2021-12-21 10:40:14 +00:00
|
|
|
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
|
|
|
|
echo "Database not initialized: ", v.error()
|
e2store: add era format (#2382)
Era files contain 8192 blocks and a state corresponding to the length of
the array holding block roots in the state, meaning that each block is
verifiable using the pubkeys and block roots from the state. Of course,
one would need to know the root of the state as well, which is available
in the first block of the _next_ file - or known from outside.
This PR also adds an implementation to write e2s, e2i and era files, as
well as a python script to inspect them.
All in all, the format is very similar to what goes on in the network
requests meaning it can trivially serve as a backing format for serving
said requests.
Mainnet, up to the first 671k slots, take up 3.5gb - in each era file,
the BeaconState contributes about 9mb at current validator set sizes, up
from ~3mb in the early blocks, for a grand total of ~558mb for the 82 eras
tested - this overhead could potentially be calculated but one would lose
the ability to verify individual blocks (eras could still be verified using
historical roots).
```
-rw-rw-r--. 1 arnetheduck arnetheduck 16 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 1,8M 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 18M 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2s
...
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 68M 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 61K 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 62M 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2s
```
2021-03-15 10:31:39 +00:00
|
|
|
quit 1
|
|
|
|
|
2022-01-07 10:13:19 +00:00
|
|
|
type Timers = enum
|
|
|
|
tState
|
|
|
|
tBlocks
|
|
|
|
|
e2store: add era format (#2382)
Era files contain 8192 blocks and a state corresponding to the length of
the array holding block roots in the state, meaning that each block is
verifiable using the pubkeys and block roots from the state. Of course,
one would need to know the root of the state as well, which is available
in the first block of the _next_ file - or known from outside.
This PR also adds an implementation to write e2s, e2i and era files, as
well as a python script to inspect them.
All in all, the format is very similar to what goes on in the network
requests meaning it can trivially serve as a backing format for serving
said requests.
Mainnet, up to the first 671k slots, take up 3.5gb - in each era file,
the BeaconState contributes about 9mb at current validator set sizes, up
from ~3mb in the early blocks, for a grand total of ~558mb for the 82 eras
tested - this overhead could potentially be calculated but one would lose
the ability to verify individual blocks (eras could still be verified using
historical roots).
```
-rw-rw-r--. 1 arnetheduck arnetheduck 16 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 1,8M 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 18M 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2s
...
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 68M 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 61K 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 62M 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2s
```
2021-03-15 10:31:39 +00:00
|
|
|
echo "Initializing block pool..."
|
|
|
|
let
|
2021-12-20 19:20:31 +00:00
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
|
e2store: add era format (#2382)
Era files contain 8192 blocks and a state corresponding to the length of
the array holding block roots in the state, meaning that each block is
verifiable using the pubkeys and block roots from the state. Of course,
one would need to know the root of the state as well, which is available
in the first block of the _next_ file - or known from outside.
This PR also adds an implementation to write e2s, e2i and era files, as
well as a python script to inspect them.
All in all, the format is very similar to what goes on in the network
requests meaning it can trivially serve as a backing format for serving
said requests.
Mainnet, up to the first 671k slots, take up 3.5gb - in each era file,
the BeaconState contributes about 9mb at current validator set sizes, up
from ~3mb in the early blocks, for a grand total of ~558mb for the 82 eras
tested - this overhead could potentially be calculated but one would lose
the ability to verify individual blocks (eras could still be verified using
historical roots).
```
-rw-rw-r--. 1 arnetheduck arnetheduck 16 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 1,8M 5 mar 11.47 ethereum2-mainnet-00000000-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 18M 5 mar 11.47 ethereum2-mainnet-00000001-00000001.e2s
...
-rw-rw-r--. 1 arnetheduck arnetheduck 65K 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 68M 5 mar 11.52 ethereum2-mainnet-00000051-00000001.e2s
-rw-rw-r--. 1 arnetheduck arnetheduck 61K 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2i
-rw-rw-r--. 1 arnetheduck arnetheduck 62M 5 mar 11.11 ethereum2-mainnet-00000052-00000001.e2s
```
2021-03-15 10:31:39 +00:00
|
|
|
|
2021-03-17 10:17:15 +00:00
|
|
|
let tmpState = assignClone(dag.headState)
|
2022-01-07 10:13:19 +00:00
|
|
|
var
|
|
|
|
tmp: seq[byte]
|
|
|
|
timers: array[Timers, RunningStat]
|
2021-03-17 10:17:15 +00:00
|
|
|
|
2021-12-29 02:50:49 +00:00
|
|
|
for era in conf.era ..< conf.era + conf.eraCount:
|
2022-01-17 12:58:33 +00:00
|
|
|
if shouldShutDown: quit QuitSuccess
|
2021-03-15 10:31:39 +00:00
|
|
|
let
|
2022-01-07 10:13:19 +00:00
|
|
|
firstSlot =
|
|
|
|
if era == 0: none(Slot)
|
|
|
|
else: some(Slot((era - 1) * SLOTS_PER_HISTORICAL_ROOT))
|
2021-03-15 10:31:39 +00:00
|
|
|
endSlot = Slot(era * SLOTS_PER_HISTORICAL_ROOT)
|
|
|
|
canonical = dag.head.atCanonicalSlot(endSlot)
|
|
|
|
|
|
|
|
if endSlot > dag.head.slot:
|
|
|
|
echo "Written all complete eras"
|
|
|
|
break
|
|
|
|
|
2022-01-07 10:13:19 +00:00
|
|
|
let name = withState(dag.headState.data): eraFileName(cfg, state.data, era)
|
|
|
|
echo "Writing ", name
|
|
|
|
|
|
|
|
let e2 = openFile(name, {OpenFlags.Write, OpenFlags.Create}).get()
|
|
|
|
defer: discard closeFile(e2)
|
|
|
|
|
|
|
|
var group = EraGroup.init(e2, firstSlot).get()
|
|
|
|
if firstSlot.isSome():
|
|
|
|
withTimer(timers[tBlocks]):
|
|
|
|
var blocks: array[SLOTS_PER_HISTORICAL_ROOT.int, BlockId]
|
|
|
|
for i in dag.getBlockRange(firstSlot.get(), 1, blocks)..<blocks.len:
|
|
|
|
if dag.getBlockSSZ(blocks[i], tmp):
|
|
|
|
group.update(e2, blocks[i].slot, tmp).get()
|
|
|
|
|
|
|
|
withTimer(timers[tState]):
|
|
|
|
dag.withUpdatedState(tmpState[], canonical) do:
|
|
|
|
withState(stateData.data):
|
|
|
|
group.finish(e2, state.data).get()
|
|
|
|
do: raiseAssert "withUpdatedState failed"
|
|
|
|
|
|
|
|
printTimers(true, timers)
|
2021-03-15 10:31:39 +00:00
|
|
|
|
2022-01-25 08:28:26 +00:00
|
|
|
proc cmdImportEra(conf: DbConf, cfg: RuntimeConfig) =
|
|
|
|
let db = BeaconChainDB.new(conf.databaseDir.string)
|
|
|
|
defer: db.close()
|
|
|
|
|
|
|
|
type Timers = enum
|
|
|
|
tBlock
|
|
|
|
tState
|
|
|
|
|
|
|
|
var
|
|
|
|
blocks = 0
|
|
|
|
states = 0
|
|
|
|
others = 0
|
|
|
|
timers: array[Timers, RunningStat]
|
|
|
|
|
|
|
|
var data: seq[byte]
|
|
|
|
for file in conf.eraFiles:
|
|
|
|
let f = openFile(file, {OpenFlags.Read}).valueOr:
|
|
|
|
warn "Can't open ", file
|
|
|
|
continue
|
|
|
|
defer: discard closeFile(f)
|
|
|
|
|
|
|
|
while true:
|
|
|
|
let header = readRecord(f, data).valueOr:
|
|
|
|
break
|
|
|
|
|
|
|
|
if header.typ == SnappyBeaconBlock:
|
|
|
|
withTimer(timers[tBlock]):
|
|
|
|
let uncompressed = framingFormatUncompress(data)
|
|
|
|
let blck = try: readSszForkedSignedBeaconBlock(cfg, uncompressed)
|
|
|
|
except CatchableError as exc:
|
|
|
|
error "Invalid snappy block", msg = exc.msg, file
|
|
|
|
continue
|
|
|
|
|
|
|
|
withBlck(blck.asTrusted()):
|
|
|
|
db.putBlock(blck)
|
|
|
|
blocks += 1
|
|
|
|
elif header.typ == SnappyBeaconState:
|
|
|
|
withTimer(timers[tState]):
|
|
|
|
let uncompressed = framingFormatUncompress(data)
|
|
|
|
let state = try: newClone(
|
|
|
|
readSszForkedHashedBeaconState(cfg, uncompressed))
|
|
|
|
except CatchableError as exc:
|
|
|
|
error "Invalid snappy state", msg = exc.msg, file
|
|
|
|
continue
|
|
|
|
withState(state[]):
|
|
|
|
db.putState(state)
|
|
|
|
states += 1
|
|
|
|
else:
|
|
|
|
info "Skipping record", typ = toHex(header.typ)
|
|
|
|
others += 1
|
|
|
|
|
|
|
|
notice "Done", blocks, states, others
|
|
|
|
printTimers(true, timers)
|
|
|
|
|
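As a usage sketch, the same e2store helpers can be pared down to a small per-file record tally; this assumes openFile/closeFile (stew/io2), readRecord (./e2store) and the SnappyBeaconBlock/SnappyBeaconState constants behave exactly as in cmdImportEra above, and the file name is only an example:
```nim
import std/tables

var counts = initCountTable[string]()
let f = openFile("ethereum2-mainnet-00000052-00000001.e2s", {OpenFlags.Read}).get()
defer: discard closeFile(f)

var buf: seq[byte]
while true:
  # readRecord fills `buf` with the record payload and returns its header.
  let header = readRecord(f, buf).valueOr:
    break
  if header.typ == SnappyBeaconBlock: counts.inc "block"
  elif header.typ == SnappyBeaconState: counts.inc "state"
  else: counts.inc "other"

echo counts
```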
2021-05-07 11:36:21 +00:00
|
|
|
type
|
|
|
|
# Validator performance metrics tool based on
|
|
|
|
# https://github.com/paulhauner/lighthouse/blob/etl/lcli/src/etl/validator_performance.rs
|
|
|
|
# Credits to Paul Hauner
|
|
|
|
ValidatorPerformance = object
|
|
|
|
attestation_hits: uint64
|
|
|
|
attestation_misses: uint64
|
|
|
|
head_attestation_hits: uint64
|
|
|
|
head_attestation_misses: uint64
|
|
|
|
target_attestation_hits: uint64
|
|
|
|
target_attestation_misses: uint64
|
|
|
|
first_slot_head_attester_when_first_slot_empty: uint64
|
|
|
|
first_slot_head_attester_when_first_slot_not_empty: uint64
|
|
|
|
delays: Table[uint64, uint64]
|
|
|
|
|
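The `delays` field is a histogram mapping inclusion delay to the number of attestations seen with that delay; the CSV output at the end of cmdValidatorPerf reduces it to a weighted average. A self-contained sketch with made-up numbers:
```nim
import std/tables

# Hypothetical histogram: 900 attestations included after 1 slot, 80 after 2,
# 20 after 3.
let delays = {1'u64: 900'u64, 2'u64: 80'u64, 3'u64: 20'u64}.toTable

var count, sum = 0'u64
for delay, n in delays:
  count += n
  sum += delay * n

let delayAvg = if count == 0: 0.0 else: sum.float / count.float
echo delayAvg  # 1.12 - same arithmetic as the delay_avg CSV column below
```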
2021-07-13 14:27:10 +00:00
|
|
|
proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
|
2021-05-07 11:36:21 +00:00
|
|
|
echo "Opening database..."
|
|
|
|
let
|
2021-07-13 14:27:10 +00:00
|
|
|
db = BeaconChainDB.new(conf.databaseDir.string,)
|
2021-05-07 11:36:21 +00:00
|
|
|
defer:
|
|
|
|
db.close()
|
|
|
|
|
2021-12-21 10:40:14 +00:00
|
|
|
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
|
|
|
|
echo "Database not initialized: ", v.error()
|
2021-05-07 11:36:21 +00:00
|
|
|
quit 1
|
|
|
|
|
|
|
|
echo "# Initializing block pool..."
|
2021-12-20 19:20:31 +00:00
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
|
2021-05-07 11:36:21 +00:00
|
|
|
|
|
|
|
var
|
2021-05-27 13:22:38 +00:00
|
|
|
(start, ends) = dag.getSlotRange(conf.perfSlot, conf.perfSlots)
|
|
|
|
blockRefs = dag.getBlockRange(start, ends)
|
2021-05-07 11:36:21 +00:00
|
|
|
perfs = newSeq[ValidatorPerformance](
|
2021-06-11 17:51:46 +00:00
|
|
|
getStateField(dag.headState.data, validators).len())
|
2021-05-07 11:36:21 +00:00
|
|
|
cache = StateCache()
|
2021-10-13 14:24:36 +00:00
|
|
|
info = ForkedEpochInfo()
|
2021-08-12 13:08:20 +00:00
|
|
|
blck: phase0.TrustedSignedBeaconBlock
|
2021-05-07 11:36:21 +00:00
|
|
|
|
|
|
|
doAssert blockRefs.len() > 0, "Must select at least one block"
|
|
|
|
|
|
|
|
echo "# Analyzing performance for epochs ",
|
|
|
|
blockRefs[^1].slot.epoch, " - ", blockRefs[0].slot.epoch
|
|
|
|
|
|
|
|
let state = newClone(dag.headState)
|
2022-01-05 18:38:04 +00:00
|
|
|
doAssert dag.updateStateData(
|
2021-05-07 11:36:21 +00:00
|
|
|
state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
|
|
|
|
|
2021-10-13 14:24:36 +00:00
|
|
|
proc processEpoch() =
|
2021-05-07 11:36:21 +00:00
|
|
|
let
|
|
|
|
prev_epoch_target_slot =
|
2022-01-11 10:01:54 +00:00
|
|
|
state[].data.get_previous_epoch().start_slot()
|
2021-05-07 11:36:21 +00:00
|
|
|
penultimate_epoch_end_slot =
|
|
|
|
if prev_epoch_target_slot == 0: Slot(0)
|
|
|
|
else: prev_epoch_target_slot - 1
|
|
|
|
first_slot_empty =
|
2021-06-11 17:51:46 +00:00
|
|
|
state[].data.get_block_root_at_slot(prev_epoch_target_slot) ==
|
|
|
|
state[].data.get_block_root_at_slot(penultimate_epoch_end_slot)
|
2021-05-07 11:36:21 +00:00
|
|
|
|
|
|
|
let first_slot_attesters = block:
|
2022-01-08 23:28:49 +00:00
|
|
|
let committees_per_slot = state[].data.get_committee_count_per_slot(
|
2021-05-21 09:23:28 +00:00
|
|
|
prev_epoch_target_slot.epoch, cache)
|
2021-05-07 11:36:21 +00:00
|
|
|
var indices = HashSet[ValidatorIndex]()
|
2022-01-08 23:28:49 +00:00
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
2021-06-11 17:51:46 +00:00
|
|
|
for validator_index in state[].data.get_beacon_committee(
|
2022-01-08 23:28:49 +00:00
|
|
|
prev_epoch_target_slot, committee_index, cache):
|
2021-05-07 11:36:21 +00:00
|
|
|
indices.incl(validator_index)
|
|
|
|
indices
|
2021-10-13 14:24:36 +00:00
|
|
|
case info.kind
|
|
|
|
of EpochInfoFork.Phase0:
|
2021-10-18 16:37:27 +00:00
|
|
|
template info: untyped = info.phase0Data
|
2021-11-25 12:20:36 +00:00
|
|
|
for i, s in info.validators.pairs():
|
2021-10-13 14:24:36 +00:00
|
|
|
let perf = addr perfs[i]
|
|
|
|
if RewardFlags.isActiveInPreviousEpoch in s.flags:
|
|
|
|
if s.is_previous_epoch_attester.isSome():
|
|
|
|
perf.attestation_hits += 1;
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2021-10-13 14:24:36 +00:00
|
|
|
if RewardFlags.isPreviousEpochHeadAttester in s.flags:
|
|
|
|
perf.head_attestation_hits += 1
|
|
|
|
else:
|
|
|
|
perf.head_attestation_misses += 1
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2021-10-13 14:24:36 +00:00
|
|
|
if RewardFlags.isPreviousEpochTargetAttester in s.flags:
|
|
|
|
perf.target_attestation_hits += 1
|
2021-05-07 11:36:21 +00:00
|
|
|
else:
|
2021-10-13 14:24:36 +00:00
|
|
|
perf.target_attestation_misses += 1
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2021-10-13 14:24:36 +00:00
|
|
|
if i.ValidatorIndex in first_slot_attesters:
|
|
|
|
if first_slot_empty:
|
|
|
|
perf.first_slot_head_attester_when_first_slot_empty += 1
|
|
|
|
else:
|
|
|
|
perf.first_slot_head_attester_when_first_slot_not_empty += 1
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2021-10-13 14:24:36 +00:00
|
|
|
if s.is_previous_epoch_attester.isSome():
|
|
|
|
perf.delays.mgetOrPut(
|
|
|
|
s.is_previous_epoch_attester.get().delay, 0'u64) += 1
|
|
|
|
|
|
|
|
else:
|
|
|
|
perf.attestation_misses += 1;
|
|
|
|
of EpochInfoFork.Altair:
|
|
|
|
echo "TODO altair"
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
if shouldShutDown: quit QuitSuccess
|
|
|
|
|
2021-12-29 02:50:49 +00:00
|
|
|
for bi in 0 ..< blockRefs.len:
|
2021-11-05 07:34:34 +00:00
|
|
|
blck = db.getPhase0Block(blockRefs[blockRefs.len - bi - 1].root).get()
|
2021-06-11 17:51:46 +00:00
|
|
|
while getStateField(state[].data, slot) < blck.message.slot:
|
2021-09-30 13:21:06 +00:00
|
|
|
let
|
|
|
|
nextSlot = getStateField(state[].data, slot) + 1
|
|
|
|
flags =
|
|
|
|
if nextSlot == blck.message.slot: {skipLastStateRootCalculation}
|
|
|
|
else: {}
|
2022-01-17 11:19:58 +00:00
|
|
|
process_slots(
|
|
|
|
dag.cfg, state[].data, nextSlot, cache, info, flags).expect(
|
|
|
|
"Slot processing can't fail with correct inputs")
|
2021-05-07 11:36:21 +00:00
|
|
|
|
2022-01-11 10:01:54 +00:00
|
|
|
if getStateField(state[].data, slot).is_epoch():
|
2021-05-07 11:36:21 +00:00
|
|
|
processEpoch()
|
|
|
|
|
2022-01-17 11:19:58 +00:00
|
|
|
let res = state_transition_block(
|
|
|
|
dag.cfg, state[].data, blck, cache, {}, noRollback)
|
|
|
|
if res.isErr:
|
|
|
|
echo "State transition failed (!) ", res.error()
|
2021-05-07 11:36:21 +00:00
|
|
|
quit 1
|
|
|
|
|
2021-05-27 13:22:38 +00:00
|
|
|
# Capture rewards of empty slots as well
|
2021-06-11 17:51:46 +00:00
|
|
|
while getStateField(state[].data, slot) < ends:
|
2022-01-17 11:19:58 +00:00
|
|
|
process_slots(
|
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work; this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There are a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
|
|
|
dag.cfg, state[].data, getStateField(state[].data, slot) + 1, cache,
|
2022-01-17 11:19:58 +00:00
|
|
|
info, {}).expect("Slot processing can't fail with correct inputs")
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-11 10:01:54 +00:00
|
|
|
if getStateField(state[].data, slot).is_epoch():
|
2021-05-27 13:22:38 +00:00
|
|
|
processEpoch()
|
2021-05-07 11:36:21 +00:00
|
|
|
|
|
|
|
echo "validator_index,attestation_hits,attestation_misses,head_attestation_hits,head_attestation_misses,target_attestation_hits,target_attestation_misses,delay_avg,first_slot_head_attester_when_first_slot_empty,first_slot_head_attester_when_first_slot_not_empty"
|
|
|
|
|
|
|
|
for (i, perf) in perfs.pairs:
|
|
|
|
var
|
|
|
|
count = 0'u64
|
|
|
|
sum = 0'u64
|
|
|
|
for delay, n in perf.delays:
|
|
|
|
count += n
|
|
|
|
sum += delay * n
|
|
|
|
echo i,",",
|
|
|
|
perf.attestation_hits,",",
|
|
|
|
perf.attestation_misses,",",
|
|
|
|
perf.head_attestation_hits,",",
|
|
|
|
perf.head_attestation_misses,",",
|
|
|
|
perf.target_attestation_hits,",",
|
|
|
|
perf.target_attestation_misses,",",
|
|
|
|
if count == 0: 0.0
|
|
|
|
else: sum.float / count.float,",",
|
|
|
|
perf.first_slot_head_attester_when_first_slot_empty,",",
|
|
|
|
perf.first_slot_head_attester_when_first_slot_not_empty
|
|
|
|
|
2021-12-29 02:50:49 +00:00
|
|
|
proc createValidatorsRawTable(db: SqStoreRef) =
|
|
|
|
db.exec("""
|
2021-05-27 13:22:38 +00:00
|
|
|
CREATE TABLE IF NOT EXISTS validators_raw(
|
|
|
|
validator_index INTEGER PRIMARY KEY,
|
2022-01-17 12:58:33 +00:00
|
|
|
pubkey BLOB NOT NULL UNIQUE
|
2021-05-27 13:22:38 +00:00
|
|
|
);
|
|
|
|
""").expect("DB")
|
|
|
|
|
2021-12-29 02:50:49 +00:00
|
|
|
proc createValidatorsView(db: SqStoreRef) =
|
|
|
|
db.exec("""
|
2021-05-27 13:22:38 +00:00
|
|
|
CREATE VIEW IF NOT EXISTS validators AS
|
|
|
|
SELECT
|
|
|
|
validator_index,
|
2022-01-17 12:58:33 +00:00
|
|
|
'0x' || lower(hex(pubkey)) as pubkey
|
2021-05-27 13:22:38 +00:00
|
|
|
FROM validators_raw;
|
|
|
|
""").expect("DB")
|
|
|
|
|
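The view simply exposes `validators_raw` with a hex-encoded pubkey column; it can be queried with the same exec/callback pattern used by getDbValidatorsCount further down. A sketch (the helper name and query are illustrative only):
```nim
# Return the highest validator index stored so far, or -1 if the table is
# empty, using the single-column int64 callback shape shown below.
proc getMaxDbValidatorIndex(db: SqStoreRef): int64 =
  var res = -1'i64
  discard db.exec(
    "SELECT validator_index FROM validators ORDER BY validator_index DESC LIMIT 1",
    ()) do (r: int64):
    res = r
  return res
```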
2021-12-29 02:50:49 +00:00
|
|
|
proc createInsertValidatorProc(db: SqStoreRef): auto =
|
|
|
|
db.prepareStmt("""
|
|
|
|
INSERT OR IGNORE INTO validators_raw(
|
|
|
|
validator_index,
|
2022-01-17 12:58:33 +00:00
|
|
|
pubkey)
|
|
|
|
VALUES(?, ?);""",
|
|
|
|
(int64, array[48, byte]), void).expect("DB")
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
proc collectBalances(balances: var seq[uint64], forkedState: ForkedHashedBeaconState) =
|
|
|
|
withState(forkedState):
|
|
|
|
balances = seq[uint64](state.data.balances.data)
|
|
|
|
|
|
|
|
proc calculateDelta(info: RewardsAndPenalties): int64 =
|
|
|
|
info.source_outcome +
|
|
|
|
info.target_outcome +
|
|
|
|
info.head_outcome +
|
|
|
|
info.inclusion_delay_outcome +
|
|
|
|
info.sync_committee_outcome +
|
|
|
|
info.proposer_outcome +
|
|
|
|
info.slashing_outcome -
|
|
|
|
info.inactivity_penalty.int64 +
|
|
|
|
info.deposits.int64
|
|
|
|
|
|
|
|
proc printComponents(info: RewardsAndPenalties) =
|
|
|
|
echo "Components:"
|
|
|
|
echo "Source outcome: ", info.source_outcome
|
|
|
|
echo "Target outcome: ", info.target_outcome
|
|
|
|
echo "Head outcome: ", info.head_outcome
|
|
|
|
echo "Inclusion delay outcome: ", info.inclusion_delay_outcome
|
|
|
|
echo "Sync committee outcome: ", info.sync_committee_outcome
|
|
|
|
echo "Proposer outcome: ", info.proposer_outcome
|
|
|
|
echo "Slashing outcome: ", info.slashing_outcome
|
|
|
|
echo "Inactivity penalty: ", info.inactivity_penalty
|
|
|
|
echo "Deposits: ", info.deposits
|
|
|
|
|
|
|
|
proc checkBalance(validatorIndex: int64,
|
|
|
|
validator: RewardStatus | ParticipationInfo,
|
|
|
|
currentEpochBalance, previousEpochBalance: Gwei,
|
|
|
|
validatorInfo: RewardsAndPenalties) =
|
|
|
|
let delta = validatorInfo.calculateDelta
|
|
|
|
if currentEpochBalance.int64 == previousEpochBalance.int64 + delta:
|
|
|
|
return
|
|
|
|
echo "Validator: ", validatorIndex
|
|
|
|
echo "Is eligible: ", is_eligible_validator(validator)
|
|
|
|
echo "Current epoch balance: ", currentEpochBalance
|
|
|
|
echo "Previous epoch balance: ", previousEpochBalance
|
|
|
|
echo "State delta: ", currentEpochBalance - previousEpochBalance
|
|
|
|
echo "Computed delta: ", delta
|
|
|
|
printComponents(validatorInfo)
|
|
|
|
raiseAssert("Validator's previous epoch balance plus computed validator's " &
|
|
|
|
"delta is not equal to the validator's current epoch balance.")
|
|
|
|
|
|
|
|
proc getDbValidatorsCount(db: SqStoreRef): int64 =
|
|
|
|
var res: int64
|
|
|
|
discard db.exec("SELECT count(*) FROM validators", ()) do (r: int64):
|
|
|
|
res = r
|
|
|
|
return res
|
|
|
|
|
|
|
|
template inTransaction(db: SqStoreRef, dbName: string, body: untyped) =
|
|
|
|
try:
|
|
|
|
db.exec("BEGIN TRANSACTION;").expect(dbName)
|
|
|
|
body
|
|
|
|
finally:
|
|
|
|
db.exec("END TRANSACTION;").expect(dbName)
|
|
|
|
|
|
|
|
proc insertValidators(db: SqStoreRef, state: ForkedHashedBeaconState,
|
|
|
|
startIndex, endIndex: int64) =
|
|
|
|
var insertValidator {.global.}: SqliteStmt[
|
2022-01-17 12:58:33 +00:00
|
|
|
(int64, array[48, byte]), void]
|
2021-12-29 02:50:49 +00:00
|
|
|
once: insertValidator = db.createInsertValidatorProc
|
|
|
|
withState(state):
|
|
|
|
db.inTransaction("DB"):
|
|
|
|
for i in startIndex ..< endIndex:
|
2022-01-17 12:58:33 +00:00
|
|
|
insertValidator.exec(
|
|
|
|
(i, state.data.validators[i].pubkey.toRaw)).expect("DB")
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
|
|
|
|
# Create a database with performance information for every epoch
|
2022-01-17 12:58:33 +00:00
|
|
|
info "Opening database..."
|
2021-12-29 02:50:49 +00:00
|
|
|
let db = BeaconChainDB.new(conf.databaseDir.string, false, true)
|
|
|
|
defer: db.close()
|
|
|
|
|
|
|
|
if (let v = ChainDAGRef.isInitialized(db); v.isErr()):
|
|
|
|
echo "Database not initialized"
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
echo "Initializing block pool..."
|
|
|
|
let
|
|
|
|
validatorMonitor = newClone(ValidatorMonitor.init())
|
|
|
|
dag = ChainDAGRef.init(cfg, db, validatorMonitor, {})
|
|
|
|
|
|
|
|
let outDb = SqStoreRef.init(conf.outDir, "validatorDb").expect("DB")
|
|
|
|
defer: outDb.close()
|
|
|
|
|
|
|
|
outDb.createValidatorsRawTable
|
|
|
|
outDb.createValidatorsView
|
|
|
|
|
|
|
|
let
|
2022-01-31 12:06:16 +00:00
|
|
|
unaggregatedFilesOutputDir = conf.outDir / "unaggregated"
|
|
|
|
aggregatedFilesOutputDir = conf.outDir / "aggregated"
|
2022-01-17 12:58:33 +00:00
|
|
|
startEpoch =
|
2022-01-18 18:22:56 +00:00
|
|
|
if conf.startEpoch.isSome:
|
2022-01-17 12:58:33 +00:00
|
|
|
Epoch(conf.startEpoch.get)
|
2021-12-29 02:50:49 +00:00
|
|
|
else:
|
2022-01-31 12:06:16 +00:00
|
|
|
let unaggregatedFilesNextEpoch = getUnaggregatedFilesLastEpoch(
|
|
|
|
unaggregatedFilesOutputDir) + 1
|
|
|
|
let aggregatedFilesNextEpoch = getAggregatedFilesLastEpoch(
|
|
|
|
aggregatedFilesOutputDir) + 1
|
|
|
|
if conf.writeUnaggregatedFiles and conf.writeAggregatedFiles:
|
|
|
|
min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
|
|
|
|
elif conf.writeUnaggregatedFiles:
|
|
|
|
unaggregatedFilesNextEpoch
|
|
|
|
elif conf.writeAggregatedFiles:
|
|
|
|
aggregatedFilesNextEpoch
|
|
|
|
else:
|
|
|
|
min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
|
2022-01-17 12:58:33 +00:00
|
|
|
endEpoch =
|
|
|
|
if conf.endEpoch.isSome:
|
|
|
|
Epoch(conf.endEpoch.get)
|
|
|
|
else:
|
|
|
|
dag.finalizedHead.slot.epoch # Avoid dealing with changes
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
if startEpoch > endEpoch:
|
|
|
|
fatal "Start epoch cannot be bigger than end epoch.",
|
|
|
|
startEpoch = startEpoch, endEpoch = endEpoch
|
|
|
|
quit QuitFailure
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
info "Analyzing performance for epochs.",
|
|
|
|
startEpoch = startEpoch, endEpoch = endEpoch
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
let
|
|
|
|
startSlot = startEpoch.start_slot
|
|
|
|
endSlot = endEpoch.start_slot + SLOTS_PER_EPOCH
|
|
|
|
blockRefs = dag.getBlockRange(startSlot, endSlot)
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-31 12:06:16 +00:00
|
|
|
if not unaggregatedFilesOutputDir.dirExists:
|
|
|
|
unaggregatedFilesOutputDir.createDir
|
|
|
|
|
|
|
|
if not aggregatedFilesOutputDir.dirExists:
|
|
|
|
aggregatedFilesOutputDir.createDir
|
|
|
|
|
2021-12-29 02:50:49 +00:00
|
|
|
let tmpState = newClone(dag.headState)
|
|
|
|
var cache = StateCache()
|
2022-01-17 12:58:33 +00:00
|
|
|
let slot = if startSlot > 0: startSlot - 1 else: 0.Slot
|
2021-12-29 02:50:49 +00:00
|
|
|
if blockRefs.len > 0:
|
2022-01-17 12:58:33 +00:00
|
|
|
discard dag.updateStateData(tmpState[], blockRefs[^1].atSlot(slot), false, cache)
|
2021-12-29 02:50:49 +00:00
|
|
|
else:
|
2022-01-17 12:58:33 +00:00
|
|
|
discard dag.updateStateData(tmpState[], dag.head.atSlot(slot), false, cache)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
let savedValidatorsCount = outDb.getDbValidatorsCount
|
2021-12-29 02:50:49 +00:00
|
|
|
var validatorsCount = getStateField(tmpState[].data, validators).len
|
2022-01-17 12:58:33 +00:00
|
|
|
outDb.insertValidators(tmpState[].data, savedValidatorsCount, validatorsCount)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
var previousEpochBalances: seq[uint64]
|
|
|
|
collectBalances(previousEpochBalances, tmpState[].data)
|
|
|
|
|
|
|
|
var forkedInfo = ForkedEpochInfo()
|
|
|
|
var rewardsAndPenalties: seq[RewardsAndPenalties]
|
|
|
|
rewardsAndPenalties.setLen(validatorsCount)
|
|
|
|
|
|
|
|
var auxiliaryState: AuxiliaryState
|
|
|
|
auxiliaryState.copyParticipationFlags(tmpState[].data)
|
2021-05-27 13:22:38 +00:00
|
|
|
|
2022-01-31 12:06:16 +00:00
|
|
|
var aggregator = ValidatorDbAggregator.init(
|
|
|
|
aggregatedFilesOutputDir, conf.resolution, endEpoch)
|
|
|
|
|
2021-05-27 13:22:38 +00:00
|
|
|
proc processEpoch() =
|
2022-01-17 12:58:33 +00:00
|
|
|
let epoch = getStateField(tmpState[].data, slot).epoch
|
|
|
|
info "Processing epoch ...", epoch = epoch
|
|
|
|
|
|
|
|
var csvLines = newStringOfCap(1000000)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
withState(tmpState[].data):
|
|
|
|
withEpochInfo(forkedInfo):
|
|
|
|
doAssert state.data.balances.len == info.validators.len
|
|
|
|
doAssert state.data.balances.len == previousEpochBalances.len
|
|
|
|
doAssert state.data.balances.len == rewardsAndPenalties.len
|
|
|
|
|
|
|
|
for index, validator in info.validators.pairs:
|
2022-01-17 12:58:33 +00:00
|
|
|
template rp: untyped = rewardsAndPenalties[index]
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
checkBalance(index, validator, state.data.balances[index],
|
2022-01-17 12:58:33 +00:00
|
|
|
previousEpochBalances[index], rp)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
when infoFork == EpochInfoFork.Phase0:
|
|
|
|
rp.inclusion_delay = block:
|
2021-12-29 02:50:49 +00:00
|
|
|
let notSlashed = (RewardFlags.isSlashed notin validator.flags)
|
|
|
|
if notSlashed and validator.is_previous_epoch_attester.isSome():
|
2022-01-17 12:58:33 +00:00
|
|
|
some(validator.is_previous_epoch_attester.get().delay.uint64)
|
2021-12-29 02:50:49 +00:00
|
|
|
else:
|
2022-01-17 12:58:33 +00:00
|
|
|
none(uint64)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
2022-01-31 12:06:16 +00:00
|
|
|
if conf.writeUnaggregatedFiles:
|
|
|
|
csvLines.add rp.serializeToCsv
|
|
|
|
|
|
|
|
if conf.writeAggregatedFiles:
|
|
|
|
aggregator.addValidatorData(index, rp)
|
|
|
|
|
|
|
|
if conf.writeUnaggregatedFiles:
|
|
|
|
let fileName = getFilePathForEpoch(epoch, unaggregatedFilesOutputDir)
|
|
|
|
var res = io2.removeFile(fileName)
|
|
|
|
doAssert res.isOk
|
|
|
|
res = io2.writeFile(fileName, snappy.encode(csvLines.toBytes))
|
|
|
|
doAssert res.isOk
|
|
|
|
|
|
|
|
if conf.writeAggregatedFiles:
|
|
|
|
aggregator.advanceEpochs(epoch, shouldShutDown)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
if shouldShutDown: quit QuitSuccess
|
|
|
|
collectBalances(previousEpochBalances, tmpState[].data)
|
2021-12-29 02:50:49 +00:00
|
|
|
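The per-epoch files written above are just snappy-compressed CSV text; as a sketch for reading one back (assuming stew/io2's readAllBytes and nim-snappy's plain-format decode; the epoch number is only an example):
```nim
# Decode one of the unaggregated epoch files written by processEpoch above.
let
  fileName = getFilePathForEpoch(Epoch(50000), unaggregatedFilesOutputDir)
  compressed = io2.readAllBytes(fileName).expect("readable file")
  csv = string.fromBytes(snappy.decode(compressed))
echo csv.len, " bytes of CSV for that epoch"
```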
|
|
|
|
proc processSlots(ends: Slot, endsFlags: UpdateFlags) =
|
|
|
|
var currentSlot = getStateField(tmpState[].data, slot)
|
|
|
|
while currentSlot < ends:
|
|
|
|
let nextSlot = currentSlot + 1
|
|
|
|
let flags = if nextSlot == ends: endsFlags else: {}
|
|
|
|
|
|
|
|
if nextSlot.isEpoch:
|
|
|
|
withState(tmpState[].data):
|
|
|
|
rewardsAndPenalties.collectEpochRewardsAndPenalties(
|
2022-01-23 19:41:29 +00:00
|
|
|
state.data, cache, cfg, flags)
|
2021-12-29 02:50:49 +00:00
|
|
|
|
2022-01-17 12:58:33 +00:00
|
|
|
let res = process_slots(cfg, tmpState[].data, nextSlot, cache, forkedInfo, flags)
|
|
|
|
doAssert res.isOk, "Slot processing can't fail with correct inputs"
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
currentSlot = nextSlot
|
|
|
|
|
|
|
|
if currentSlot.isEpoch:
|
2022-01-17 12:58:33 +00:00
|
|
|
processEpoch()
|
2021-12-29 02:50:49 +00:00
|
|
|
rewardsAndPenalties.setLen(0)
|
|
|
|
rewardsAndPenalties.setLen(validatorsCount)
|
|
|
|
auxiliaryState.copyParticipationFlags(tmpState[].data)
|
2022-01-17 12:58:33 +00:00
|
|
|
clear cache
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
for bi in 0 ..< blockRefs.len:
|
|
|
|
let forkedBlock = dag.getForkedBlock(blockRefs[blockRefs.len - bi - 1])
|
|
|
|
withBlck(forkedBlock):
|
|
|
|
processSlots(blck.message.slot, {skipLastStateRootCalculation})
|
|
|
|
|
|
|
|
rewardsAndPenalties.collectBlockRewardsAndPenalties(
|
|
|
|
tmpState[].data, forkedBlock, auxiliaryState, cache, cfg)
|
|
|
|
|
|
|
|
let res = state_transition_block(
|
|
|
|
cfg, tmpState[].data, blck, cache, {}, noRollback)
|
|
|
|
if res.isErr:
|
2022-01-17 12:58:33 +00:00
|
|
|
fatal "State transition failed (!)"
|
|
|
|
quit QuitFailure
|
2021-12-29 02:50:49 +00:00
|
|
|
|
|
|
|
let newValidatorsCount = getStateField(tmpState[].data, validators).len
|
|
|
|
if newValidatorsCount > validatorsCount:
|
|
|
|
# Resize the structures in case a new validator has appeared after
|
|
|
|
# the state_transition_block procedure call ...
|
|
|
|
rewardsAndPenalties.setLen(newValidatorsCount)
|
|
|
|
previousEpochBalances.setLen(newValidatorsCount)
|
|
|
|
# ... and add the new validators to the database.
|
2022-01-17 12:58:33 +00:00
|
|
|
outDb.insertValidators(
|
|
|
|
tmpState[].data, validatorsCount, newValidatorsCount)
|
2021-12-29 02:50:49 +00:00
|
|
|
validatorsCount = newValidatorsCount
|
2021-05-27 13:22:38 +00:00
|
|
|
|
|
|
|
# Capture rewards of empty slots as well, including the epoch that got
|
|
|
|
# finalized
|
2022-01-17 12:58:33 +00:00
|
|
|
processSlots(endSlot, {})
|
|
|
|
|
|
|
|
proc controlCHook {.noconv.} =
|
|
|
|
notice "Shutting down after having received SIGINT."
|
|
|
|
shouldShutDown = true
|
|
|
|
|
|
|
|
proc exitOnSigterm(signal: cint) {.noconv.} =
|
|
|
|
notice "Shutting down after having received SIGTERM."
|
|
|
|
shouldShutDown = true
|
2021-09-30 13:21:06 +00:00
|
|
|
|
2020-06-01 14:48:24 +00:00
|
|
|
when isMainModule:
|
2022-01-17 12:58:33 +00:00
|
|
|
setControlCHook(controlCHook)
|
|
|
|
when defined(posix):
|
|
|
|
c_signal(SIGTERM, exitOnSigterm)
|
|
|
|
|
2020-09-01 09:01:57 +00:00
|
|
|
var
|
2020-06-06 11:26:19 +00:00
|
|
|
conf = DbConf.load()
|
2021-07-13 14:27:10 +00:00
|
|
|
cfg = getRuntimeConfig(conf.eth2Network)
|
2020-05-28 14:19:25 +00:00
|
|
|
|
2020-06-06 11:26:19 +00:00
|
|
|
case conf.cmd
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.bench:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdBench(conf, cfg)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.dumpState:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdDumpState(conf)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.putState:
|
|
|
|
cmdPutState(conf, cfg)
|
|
|
|
of DbCmd.dumpBlock:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdDumpBlock(conf)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.putBlock:
|
|
|
|
cmdPutBlock(conf, cfg)
|
|
|
|
of DbCmd.pruneDatabase:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdPrune(conf)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.rewindState:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdRewindState(conf, cfg)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.exportEra:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdExportEra(conf, cfg)
|
2022-01-25 08:28:26 +00:00
|
|
|
of DbCmd.importEra:
|
|
|
|
cmdImportEra(conf, cfg)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.validatorPerf:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdValidatorPerf(conf, cfg)
|
2021-11-18 12:02:43 +00:00
|
|
|
of DbCmd.validatorDb:
|
2021-07-13 14:27:10 +00:00
|
|
|
cmdValidatorDb(conf, cfg)
|