# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
{.push raises: [Defect].}
|
|
|
|
|
|
|
|
import
  std/[options, sequtils, tables, sets],
  stew/[assign2, byteutils, results],
  metrics, snappy, chronicles,
  ../spec/[
    beaconstate, eth2_merkleization, eth2_ssz_serialization, forks, helpers,
    state_transition, validator],
  ../spec/datatypes/[phase0, altair, merge],
  ".."/beacon_chain_db,
  "."/[block_pools_types, block_quarantine, forkedbeaconstate_dbhelpers]

export block_pools_types, results
|
2020-07-30 19:18:17 +00:00
|
|
|
|
2020-11-27 22:16:13 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_head_root, "Root of the head block of the beacon chain"
declareGauge beacon_head_slot, "Slot of the head block of the beacon chain"

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_finalized_epoch, "Current finalized epoch" # On epoch transition
declareGauge beacon_finalized_root, "Current finalized root" # On epoch transition
declareGauge beacon_current_justified_epoch, "Current justified epoch" # On epoch transition
declareGauge beacon_current_justified_root, "Current justified root" # On epoch transition
declareGauge beacon_previous_justified_epoch, "Current previously justified epoch" # On epoch transition
declareGauge beacon_previous_justified_root, "Current previously justified root" # On epoch transition

declareCounter beacon_reorgs_total, "Total occurrences of reorganizations of the chain" # On fork choice
declareCounter beacon_state_data_cache_hits, "EpochRef hits"
declareCounter beacon_state_data_cache_misses, "EpochRef misses"
declareCounter beacon_state_rewinds, "State database rewinds"

declareGauge beacon_active_validators, "Number of validators in the active validator set"
declareGauge beacon_pending_deposits, "Number of pending deposits (state.eth1_data.deposit_count - state.eth1_deposit_index)" # On block
declareGauge beacon_processed_deposits_total, "Number of total deposits included on chain" # On block

logScope: topics = "chaindag"
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-06-25 10:23:10 +00:00
|
|
|
proc putBlock*(
    dag: ChainDAGRef,
    signedBlock: phase0.TrustedSignedBeaconBlock | altair.TrustedSignedBeaconBlock |
                 merge.TrustedSignedBeaconBlock) =
  ## Persist a trusted signed block (any supported fork) in the database
  ## backing this DAG.
  dag.db.putBlock(signedBlock)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
|
|
|
# Forward declaration - the implementation follows later in this module.
proc updateStateData*(
    dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
    cache: var StateCache) {.gcsafe.}
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-04-13 13:05:44 +00:00
|
|
|
template withStateVars*(
    stateDataInternal: var StateData, body: untyped): untyped =
  ## Inject a few more descriptive names for the members of `stateData` -
  ## the stateData instance may get mutated through these names as well
  template stateData(): StateData {.inject, used.} = stateDataInternal
  template blck(): BlockRef {.inject, used.} = stateDataInternal.blck
  template stateRoot(): Eth2Digest {.inject, used.} =
    getStateRoot(stateDataInternal.data)
  template root(): Eth2Digest {.inject, used.} = stateDataInternal.data.root

  body
|
|
|
|
|
2020-05-19 14:18:07 +00:00
|
|
|
template withState*(
    dag: ChainDAGRef, stateData: var StateData, blockSlot: BlockSlot,
    body: untyped): untyped =
  ## Helper template that updates stateData to a particular BlockSlot - usage of
  ## stateData is unsafe outside of block.
  ## TODO async transformations will lead to a race where stateData gets updated
  ## while waiting for future to complete - catch this here somehow?
  var cache {.inject.} = StateCache()
  updateStateData(dag, stateData, blockSlot, false, cache)

  withStateVars(stateData):
    body
|
2020-05-19 14:18:07 +00:00
|
|
|
|
|
|
|
func parent*(bs: BlockSlot): BlockSlot =
  ## Return a blockslot representing the previous slot, using the parent block
  ## if the current slot had a block
  if bs.slot == Slot(0):
    BlockSlot(blck: nil, slot: Slot(0))
  else:
    # When the slot is newer than the block, the same block still covers the
    # previous slot; otherwise we step to the parent block.
    let prevBlck =
      if bs.slot > bs.blck.slot: bs.blck
      else: bs.blck.parent
    BlockSlot(blck: prevBlck, slot: bs.slot - 1)
|
|
|
|
|
2020-09-11 08:03:50 +00:00
|
|
|
func parentOrSlot*(bs: BlockSlot): BlockSlot =
  ## Return a blockslot representing the previous slot, using the parent block
  ## with the current slot if the current had a block
  if bs.blck.isNil():
    BlockSlot(blck: nil, slot: Slot(0))
  elif bs.slot == bs.blck.slot:
    # Block exactly at this slot: keep the slot, move to the parent block
    BlockSlot(blck: bs.blck.parent, slot: bs.slot)
  else:
    # Empty slot: keep the block, step the slot back by one
    BlockSlot(blck: bs.blck, slot: bs.slot - 1)
|
|
|
|
|
2021-06-01 12:40:13 +00:00
|
|
|
func get_effective_balances(validators: openArray[Validator], epoch: Epoch):
    seq[Gwei] =
  ## Get the balances from a state as counted for fork choice
  result.newSeq(validators.len) # zero-init

  for idx in 0 ..< validators.len:
    # All non-active validators have a 0 balance; take the address to avoid
    # copying the (large) Validator value
    let v = unsafeAddr validators[idx]
    if v[].is_active_validator(epoch):
      result[idx] = v[].effective_balance
|
2020-08-12 04:49:52 +00:00
|
|
|
|
2021-06-01 11:13:40 +00:00
|
|
|
proc updateValidatorKeys*(dag: ChainDAGRef, validators: openArray[Validator]) =
  ## Update validator key cache - must be called every time a valid block is
  ## applied to the state - this is important to ensure that when we sync blocks
  ## without storing a state (non-epoch blocks essentially), the deposits from
  ## those blocks are persisted to the in-database cache of immutable validator
  ## data (but no earlier than that the whole block as been validated)
  dag.db.updateImmutableValidators(validators)
|
|
|
|
|
|
|
|
func validatorKey*(
    dag: ChainDAGRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indicies that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  dag.db.immutableValidators.load(index)
|
|
|
|
|
|
|
|
func validatorKey*(
    epochRef: EpochRef, index: ValidatorIndex or uint64): Option[CookedPubKey] =
  ## Returns the validator pubkey for the index, assuming it's been observed
  ## at any point in time - this function may return pubkeys for indicies that
  ## are not (yet) part of the head state (if the key has been observed on a
  ## non-head branch)!
  # Delegates to the DAG-level lookup via the EpochRef's dag reference
  epochRef.dag.validatorKey(index)
|
2021-06-01 11:13:40 +00:00
|
|
|
|
|
|
|
func init*(
    T: type EpochRef, dag: ChainDAGRef, state: StateData,
    cache: var StateCache): T =
  ## Build an EpochRef snapshot (shufflings, proposers, checkpoints, balances)
  ## from the given state, for the state's current epoch.
  let
    epoch = get_current_epoch(state.data)
    epochRef = EpochRef(
      dag: dag, # This gives access to the validator pubkeys through an EpochRef
      key: state.blck.epochAncestor(epoch),
      eth1_data: getStateField(state.data, eth1_data),
      eth1_deposit_index: getStateField(state.data, eth1_deposit_index),
      current_justified_checkpoint:
        getStateField(state.data, current_justified_checkpoint),
      finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
      shuffled_active_validator_indices:
        cache.get_shuffled_active_validator_indices(state.data, epoch)
    )

  for slotIdx in 0'u64..<SLOTS_PER_EPOCH:
    epochRef.beacon_proposers[slotIdx] = get_beacon_proposer_index(
      state.data, cache, epoch.compute_start_slot_at_epoch() + slotIdx)

  # When fork choice runs, it will need the effective balance of the justified
  # checkpoint - we pre-load the balances here to avoid rewinding the justified
  # state later and compress them because not all checkpoints end up being used
  # for fork choice - specially during long periods of non-finalization
  proc snappyEncode(inp: openArray[byte]): seq[byte] =
    try:
      snappy.encode(inp)
    except CatchableError as err:
      raiseAssert err.msg

  epochRef.effective_balances_bytes =
    snappyEncode(SSZ.encode(
      List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT](get_effective_balances(
        getStateField(state.data, validators).asSeq,
        get_current_epoch(state.data)))))

  epochRef
|
2020-05-29 06:10:20 +00:00
|
|
|
|
2020-10-22 10:53:33 +00:00
|
|
|
func effective_balances*(epochRef: EpochRef): seq[Gwei] =
  ## Decompress and decode the snappy/SSZ-encoded effective balances cached on
  ## the EpochRef.
  try:
    SSZ.decode(snappy.decode(epochRef.effective_balances_bytes, uint32.high),
               List[Gwei, Limit VALIDATOR_REGISTRY_LIMIT]).toSeq()
  except CatchableError as exc:
    # The bytes were produced by this module, so failure here is a bug
    raiseAssert exc.msg
|
|
|
|
|
2020-05-19 14:18:07 +00:00
|
|
|
func link*(parent, child: BlockRef) =
  ## Attach `child` to `parent` in the block DAG.
  doAssert parent.root != Eth2Digest() and child.root != Eth2Digest(),
    "blocks missing root!"
  doAssert parent.root != child.root, "self-references not allowed"

  child.parent = parent
|
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
func getDepth*(a, b: BlockRef): tuple[ancestor: bool, depth: int] =
  ## Walk from `b` towards genesis, reporting whether `a` is an ancestor of
  ## (or equal to) `b` and how many steps were taken.
  const maxDepth = (100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int)

  var
    cur = b
    depth = 0

  while true:
    if a == cur:
      return (true, depth)

    # for now, use an assert for block chain length since a chain this long
    # indicates a circular reference here..
    doAssert depth < maxDepth
    depth += 1

    if a.slot >= cur.slot or cur.parent.isNil:
      return (false, depth)

    doAssert cur.slot > cur.parent.slot
    cur = cur.parent
|
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
func isAncestorOf*(a, b: BlockRef): bool =
  ## True when `a` is an ancestor of (or the same block as) `b`.
  getDepth(a, b).ancestor
|
|
|
|
|
2020-10-19 09:25:06 +00:00
|
|
|
func get_ancestor*(blck: BlockRef, slot: Slot,
                   maxDepth = 100'i64 * 365 * 24 * 60 * 60 div SECONDS_PER_SLOT.int):
    BlockRef =
  ## https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/fork-choice.md#get_ancestor
  ## Return the most recent block as of the time at `slot` that not more recent
  ## than `blck` itself
  doAssert not blck.isNil

  var
    cur = blck
    depth = 0

  while true:
    if cur.slot <= slot:
      return cur

    if cur.parent.isNil:
      # Walked past the oldest known block without finding one at/before slot
      return nil

    # Guard against circular references in the chain
    doAssert depth < maxDepth
    depth += 1

    cur = cur.parent
|
|
|
|
|
|
|
|
func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
  ## Return a BlockSlot at a given slot, with the block set to the closest block
  ## available. If slot comes from before the block, a suitable block ancestor
  ## will be used, else blck is returned as if all slots after it were empty.
  ## This helper is useful when imagining what the chain looked like at a
  ## particular moment in time, or when imagining what it will look like in the
  ## near future if nothing happens (such as when looking ahead for the next
  ## block proposal)
  BlockSlot(blck: get_ancestor(blck, slot), slot: slot)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-07-30 15:48:25 +00:00
|
|
|
func atEpochStart*(blck: BlockRef, epoch: Epoch): BlockSlot =
  ## Return the BlockSlot corresponding to the first slot in the given epoch
  blck.atSlot(epoch.compute_start_slot_at_epoch)
|
|
|
|
|
2021-06-10 22:07:16 +00:00
|
|
|
func epochAncestor*(blck: BlockRef, epoch: Epoch): EpochKey =
  ## The state transition works by storing information from blocks in a
  ## "working" area until the epoch transition, then batching work collected
  ## during the epoch. Thus, last block in the ancestor epochs is the block
  ## that has an impact on epoch currently considered.
  ##
  ## This function returns a BlockSlot pointing to that epoch boundary, ie the
  ## boundary where the last block has been applied to the state and epoch
  ## processing has been done.
  var cur = blck
  while cur.slot.epoch >= epoch and not cur.parent.isNil:
    cur = cur.parent

  EpochKey(epoch: epoch, blck: cur)
|
2020-08-18 20:29:33 +00:00
|
|
|
|
2021-03-17 10:17:15 +00:00
|
|
|
func findEpochRef*(
    dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef = # may return nil!
  ## Scan the DAG's EpochRef cache for an entry matching the epoch ancestor of
  ## `blck` at `epoch`; returns nil on a cache miss.
  let ancestor = blck.epochAncestor(epoch)
  doAssert ancestor.blck != nil

  for candidate in dag.epochRefs:
    if candidate != nil and candidate.key == ancestor:
      return candidate

  nil
|
performance fixes (#2259)
* performance fixes
* don't mark tree cache as dirty on read-only List accesses
* store only blob in memory for keys and signatures, parse blob lazily
* compare public keys by blob instead of parsing / converting to raw
* compare Eth2Digest using non-constant-time comparison
* avoid some unnecessary validator copying
This branch will in particular speed up deposit processing which has
been slowing down block replay.
Pre (mainnet, 1600 blocks):
```
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
3450.269, 0.000, 3450.269, 3450.269, 1, Initialize DB
0.417, 0.822, 0.036, 21.098, 1400, Load block from database
16.521, 0.000, 16.521, 16.521, 1, Load state from database
27.906, 50.846, 8.104, 1507.633, 1350, Apply block
52.617, 37.029, 20.640, 135.938, 50, Apply epoch block
```
Post:
```
3502.715, 0.000, 3502.715, 3502.715, 1, Initialize DB
0.080, 0.560, 0.035, 21.015, 1400, Load block from database
17.595, 0.000, 17.595, 17.595, 1, Load state from database
15.706, 11.028, 8.300, 107.537, 1350, Apply block
33.217, 12.622, 17.331, 60.580, 50, Apply epoch block
```
* more perf fixes
* load EpochRef cache into StateCache more aggressively
* point out security concern with public key cache
* reuse proposer index from state when processing block
* avoid genericAssign in a few more places
* don't parse key when signature is unparseable
* fix `==` overload for Eth2Digest
* preallocate validator list when getting active validators
* speed up proposer index calculation a little bit
* reuse cache when replaying blocks in ncli_db
* avoid a few more copying loops
```
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
3279.158, 0.000, 3279.158, 3279.158, 1, Initialize DB
0.072, 0.357, 0.035, 13.400, 1400, Load block from database
17.295, 0.000, 17.295, 17.295, 1, Load state from database
5.918, 9.896, 0.198, 98.028, 1350, Apply block
15.888, 10.951, 7.902, 39.535, 50, Apply epoch block
0.000, 0.000, 0.000, 0.000, 0, Database block store
```
* clear full balance cache before processing rewards and penalties
```
All time are ms
Average, StdDev, Min, Max, Samples, Test
Validation is turned off meaning that no BLS operations are performed
3947.901, 0.000, 3947.901, 3947.901, 1, Initialize DB
0.124, 0.506, 0.026, 202.370, 363345, Load block from database
97.614, 0.000, 97.614, 97.614, 1, Load state from database
0.186, 0.188, 0.012, 99.561, 357262, Advance slot, non-epoch
14.161, 5.966, 1.099, 395.511, 11524, Advance slot, epoch
1.372, 4.170, 0.017, 276.401, 363345, Apply block, no slot processing
0.000, 0.000, 0.000, 0.000, 0, Database block store
```
2021-01-25 12:04:18 +00:00
|
|
|
|
2021-06-01 12:40:13 +00:00
|
|
|
func loadStateCache(
    dag: ChainDAGRef, cache: var StateCache, blck: BlockRef, epoch: Epoch) =
  # When creating a state cache, we want the current and the previous epoch
  # information to be preloaded as both of these are used in state transition
  # functions

  template load(e: Epoch) =
    if e notin cache.shuffled_active_validator_indices:
      let epochRef = dag.findEpochRef(blck, e)
      # findEpochRef may return nil on a cache miss - skip preloading then
      if epochRef != nil:
        cache.shuffled_active_validator_indices[epochRef.epoch] =
          epochRef.shuffled_active_validator_indices

        for i, idx in epochRef.beacon_proposers:
          cache.beacon_proposer_indices[
            epochRef.epoch.compute_start_slot_at_epoch + i] = idx

  load(epoch)

  if epoch > 0:
    load(epoch - 1)
|
|
|
|
|
2020-05-26 05:04:24 +00:00
|
|
|
func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =
  ## Internal constructor for a BlockRef with a known root and slot.
  BlockRef(root: root, slot: slot)
|
|
|
|
|
2021-06-03 09:42:25 +00:00
|
|
|
func init*(T: type BlockRef, root: Eth2Digest, blck: SomeSomeBeaconBlock):
    BlockRef =
  ## Construct a BlockRef from a block body, taking the slot from the block.
  init(BlockRef, root, blck.slot)
|
|
|
|
|
2021-03-17 10:17:15 +00:00
|
|
|
func contains*(dag: ChainDAGRef, root: Eth2Digest): bool =
  ## True when a block with the given root is known to the DAG.
  dag.blocks.contains(KeyedBlockRef.asLookupKey(root))
|
|
|
|
|
2021-09-08 03:46:33 +00:00
|
|
|
proc containsBlock(
    cfg: RuntimeConfig, db: BeaconChainDB, blck: BlockRef): bool =
  ## Check the fork-appropriate database table for the block's presence.
  if blck.slot.epoch < cfg.ALTAIR_FORK_EPOCH:
    db.containsBlockPhase0(blck.root)
  else:
    db.containsBlockAltair(blck.root)
|
|
|
|
|
2021-05-30 08:14:17 +00:00
|
|
|
func isStateCheckpoint(bs: BlockSlot): bool =
  ## Whether a full state snapshot is kept in the database for this
  ## (block, slot) position.
  ##
  ## Only two kinds of positions qualify:
  ## * the tail block itself, at its own slot (no parent)
  ## * the first epoch-start slot after a block - ie an epoch boundary whose
  ##   most recent block lies in the immediately preceding epoch
  ##
  ## All other states can be recreated by loading the nearest checkpoint and
  ## replaying the missing blocks / empty slots, so they are not stored.
  let atTail = bs.blck.parent == nil and bs.slot == bs.blck.slot
  if atTail:
    true
  else:
    bs.slot.isEpoch and bs.slot.epoch == bs.blck.slot.epoch + 1
|
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
           updateFlags: UpdateFlags, onBlockCb: OnBlockCallback = nil,
           onHeadCb: OnHeadCallback = nil, onReorgCb: OnReorgCallback = nil,
           onFinCb: OnFinalizedCallback = nil): ChainDAGRef =
  ## Load the block DAG from an initialized database: rebuild the in-memory
  ## `BlockRef` chain from head to tail, locate the most recent stored state,
  ## sanity-check it against the runtime config and set up the caches and
  ## event callbacks. Quits / asserts on a corrupt or mismatched database.
  # TODO we require that the db contains both a head and a tail block -
  # asserting here doesn't seem like the right way to go about it however..

  let
    tailBlockRoot = db.getTailBlock()
    headBlockRoot = db.getHeadBlock()

  doAssert tailBlockRoot.isSome(), "Missing tail block, database corrupt?"
  doAssert headBlockRoot.isSome(), "Missing head block, database corrupt?"

  let
    tailRoot = tailBlockRoot.get()
    tailBlock = db.getBlock(tailRoot).get()
    tailRef = BlockRef.init(tailRoot, tailBlock.message)
    headRoot = headBlockRoot.get()

  # The genesis block is only a distinct entry when the tail is a later
  # checkpoint (trusted snapshot sync) - otherwise tail == genesis.
  let genesisRef = if tailBlock.message.slot == GENESIS_SLOT:
    tailRef
  else:
    let
      genesisBlockRoot = db.getGenesisBlockRoot().expect(
        "preInit should have initialized the database with a genesis block root")
      genesisBlock = db.getBlock(genesisBlockRoot).expect(
        "preInit should have initialized the database with a genesis block")
    BlockRef.init(genesisBlockRoot, genesisBlock.message)

  var
    blocks: HashSet[KeyedBlockRef]
    headRef: BlockRef

  blocks.incl(KeyedBlockRef.init(tailRef))

  if genesisRef != tailRef:
    blocks.incl(KeyedBlockRef.init(genesisRef))

  # Walk the stored block summaries from head towards tail, creating BlockRef
  # instances and linking each child to its parent as we go.
  if headRoot != tailRoot:
    var curRef: BlockRef

    for blck in db.getAncestorSummaries(headRoot):
      if blck.root == tailRef.root:
        doAssert(not curRef.isNil)
        link(tailRef, curRef)
        curRef = curRef.parent
        break

      let newRef = BlockRef.init(blck.root, blck.summary.slot)
      if curRef == nil:
        curRef = newRef
      else:
        link(newRef, curRef)
        curRef = curRef.parent

      # Don't include blocks on incorrect hardforks
      if headRef == nil and cfg.containsBlock(db, newRef):
        headRef = newRef

      blocks.incl(KeyedBlockRef.init(curRef))
      trace "Populating block dag", key = curRef.root, val = curRef

    doAssert curRef == tailRef,
      "head block does not lead to tail, database corrupt?"
  else:
    headRef = tailRef

  # Because of incorrect hardfork check, there might be no head block, in which
  # case it's equivalent to the tail block
  if headRef == nil:
    headRef = tailRef

  var
    cur = headRef.atSlot(headRef.slot)
    tmpState = (ref StateData)()

  # Now that we have a head block, we need to find the most recent state that
  # we have saved in the database
  while cur.blck != nil:
    if cur.isStateCheckpoint():
      let root = db.getStateRoot(cur.blck.root, cur.slot)
      if root.isSome():
        if db.getState(root.get(), tmpState.data.hbsPhase0.data, noRollback):
          setStateRoot(tmpState.data, root.get())
          tmpState.blck = cur.blck

          break

    cur = cur.parentOrSlot()

  if tmpState.blck == nil:
    warn "No state found in head history, database corrupt?"
    # TODO Potentially we could recover from here instead of crashing - what
    #      would be a good recovery model?
    raiseAssert "No state found in head history, database corrupt?"

  # Guard against loading a database produced for a different network - the
  # fork data baked into the state must match what the config derives.
  case tmpState.data.beaconStateFork
  of forkPhase0:
    if tmpState.data.hbsPhase0.data.fork != genesisFork(cfg):
      error "State from database does not match network, check --network parameter",
        stateFork = tmpState.data.hbsPhase0.data.fork,
        configFork = genesisFork(cfg)
      quit 1
  of forkAltair:
    if tmpState.data.hbsAltair.data.fork != altairFork(cfg):
      error "State from database does not match network, check --network parameter",
        stateFork = tmpState.data.hbsAltair.data.fork,
        configFork = altairFork(cfg)
      quit 1
  of forkMerge:
    if tmpState.data.hbsMerge.data.fork != mergeFork(cfg):
      error "State from database does not match network, check --network parameter",
        stateFork = tmpState.data.hbsMerge.data.fork,
        configFork = mergeFork(cfg)
      quit 1

  let dag = ChainDAGRef(
    blocks: blocks,
    tail: tailRef,
    genesis: genesisRef,
    db: db,
    forkDigests: newClone ForkDigests.init(
      cfg,
      getStateField(tmpState.data, genesis_validators_root)),
    heads: @[headRef],
    headState: tmpState[],
    epochRefState: tmpState[],
    clearanceState: tmpState[],

    # The only allowed flag right now is verifyFinalization, as the others all
    # allow skipping some validation.
    updateFlags: {verifyFinalization} * updateFlags,
    cfg: cfg,

    onBlockAdded: onBlockCb,
    onHeadChanged: onHeadCb,
    onReorgHappened: onReorgCb,
    onFinHappened: onFinCb
  )

  doAssert cfg.GENESIS_FORK_VERSION != cfg.ALTAIR_FORK_VERSION
  doAssert dag.updateFlags in [{}, {verifyFinalization}]

  # Move the loaded state forward/backward to exactly match the head position.
  var cache: StateCache
  dag.updateStateData(dag.headState, headRef.atSlot(headRef.slot), false, cache)
  # We presently save states on the epoch boundary - it means that the latest
  # state we loaded might be older than head block - nonetheless, it will be
  # from the same epoch as the head, thus the finalized and justified slots are
  # the same - these only change on epoch boundaries.
  # When we start from a snapshot state, the `finalized_checkpoint` in the
  # snapshot will point to an even older state, but we trust the tail state
  # (the snapshot) to be finalized, hence the `max` expression below.
  let finalizedEpoch = max(getStateField(dag.headState.data, finalized_checkpoint).epoch,
                           tailRef.slot.epoch)
  dag.finalizedHead = headRef.atEpochStart(finalizedEpoch)

  dag.clearanceState = dag.headState

  # Pruning metadata
  dag.lastPrunePoint = dag.finalizedHead

  # Fill validator key cache in case we're loading an old database that doesn't
  # have a cache
  dag.updateValidatorKeys(getStateField(dag.headState.data, validators).asSeq())

  info "Block dag initialized",
    head = shortLog(headRef),
    finalizedHead = shortLog(dag.finalizedHead),
    tail = shortLog(tailRef),
    totalBlocks = blocks.len

  dag
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-08-09 12:54:45 +00:00
|
|
|
template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
  ## The network's `genesis_validators_root`, read from the always-loaded
  ## head state.
  getStateField(dag.headState.data, genesis_validators_root)
|
|
|
|
|
2021-06-01 12:40:13 +00:00
|
|
|
func getEpochRef*(
    dag: ChainDAGRef, state: StateData, cache: var StateCache): EpochRef =
  ## Return the `EpochRef` for the epoch/block the given state sits at,
  ## creating and (for unfinalized epochs) caching it on a miss.
  let
    blck = state.blck
    epoch = state.data.get_current_epoch()

  var epochRef = dag.findEpochRef(blck, epoch)
  if epochRef == nil:
    epochRef = EpochRef.init(dag, state, cache)

    if epoch >= dag.finalizedHead.slot.epoch():
      # Only cache epoch information for unfinalized blocks - earlier states
      # are seldomly used (ie RPC), so no need to cache

      # Because we put a cap on the number of epochRefs we store, we want to
      # prune the least useful state - for now, we'll assume that to be the
      # oldest epochRef we know about.

      # Find the victim slot: prefer an empty slot, otherwise the entry with
      # the lowest epoch.
      var
        oldest = 0
      for x in 0..<dag.epochRefs.len:
        let candidate = dag.epochRefs[x]
        if candidate == nil:
          oldest = x
          break
        if candidate.key.epoch < dag.epochRefs[oldest].epoch:
          oldest = x

      dag.epochRefs[oldest] = epochRef

  epochRef
|
2021-03-17 10:17:15 +00:00
|
|
|
|
2020-08-18 20:29:33 +00:00
|
|
|
proc getEpochRef*(dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): EpochRef =
  ## Return the `EpochRef` valid for `blck` during `epoch`, serving from the
  ## cache when possible and otherwise rewinding `epochRefState` to the epoch
  ## boundary to compute it (potentially expensive).
  let epochRef = dag.findEpochRef(blck, epoch)
  if epochRef != nil:
    beacon_state_data_cache_hits.inc
    return epochRef

  beacon_state_data_cache_misses.inc

  let
    ancestor = blck.epochAncestor(epoch)

  # `withState` positions `epochRefState` at the epoch start and injects
  # `stateData` / `cache` for the body.
  dag.withState(
      dag.epochRefState, ancestor.blck.atEpochStart(ancestor.epoch)):
    dag.getEpochRef(stateData, cache)
|
2020-07-28 13:54:32 +00:00
|
|
|
|
2020-10-26 08:55:10 +00:00
|
|
|
proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
  ## The `EpochRef` corresponding to the current finalized head.
  let finalized = dag.finalizedHead
  dag.getEpochRef(finalized.blck, finalized.slot.epoch)
|
|
|
|
|
2020-05-19 14:18:07 +00:00
|
|
|
proc getState(
    dag: ChainDAGRef, state: var StateData, stateRoot: Eth2Digest,
    blck: BlockRef): bool =
  ## Load the state with the given root from the database into `state`,
  ## choosing the phase0 or altair table based on the epoch of `blck`.
  ## On a partial read, `state` is restored from another loaded state so it
  ## never ends up half-written. Returns false if the state is not found.
  let restoreAddr =
    # Any restore point will do as long as it's not the object being updated
    if unsafeAddr(state) == unsafeAddr(dag.headState):
      unsafeAddr dag.clearanceState
    else:
      unsafeAddr dag.headState

  let v = addr state.data

  # Rollback callback handed to the database reader - copies a known-good
  # state over the partially written one.
  func restore() =
    assign(v[], restoreAddr[].data)

  # NOTE(review): no merge branch here - a block at or past the merge fork
  # epoch would be read via the altair path; confirm against the state of
  # merge support elsewhere in this file (cf. `applyBlock`).
  if blck.slot.epoch < dag.cfg.ALTAIR_FORK_EPOCH:
    # Re-seat the forked wrapper on the right branch before deserializing.
    if state.data.beaconStateFork != forkPhase0:
      state.data = (ref ForkedHashedBeaconState)(beaconStateFork: forkPhase0)[]

    if not dag.db.getState(stateRoot, state.data.hbsPhase0.data, restore):
      return false
  else:
    if state.data.beaconStateFork != forkAltair:
      state.data = (ref ForkedHashedBeaconState)(beaconStateFork: forkAltair)[]

    if not dag.db.getAltairState(stateRoot, state.data.hbsAltair.data, restore):
      return false

  state.blck = blck
  setStateRoot(state.data, stateRoot)

  true
|
|
|
|
|
2021-03-01 19:50:43 +00:00
|
|
|
func stateCheckpoint*(bs: BlockSlot): BlockSlot =
  ## Walk backwards from `bs` until reaching the first ancestor position for
  ## which a full state snapshot is stored (see `isStateCheckpoint`).
  result = bs
  while not result.isStateCheckpoint():
    result = result.parentOrSlot
|
|
|
|
|
2021-08-09 12:54:45 +00:00
|
|
|
template forkAtEpoch*(dag: ChainDAGRef, epoch: Epoch): Fork =
  ## The `Fork` in effect at `epoch` according to the DAG's runtime config.
  forkAtEpoch(dag.cfg, epoch)
|
|
|
|
|
|
|
|
proc forkDigestAtEpoch*(dag: ChainDAGRef, epoch: Epoch): ForkDigest =
  ## The fork digest used on the network for messages belonging to `epoch`.
  # NOTE(review): only phase0/altair are distinguished here - confirm whether
  # `dag.forkDigests` has a merge digest yet at this stage of fork support.
  if epoch < dag.cfg.ALTAIR_FORK_EPOCH:
    dag.forkDigests.phase0
  else:
    dag.forkDigests.altair
|
|
|
|
|
2020-08-13 09:50:05 +00:00
|
|
|
proc getState(dag: ChainDAGRef, state: var StateData, bs: BlockSlot): bool =
  ## Load a state from the database given a block and a slot - this will first
  ## lookup the state root in the state root table then load the corresponding
  ## state, if it exists
  if not bs.isStateCheckpoint():
    return false # Only state checkpoints are stored - no need to hit DB

  let stateRoot = dag.db.getStateRoot(bs.blck.root, bs.slot)
  if stateRoot.isNone():
    return false

  dag.getState(state, stateRoot.get(), bs.blck)
|
|
|
|
|
2021-06-24 18:34:08 +00:00
|
|
|
proc putState(dag: ChainDAGRef, state: StateData) =
  ## Store a state and its root in the database - no-op unless the state sits
  ## at a checkpoint position (see `isStateCheckpoint`) or is already stored.
  logScope:
    blck = shortLog(state.blck)
    stateSlot = shortLog(getStateField(state.data, slot))
    stateRoot = shortLog(getStateRoot(state.data))

  # Only checkpoint positions are persisted - everything else is replayable.
  if not isStateCheckpoint(state.blck.atSlot(getStateField(state.data, slot))):
    return

  # Don't consider legacy tables here, they are slow to read so we'll want to
  # rewrite things in the new database anyway.
  if dag.db.containsState(getStateRoot(state.data), legacy = false):
    return

  let startTick = Moment.now()
  # Ideally we would save the state and the root lookup cache in a single
  # transaction to prevent database inconsistencies, but the state loading code
  # is resilient against one or the other going missing
  dag.db.putState(state.data)
  dag.db.putStateRoot(
    state.blck.root, getStateField(state.data, slot), getStateRoot(state.data))

  debug "Stored state", putStateDur = Moment.now() - startTick
|
|
|
|
|
2020-07-31 14:49:06 +00:00
|
|
|
func getRef*(dag: ChainDAGRef, root: Eth2Digest): BlockRef =
  ## Retrieve a resolved block reference, if available
  let key = KeyedBlockRef.asLookupKey(root)
  # HashSet has no combined check-and-get - membership is tested first, then
  # `[]` returns the stored element, which carries the full BlockRef (the
  # lookup key only has `root` populated).
  if key notin dag.blocks:
    return nil
  try:
    dag.blocks[key].blockRef()
  except KeyError:
    raiseAssert "contains"
|
2020-05-19 14:18:07 +00:00
|
|
|
|
|
|
|
func getBlockRange*(
    dag: ChainDAGRef, startSlot: Slot, skipStep: uint64,
    output: var openArray[BlockRef]): Natural =
  ## This function populates an `output` buffer of blocks
  ## with a slots ranging from `startSlot` up to, but not including,
  ## `startSlot + skipStep * output.len`, skipping any slots that don't have
  ## a block.
  ##
  ## Blocks will be written to `output` from the end without gaps, even if
  ## a block is missing in a particular slot. The return value shows how
  ## many slots were missing blocks - to iterate over the result, start
  ## at this index.
  ##
  ## If there were no blocks in the range, `output.len` will be returned.
  let
    requestedCount = output.lenu64
    headSlot = dag.head.slot

  trace "getBlockRange entered",
    head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot

  # Reject ranges that fall outside [tail, head) or empty buffers.
  if startSlot < dag.tail.slot or headSlot <= startSlot or requestedCount == 0:
    return output.len # Identical to returning an empty set of block as indicated above

  let
    runway = uint64(headSlot - startSlot)

    # This is the number of blocks that will follow the start block
    extraBlocks = min(runway div skipStep, requestedCount - 1)

    # If `skipStep` is very large, `extraBlocks` should be 0 from
    # the previous line, so `endSlot` will be equal to `startSlot`:
    endSlot = startSlot + extraBlocks * skipStep

  var
    b = dag.head.atSlot(endSlot)
    o = output.len

  # Walk backwards from `endSlot` towards `startSlot`, filling `output` from
  # the back; a position is a real block only when blck.slot == slot.
  # Process all blocks that follow the start block (may be zero blocks)
  for i in 1..extraBlocks:
    if b.blck.slot == b.slot:
      dec o
      output[o] = b.blck
    for j in 1..skipStep:
      b = b.parent

  # We should now be at the start block.
  # Like any "block slot", it may be a missing/skipped block:
  if b.blck.slot == b.slot:
    dec o
    output[o] = b.blck

  o # Return the index of the first non-nil item in the output
|
|
|
|
|
2020-07-31 14:49:06 +00:00
|
|
|
func getBlockBySlot*(dag: ChainDAGRef, slot: Slot): BlockRef =
  ## Retrieves the first block in the current canonical chain
  ## with slot number less or equal to `slot`.
  let position = dag.head.atSlot(slot)
  position.blck
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-07-07 09:09:47 +00:00
|
|
|
proc getForkedBlock*(dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaconBlock =
  ## Load the block body for `blck` from the database, wrapped in the
  ## fork-agnostic container. Probes the phase0 table first, then altair.
  # TODO implement this properly
  # NOTE(review): no merge-table probe here - confirm against merge storage
  # support; a merge block would hit the raiseAssert below.
  let phase0Block = dag.db.getBlock(blck.root)
  if phase0Block.isOk:
    return ForkedTrustedSignedBeaconBlock.init(phase0Block.get)

  let altairBlock = dag.db.getAltairBlock(blck.root)
  if altairBlock.isOk:
    return ForkedTrustedSignedBeaconBlock.init(altairBlock.get)

  # Every BlockRef in the DAG is expected to have a stored body.
  raiseAssert "BlockRef without backing data, database corrupt?"
|
|
|
|
|
2021-07-14 12:18:52 +00:00
|
|
|
proc get*(dag: ChainDAGRef, blck: BlockRef): BlockData =
  ## Retrieve the associated block body of a block reference
  doAssert not blck.isNil, "Trying to get nil BlockRef"

  let body = dag.getForkedBlock(blck)
  BlockData(data: body, refs: blck)
|
|
|
|
|
2020-07-31 14:49:06 +00:00
|
|
|
proc get*(dag: ChainDAGRef, root: Eth2Digest): Option[BlockData] =
  ## Retrieve a resolved block reference and its associated body, if available
  let refs = dag.getRef(root)
  if refs.isNil:
    none(BlockData)
  else:
    some(dag.get(refs))
|
|
|
|
|
2020-08-13 09:50:05 +00:00
|
|
|
proc advanceSlots(
    dag: ChainDAGRef, state: var StateData, slot: Slot, save: bool,
    cache: var StateCache, rewards: var RewardInfo) =
  # Given a state, advance it zero or more slots by applying empty slot
  # processing - the state must be positions at a slot before or equal to the
  # target. When `save` is set, each checkpoint-eligible state produced along
  # the way is persisted via `putState`.
  doAssert getStateField(state.data, slot) <= slot
  while getStateField(state.data, slot) < slot:
    # Refresh shuffling/proposer caches for the epoch we're about to process.
    loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

    # Advancing one slot at a time from a consistent state cannot fail.
    doAssert process_slots(
        dag.cfg, state.data, getStateField(state.data, slot) + 1, cache, rewards,
        dag.updateFlags),
      "process_slots shouldn't fail when state slot is correct"
    if save:
      dag.putState(state)
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-08-13 09:50:05 +00:00
|
|
|
proc applyBlock(
    dag: ChainDAGRef,
    state: var StateData, blck: BlockData, flags: UpdateFlags,
    cache: var StateCache, rewards: var RewardInfo): bool =
  # Apply a single block to the state - the state must be positioned at the
  # parent of the block with a slot lower than the one of the block being
  # applied. Returns false (leaving `state` restored from the head state) if
  # the state transition fails.
  doAssert state.blck == blck.refs.parent

  var statePtr = unsafeAddr state # safe because `restore` is locally scoped
  # Rollback handed to state_transition - overwrites the broken state with
  # the head state.
  func restore(v: var ForkedHashedBeaconState) =
    doAssert (addr(statePtr.data) == addr v)
    # TODO the block_clearance version uses assign() here
    statePtr[] = dag.headState

  loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)

  # TODO some abstractions
  # Dispatch on the block's fork - each branch runs the same transition with
  # the fork-specific block body.
  let ok =
    case blck.data.kind:
    of BeaconBlockFork.Phase0:
      state_transition(
        dag.cfg, state.data, blck.data.phase0Block,
        cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
    of BeaconBlockFork.Altair:
      state_transition(
        dag.cfg, state.data, blck.data.altairBlock,
        cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
    of BeaconBlockFork.Merge:
      state_transition(
        dag.cfg, state.data, blck.data.mergeBlock,
        cache, rewards, flags + dag.updateFlags + {slotProcessed}, restore)
  if ok:
    state.blck = blck.refs

  ok
|
|
|
|
|
2020-07-22 07:51:45 +00:00
|
|
|
proc updateStateData*(
|
2020-10-18 15:47:39 +00:00
|
|
|
dag: ChainDAGRef, state: var StateData, bs: BlockSlot, save: bool,
|
2020-08-18 20:29:33 +00:00
|
|
|
cache: var StateCache) =
|
2020-05-19 14:18:07 +00:00
|
|
|
## Rewind or advance state such that it matches the given block and slot -
|
|
|
|
## this may include replaying from an earlier snapshot if blck is on a
|
|
|
|
## different branch or has advanced to a higher slot number than slot
|
|
|
|
## If slot is higher than blck.slot, replay will fill in with empty/non-block
|
|
|
|
## slots, else it is ignored
|
|
|
|
|
2020-08-13 09:50:05 +00:00
|
|
|
# First, see if we're already at the requested block. If we are, also check
|
|
|
|
# that the state has not been advanced past the desired block - if it has,
|
|
|
|
# an earlier state must be loaded since there's no way to undo the slot
|
|
|
|
# transitions
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
let startTick = Moment.now()
|
2021-03-01 19:50:43 +00:00
|
|
|
|
2020-08-13 09:50:05 +00:00
|
|
|
var
|
|
|
|
ancestors: seq[BlockRef]
|
|
|
|
cur = bs
|
2020-10-22 10:53:33 +00:00
|
|
|
found = false
|
|
|
|
|
2021-05-30 08:14:17 +00:00
|
|
|
template exactMatch(state: StateData, bs: BlockSlot): bool =
|
|
|
|
# The block is the same and we're at an early enough slot - the state can
|
|
|
|
# be used to arrive at the desired blockslot
|
2021-06-11 17:51:46 +00:00
|
|
|
state.blck == bs.blck and getStateField(state.data, slot) == bs.slot
|
2021-05-30 08:14:17 +00:00
|
|
|
|
2020-10-22 10:53:33 +00:00
|
|
|
template canAdvance(state: StateData, bs: BlockSlot): bool =
|
|
|
|
# The block is the same and we're at an early enough slot - the state can
|
|
|
|
# be used to arrive at the desired blockslot
|
2021-06-11 17:51:46 +00:00
|
|
|
state.blck == bs.blck and getStateField(state.data, slot) <= bs.slot
|
2020-10-22 10:53:33 +00:00
|
|
|
|
2021-05-30 08:14:17 +00:00
|
|
|
# Fast path: check all caches for an exact match - this is faster than
|
|
|
|
# advancing a state where there's epoch processing to do, by a wide margin -
|
|
|
|
# it also avoids `hash_tree_root` for slot processing
|
|
|
|
if exactMatch(state, cur):
|
|
|
|
found = true
|
|
|
|
elif exactMatch(dag.headState, cur):
|
|
|
|
assign(state, dag.headState)
|
|
|
|
found = true
|
|
|
|
elif exactMatch(dag.clearanceState, cur):
|
|
|
|
assign(state, dag.clearanceState)
|
|
|
|
found = true
|
|
|
|
elif exactMatch(dag.epochRefState, cur):
|
|
|
|
assign(state, dag.epochRefState)
|
|
|
|
found = true
|
|
|
|
|
2020-10-22 10:53:33 +00:00
|
|
|
# First, run a quick check if we can simply apply a few blocks to an in-memory
|
|
|
|
# state - any in-memory state will be faster than loading from database.
|
|
|
|
# The limit here how many blocks we apply is somewhat arbitrary but two full
|
|
|
|
# epochs (might be more slots if there are skips) seems like a good enough
|
|
|
|
# first guess.
|
|
|
|
# This happens in particular during startup where we replay blocks
|
|
|
|
# sequentially to grab their votes.
|
|
|
|
const RewindBlockThreshold = 64
|
2021-05-30 08:14:17 +00:00
|
|
|
while not found and ancestors.len < RewindBlockThreshold:
|
2020-10-22 10:53:33 +00:00
|
|
|
if canAdvance(state, cur):
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
|
|
|
|
if canAdvance(dag.headState, cur):
|
|
|
|
assign(state, dag.headState)
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
|
|
|
|
if canAdvance(dag.clearanceState, cur):
|
|
|
|
assign(state, dag.clearanceState)
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
|
2020-11-10 13:48:59 +00:00
|
|
|
if canAdvance(dag.epochRefState, cur):
|
|
|
|
assign(state, dag.epochRefState)
|
|
|
|
found = true
|
|
|
|
break
|
|
|
|
|
2020-10-22 10:53:33 +00:00
|
|
|
if cur.slot == cur.blck.slot:
|
|
|
|
# This is not an empty slot, so the block will need to be applied to
|
|
|
|
# eventually reach bs
|
2020-08-13 09:50:05 +00:00
|
|
|
ancestors.add(cur.blck)
|
2020-10-22 10:53:33 +00:00
|
|
|
|
|
|
|
if cur.blck.parent == nil:
|
|
|
|
break
|
|
|
|
|
|
|
|
# Moving slot by slot helps find states that were advanced with empty slots
|
2021-05-28 16:34:00 +00:00
|
|
|
cur = cur.parentOrSlot()
|
2020-10-22 10:53:33 +00:00
|
|
|
|
|
|
|
if not found:
|
|
|
|
debug "UpdateStateData cache miss",
|
2021-06-11 17:51:46 +00:00
|
|
|
bs, stateBlock = state.blck, stateSlot = getStateField(state.data, slot)
|
2020-10-22 10:53:33 +00:00
|
|
|
|
|
|
|
# Either the state is too new or was created by applying a different block.
|
|
|
|
# We'll now resort to loading the state from the database then reapplying
|
|
|
|
# blocks until we reach the desired point in time.
|
|
|
|
|
|
|
|
cur = bs
|
|
|
|
ancestors.setLen(0)
|
|
|
|
|
|
|
|
# Look for a state in the database and load it - as long as it cannot be
|
|
|
|
# found, keep track of the blocks that are needed to reach it from the
|
|
|
|
# state that eventually will be found
|
|
|
|
while not dag.getState(state, cur):
|
|
|
|
# There's no state saved for this particular BlockSlot combination, keep
|
|
|
|
# looking...
|
2021-05-28 16:34:00 +00:00
|
|
|
if cur.slot == cur.blck.slot:
|
|
|
|
# This is not an empty slot, so the block will need to be applied to
|
|
|
|
# eventually reach bs
|
2020-08-18 20:29:33 +00:00
|
|
|
ancestors.add(cur.blck)
|
2020-08-13 09:50:05 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
if cur.slot == dag.tail.slot:
|
|
|
|
# If we've walked all the way to the tail and still not found a state,
|
|
|
|
# there's no hope finding one - the database likely has become corrupt
|
|
|
|
# and one will have to resync from start.
|
|
|
|
fatal "Cannot find state to load, the database is likely corrupt",
|
|
|
|
cur, bs, head = dag.head, tail = dag.tail
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
# Move slot by slot to capture epoch boundary states
|
|
|
|
cur = cur.parentOrSlot()
|
2020-10-22 10:53:33 +00:00
|
|
|
|
|
|
|
beacon_state_rewinds.inc()
|
2020-08-13 09:50:05 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
# Starting state has been assigned, either from memory or database
|
2020-08-18 20:29:33 +00:00
|
|
|
let
|
2021-05-28 16:34:00 +00:00
|
|
|
assignTick = Moment.now()
|
2021-06-11 17:51:46 +00:00
|
|
|
startSlot {.used.} = getStateField(state.data, slot) # used in logs below
|
|
|
|
startRoot {.used.} = getStateRoot(state.data)
|
2021-05-07 11:36:21 +00:00
|
|
|
var rewards: RewardInfo
|
2020-08-13 09:50:05 +00:00
|
|
|
# Time to replay all the blocks between then and now
|
2020-05-19 14:18:07 +00:00
|
|
|
for i in countdown(ancestors.len - 1, 0):
|
|
|
|
# Because the ancestors are in the database, there's no need to persist them
|
|
|
|
# again. Also, because we're applying blocks that were loaded from the
|
|
|
|
# database, we can skip certain checks that have already been performed
|
2020-08-13 09:50:05 +00:00
|
|
|
# before adding the block to the database.
|
2020-05-19 14:18:07 +00:00
|
|
|
let ok =
|
2021-05-07 11:36:21 +00:00
|
|
|
dag.applyBlock(state, dag.get(ancestors[i]), {}, cache, rewards)
|
2020-05-19 14:18:07 +00:00
|
|
|
doAssert ok, "Blocks in database should never fail to apply.."
|
|
|
|
|
2020-10-15 12:28:44 +00:00
|
|
|
# ...and make sure to process empty slots as requested
|
2021-05-07 11:36:21 +00:00
|
|
|
dag.advanceSlots(state, bs.slot, save, cache, rewards)
|
2020-08-13 09:50:05 +00:00
|
|
|
|
2021-06-03 13:32:00 +00:00
|
|
|
# ...and make sure to load the state cache, if it exists
|
2021-06-11 17:51:46 +00:00
|
|
|
loadStateCache(dag, cache, state.blck, getStateField(state.data, slot).epoch)
|
2021-06-03 13:32:00 +00:00
|
|
|
|
2021-05-28 16:34:00 +00:00
|
|
|
let
|
|
|
|
assignDur = assignTick - startTick
|
|
|
|
replayDur = Moment.now() - assignTick
|
2021-03-01 19:50:43 +00:00
|
|
|
|
|
|
|
logScope:
|
|
|
|
blocks = ancestors.len
|
2021-06-11 17:51:46 +00:00
|
|
|
slots = getStateField(state.data, slot) - startSlot
|
|
|
|
stateRoot = shortLog(getStateRoot(state.data))
|
|
|
|
stateSlot = getStateField(state.data, slot)
|
2021-03-01 19:50:43 +00:00
|
|
|
startRoot = shortLog(startRoot)
|
|
|
|
startSlot
|
|
|
|
blck = shortLog(bs)
|
2020-10-22 10:53:33 +00:00
|
|
|
found
|
2021-05-28 16:34:00 +00:00
|
|
|
assignDur
|
|
|
|
replayDur
|
2021-03-01 19:50:43 +00:00
|
|
|
|
2021-06-01 15:33:00 +00:00
|
|
|
if (assignDur + replayDur) >= 250.millis:
|
2021-03-01 19:50:43 +00:00
|
|
|
# This might indicate there's a cache that's not in order or a disk that is
|
|
|
|
# too slow - for now, it's here for investigative purposes and the cutoff
|
|
|
|
# time might need tuning
|
|
|
|
info "State replayed"
|
|
|
|
elif ancestors.len > 0:
|
|
|
|
debug "State replayed"
|
|
|
|
else:
|
2021-05-30 18:05:45 +00:00
|
|
|
trace "State advanced" # Normal case!
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-07-31 14:49:06 +00:00
|
|
|
proc delState(dag: ChainDAGRef, bs: BlockSlot) =
|
2020-05-19 14:18:07 +00:00
|
|
|
# Delete state state and mapping for a particular block+slot
|
2021-06-10 22:07:16 +00:00
|
|
|
if not isStateCheckpoint(bs):
|
2020-08-13 09:50:05 +00:00
|
|
|
return # We only ever save epoch states
|
2021-06-10 22:07:16 +00:00
|
|
|
|
2020-05-19 14:18:07 +00:00
|
|
|
if (let root = dag.db.getStateRoot(bs.blck.root, bs.slot); root.isSome()):
|
|
|
|
dag.db.delState(root.get())
|
2020-08-13 09:50:05 +00:00
|
|
|
dag.db.delStateRoot(bs.blck.root, bs.slot)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-03-09 14:36:17 +00:00
|
|
|
proc pruneBlocksDAG(dag: ChainDAGRef) =
|
|
|
|
## This prunes the block DAG
|
|
|
|
## This does NOT prune the cached state checkpoints and EpochRef
|
2021-04-01 11:26:17 +00:00
|
|
|
## This must be done after a new finalization point is reached
|
2021-03-09 14:36:17 +00:00
|
|
|
## to invalidate pending blocks or attestations referring
|
|
|
|
## to a now invalid fork.
|
|
|
|
##
|
|
|
|
## This does NOT update the `dag.lastPrunePoint` field.
|
|
|
|
## as the caches and fork choice can be pruned at a later time.
|
|
|
|
|
|
|
|
# Clean up block refs, walking block by block
|
2021-05-28 16:34:00 +00:00
|
|
|
let startTick = Moment.now()
|
2021-04-01 11:26:17 +00:00
|
|
|
|
|
|
|
# Finalization means that we choose a single chain as the canonical one -
|
|
|
|
# it also means we're no longer interested in any branches from that chain
|
|
|
|
# up to the finalization point
|
|
|
|
let hlen = dag.heads.len
|
|
|
|
for i in 0..<hlen:
|
|
|
|
let n = hlen - i - 1
|
|
|
|
let head = dag.heads[n]
|
|
|
|
if dag.finalizedHead.blck.isAncestorOf(head):
|
|
|
|
continue
|
|
|
|
|
|
|
|
var cur = head.atSlot(head.slot)
|
|
|
|
while not cur.blck.isAncestorOf(dag.finalizedHead.blck):
|
|
|
|
dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`
|
|
|
|
|
|
|
|
if cur.blck.slot == cur.slot:
|
|
|
|
dag.blocks.excl(KeyedBlockRef.init(cur.blck))
|
|
|
|
dag.db.delBlock(cur.blck.root)
|
|
|
|
|
|
|
|
if cur.blck.parent.isNil:
|
|
|
|
break
|
|
|
|
cur = cur.parentOrSlot
|
|
|
|
|
|
|
|
dag.heads.del(n)
|
|
|
|
|
|
|
|
debug "Pruned the blockchain DAG",
|
|
|
|
currentCandidateHeads = dag.heads.len,
|
|
|
|
prunedHeads = hlen - dag.heads.len,
|
2021-05-28 19:03:20 +00:00
|
|
|
dagPruneDur = Moment.now() - startTick
|
2021-03-09 14:36:17 +00:00
|
|
|
|
2021-08-28 10:40:01 +00:00
|
|
|
func syncSubcommittee*(syncCommittee: openarray[ValidatorPubKey],
|
|
|
|
committeeIdx: SyncCommitteeIndex): seq[ValidatorPubKey] =
|
|
|
|
## TODO Return a view type
|
|
|
|
## Unfortunately, this doesn't work as a template right now.
|
|
|
|
if syncCommittee.len == 0:
|
|
|
|
return @[]
|
|
|
|
|
|
|
|
let
|
|
|
|
startIdx = committeeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
|
|
|
|
onePastEndIdx = startIdx + SYNC_SUBCOMMITTEE_SIZE
|
|
|
|
doAssert startIdx < syncCommittee.len
|
|
|
|
|
|
|
|
@(toOpenArray(syncCommittee, startIdx, onePastEndIdx - 1))
|
|
|
|
|
|
|
|
func syncCommitteeParticipants*(dagParam: ChainDAGRef,
|
|
|
|
slotParam: Slot): seq[ValidatorPubKey] =
|
|
|
|
# TODO:
|
|
|
|
# Use view types in Nim 1.6
|
|
|
|
# Right now, the compiler is not able to handle turning this into a
|
|
|
|
# template and returning an openarray
|
|
|
|
let
|
|
|
|
dag = dagParam
|
|
|
|
slot = slotParam
|
|
|
|
|
|
|
|
if dag.headState.data.beaconStateFork == forkAltair:
|
|
|
|
let
|
|
|
|
headSlot = dag.headState.data.hbsAltair.data.slot
|
|
|
|
headCommitteePeriod = syncCommitteePeriod(headSlot)
|
|
|
|
periodStart = syncCommitteePeriodStartSlot(headCommitteePeriod)
|
|
|
|
nextPeriodStart = periodStart + SLOTS_PER_SYNC_COMMITTEE_PERIOD
|
|
|
|
|
|
|
|
if slot >= nextPeriodStart:
|
|
|
|
@(dag.headState.data.hbsAltair.data.next_sync_committee.pubkeys.data)
|
|
|
|
elif slot >= periodStart:
|
|
|
|
@(dag.headState.data.hbsAltair.data.current_sync_committee.pubkeys.data)
|
|
|
|
else:
|
|
|
|
@[]
|
|
|
|
else:
|
|
|
|
@[]
|
|
|
|
|
2021-09-28 07:44:20 +00:00
|
|
|
func getSubcommitteePositionsAux(
|
2021-08-28 10:40:01 +00:00
|
|
|
dag: ChainDAGRef,
|
|
|
|
syncCommittee: openarray[ValidatorPubKey],
|
|
|
|
committeeIdx: SyncCommitteeIndex,
|
2021-09-28 07:44:20 +00:00
|
|
|
validatorIdx: uint64): seq[uint64] =
|
2021-08-28 10:40:01 +00:00
|
|
|
# TODO Can we avoid the key conversions by getting a compressed key
|
|
|
|
# out of ImmutableValidatorData2? If we had this, we can define
|
|
|
|
# the function `dag.validatorKeyBytes` and use it here.
|
|
|
|
let validatorKey = dag.validatorKey(validatorIdx)
|
|
|
|
if validatorKey.isNone():
|
2021-09-28 07:44:20 +00:00
|
|
|
return @[]
|
2021-08-28 10:40:01 +00:00
|
|
|
let validatorPubKey = validatorKey.get().toPubKey
|
|
|
|
|
|
|
|
for pos, key in syncCommittee.syncSubcommittee(committeeIdx):
|
|
|
|
if validatorPubKey == key:
|
2021-09-28 07:44:20 +00:00
|
|
|
result.add uint64(pos)
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2021-09-28 07:44:20 +00:00
|
|
|
func getSubcommitteePositions*(dag: ChainDAGRef,
|
|
|
|
slot: Slot,
|
|
|
|
committeeIdx: SyncCommitteeIndex,
|
|
|
|
validatorIdx: uint64): seq[uint64] =
|
2021-08-28 10:40:01 +00:00
|
|
|
if dag.headState.data.beaconStateFork == forkPhase0:
|
2021-09-28 07:44:20 +00:00
|
|
|
return @[]
|
2021-08-28 10:40:01 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
headSlot = dag.headState.data.hbsAltair.data.slot
|
|
|
|
headCommitteePeriod = syncCommitteePeriod(headSlot)
|
|
|
|
periodStart = syncCommitteePeriodStartSlot(headCommitteePeriod)
|
|
|
|
nextPeriodStart = periodStart + SLOTS_PER_SYNC_COMMITTEE_PERIOD
|
|
|
|
|
2021-09-28 07:44:20 +00:00
|
|
|
template search(syncCommittee: openarray[ValidatorPubKey]): seq[uint64] =
|
|
|
|
dag.getSubcommitteePositionsAux(syncCommittee, committeeIdx, validatorIdx)
|
2021-08-28 10:40:01 +00:00
|
|
|
|
|
|
|
if slot < periodStart:
|
2021-09-28 07:44:20 +00:00
|
|
|
return @[]
|
2021-08-28 10:40:01 +00:00
|
|
|
elif slot >= nextPeriodStart:
|
|
|
|
return search(dag.headState.data.hbsAltair.data.next_sync_committee.pubkeys.data)
|
|
|
|
else:
|
|
|
|
return search(dag.headState.data.hbsAltair.data.current_sync_committee.pubkeys.data)
|
|
|
|
|
|
|
|
template syncCommitteeParticipants*(
|
|
|
|
dag: ChainDAGRef,
|
|
|
|
slot: Slot,
|
|
|
|
committeeIdx: SyncCommitteeIndex): seq[ValidatorPubKey] =
|
|
|
|
let
|
|
|
|
startIdx = committeeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE
|
|
|
|
onePastEndIdx = startIdx + SYNC_SUBCOMMITTEE_SIZE
|
|
|
|
# TODO Nim is not happy with returning an openarray here
|
|
|
|
@(toOpenArray(dag.syncCommitteeParticipants(slot), startIdx, onePastEndIdx - 1))
|
|
|
|
|
|
|
|
iterator syncCommitteeParticipants*(
|
|
|
|
dag: ChainDAGRef,
|
|
|
|
slot: Slot,
|
|
|
|
committeeIdx: SyncCommitteeIndex,
|
|
|
|
aggregationBits: SyncCommitteeAggregationBits): ValidatorPubKey =
|
|
|
|
for pos, valIdx in pairs(dag.syncCommitteeParticipants(slot, committeeIdx)):
|
|
|
|
if aggregationBits[pos]:
|
|
|
|
yield valIdx
|
|
|
|
|
2021-03-09 14:36:17 +00:00
|
|
|
func needStateCachesAndForkChoicePruning*(dag: ChainDAGRef): bool =
|
|
|
|
dag.lastPrunePoint != dag.finalizedHead
|
|
|
|
|
|
|
|
proc pruneStateCachesDAG*(dag: ChainDAGRef) =
|
|
|
|
## This prunes the cached state checkpoints and EpochRef
|
|
|
|
## This does NOT prune the state associated with invalidated blocks on a fork
|
|
|
|
## They are pruned via `pruneBlocksDAG`
|
|
|
|
##
|
|
|
|
## This updates the `dag.lastPrunePoint` variable
|
|
|
|
doAssert dag.needStateCachesAndForkChoicePruning()
|
|
|
|
|
2021-05-28 19:03:20 +00:00
|
|
|
let startTick = Moment.now()
|
2021-03-09 14:36:17 +00:00
|
|
|
block: # Remove states, walking slot by slot
|
|
|
|
# We remove all state checkpoints that come _before_ the current finalized
|
|
|
|
# head, as we might frequently be asked to replay states from the
|
|
|
|
# finalized checkpoint and onwards (for example when validating blocks and
|
|
|
|
# attestations)
|
|
|
|
var
|
|
|
|
cur = dag.finalizedHead.stateCheckpoint.parentOrSlot
|
|
|
|
prev = dag.lastPrunePoint.stateCheckpoint.parentOrSlot
|
|
|
|
while cur.blck != nil and cur != prev:
|
|
|
|
# TODO This is a quick fix to prune some states from the database, but
|
|
|
|
# not all, pending a smarter storage - the downside of pruning these
|
|
|
|
# states is that certain rewinds will take longer
|
|
|
|
# After long periods of non-finalization, it can also take some time to
|
|
|
|
# release all these states!
|
|
|
|
if cur.slot.epoch mod 32 != 0 and cur.slot != dag.tail.slot:
|
|
|
|
dag.delState(cur)
|
|
|
|
cur = cur.parentOrSlot
|
2021-05-28 19:03:20 +00:00
|
|
|
let statePruneTick = Moment.now()
|
2021-03-09 14:36:17 +00:00
|
|
|
|
|
|
|
block: # Clean up old EpochRef instances
|
|
|
|
# After finalization, we can clear up the epoch cache and save memory -
|
|
|
|
# it will be recomputed if needed
|
2021-03-17 10:17:15 +00:00
|
|
|
for i in 0..<dag.epochRefs.len:
|
2021-06-10 22:07:16 +00:00
|
|
|
if dag.epochRefs[i] != nil and
|
|
|
|
dag.epochRefs[i].epoch < dag.finalizedHead.slot.epoch:
|
|
|
|
dag.epochRefs[i] = nil
|
2021-05-28 19:03:20 +00:00
|
|
|
let epochRefPruneTick = Moment.now()
|
2021-03-09 14:36:17 +00:00
|
|
|
|
|
|
|
dag.lastPrunePoint = dag.finalizedHead
|
|
|
|
|
2021-03-17 06:30:16 +00:00
|
|
|
debug "Pruned the state checkpoints and DAG caches.",
|
2021-05-28 19:03:20 +00:00
|
|
|
statePruneDur = statePruneTick - startTick,
|
|
|
|
epochRefPruneDur = epochRefPruneTick - statePruneTick
|
2021-03-09 14:36:17 +00:00
|
|
|
|
2020-08-31 09:00:38 +00:00
|
|
|
proc updateHead*(
|
2021-03-09 14:36:17 +00:00
|
|
|
dag: ChainDAGRef,
|
|
|
|
newHead: BlockRef,
|
2021-06-01 11:13:40 +00:00
|
|
|
quarantine: QuarantineRef) =
|
2020-05-19 14:18:07 +00:00
|
|
|
## Update what we consider to be the current head, as given by the fork
|
|
|
|
## choice.
|
2021-03-09 14:36:17 +00:00
|
|
|
##
|
2020-05-19 14:18:07 +00:00
|
|
|
## The choice of head affects the choice of finalization point - the order
|
|
|
|
## of operations naturally becomes important here - after updating the head,
|
|
|
|
## blocks that were once considered potential candidates for a tree will
|
|
|
|
## now fall from grace, or no longer be considered resolved.
|
2020-08-26 15:23:34 +00:00
|
|
|
doAssert not newHead.isNil()
|
2020-09-22 20:42:42 +00:00
|
|
|
doAssert not newHead.parent.isNil() or newHead.slot <= dag.tail.slot
|
2020-07-16 13:16:51 +00:00
|
|
|
logScope:
|
|
|
|
newHead = shortLog(newHead)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-07-28 13:54:32 +00:00
|
|
|
if dag.head == newHead:
|
2020-10-01 18:56:42 +00:00
|
|
|
trace "No head block update"
|
2020-05-19 14:18:07 +00:00
|
|
|
return
|
|
|
|
|
|
|
|
let
|
|
|
|
lastHead = dag.head
|
2021-09-22 12:17:15 +00:00
|
|
|
lastHeadStateRoot = getStateRoot(dag.headState.data)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-11-02 17:34:23 +00:00
|
|
|
# Start off by making sure we have the right state - updateStateData will try
|
|
|
|
# to use existing in-memory states to make this smooth
|
|
|
|
var cache: StateCache
|
|
|
|
updateStateData(
|
|
|
|
dag, dag.headState, newHead.atSlot(newHead.slot), false, cache)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-11-27 22:16:13 +00:00
|
|
|
dag.db.putHeadBlock(newHead.root)
|
|
|
|
|
|
|
|
let
|
|
|
|
finalizedHead = newHead.atEpochStart(
|
2021-06-11 17:51:46 +00:00
|
|
|
getStateField(dag.headState.data, finalized_checkpoint).epoch)
|
2020-11-27 22:16:13 +00:00
|
|
|
|
|
|
|
doAssert (not finalizedHead.blck.isNil),
|
|
|
|
"Block graph should always lead to a finalized block"
|
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
|
|
|
|
if not(isAncestor):
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Updated head block with chain reorg",
|
2020-07-28 13:54:32 +00:00
|
|
|
lastHead = shortLog(lastHead),
|
2020-05-19 14:18:07 +00:00
|
|
|
headParent = shortLog(newHead.parent),
|
2021-06-11 17:51:46 +00:00
|
|
|
stateRoot = shortLog(getStateRoot(dag.headState.data)),
|
2020-05-19 14:18:07 +00:00
|
|
|
headBlock = shortLog(dag.headState.blck),
|
2021-06-11 17:51:46 +00:00
|
|
|
stateSlot = shortLog(getStateField(dag.headState.data, slot)),
|
|
|
|
justified = shortLog(getStateField(
|
|
|
|
dag.headState.data, current_justified_checkpoint)),
|
|
|
|
finalized = shortLog(getStateField(
|
|
|
|
dag.headState.data, finalized_checkpoint))
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
if not(isNil(dag.onReorgHappened)):
|
|
|
|
let data = ReorgInfoObject.init(dag.head.slot, uint64(ancestorDepth),
|
|
|
|
lastHead.root, newHead.root,
|
|
|
|
lastHeadStateRoot,
|
|
|
|
getStateRoot(dag.headState.data))
|
|
|
|
dag.onReorgHappened(data)
|
|
|
|
|
2020-05-19 14:18:07 +00:00
|
|
|
# A reasonable criterion for "reorganizations of the chain"
|
2020-08-31 09:00:38 +00:00
|
|
|
quarantine.clearQuarantine()
|
2020-05-19 14:18:07 +00:00
|
|
|
beacon_reorgs_total.inc()
|
|
|
|
else:
|
2020-10-01 18:56:42 +00:00
|
|
|
debug "Updated head block",
|
2021-06-11 17:51:46 +00:00
|
|
|
stateRoot = shortLog(getStateRoot(dag.headState.data)),
|
2020-05-19 14:18:07 +00:00
|
|
|
headBlock = shortLog(dag.headState.blck),
|
2021-06-11 17:51:46 +00:00
|
|
|
stateSlot = shortLog(getStateField(dag.headState.data, slot)),
|
2021-04-08 08:24:25 +00:00
|
|
|
justified = shortLog(getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, current_justified_checkpoint)),
|
2021-04-08 08:24:25 +00:00
|
|
|
finalized = shortLog(getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, finalized_checkpoint))
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
if not(isNil(dag.onHeadChanged)):
|
|
|
|
let currentEpoch = epoch(newHead.slot)
|
|
|
|
let
|
|
|
|
currentDutyDepRoot =
|
|
|
|
if currentEpoch > Epoch(0):
|
|
|
|
dag.head.atSlot(
|
|
|
|
compute_start_slot_at_epoch(currentEpoch) - 1).blck.root
|
|
|
|
else:
|
|
|
|
dag.genesis.root
|
|
|
|
previousDutyDepRoot =
|
|
|
|
if currentEpoch > Epoch(1):
|
|
|
|
dag.head.atSlot(
|
|
|
|
compute_start_slot_at_epoch(currentEpoch - 1) - 1).blck.root
|
|
|
|
else:
|
|
|
|
dag.genesis.root
|
|
|
|
epochTransition = (finalizedHead != dag.finalizedHead)
|
|
|
|
let data = HeadChangeInfoObject.init(dag.head.slot, dag.head.root,
|
|
|
|
getStateRoot(dag.headState.data),
|
|
|
|
epochTransition, previousDutyDepRoot,
|
|
|
|
currentDutyDepRoot)
|
|
|
|
dag.onHeadChanged(data)
|
|
|
|
|
2020-08-26 15:25:39 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
|
2021-03-01 19:55:25 +00:00
|
|
|
# both non-negative, so difference can't overflow or underflow int64
|
|
|
|
beacon_pending_deposits.set(
|
2021-06-11 17:51:46 +00:00
|
|
|
getStateField(dag.headState.data, eth1_data).deposit_count.toGaugeValue -
|
|
|
|
getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)
|
2021-03-01 19:55:25 +00:00
|
|
|
beacon_processed_deposits_total.set(
|
2021-06-11 17:51:46 +00:00
|
|
|
getStateField(dag.headState.data, eth1_deposit_index).toGaugeValue)
|
2020-08-26 15:25:39 +00:00
|
|
|
|
2020-11-27 22:16:13 +00:00
|
|
|
beacon_head_root.set newHead.root.toGaugeValue
|
2021-03-01 19:55:25 +00:00
|
|
|
beacon_head_slot.set newHead.slot.toGaugeValue
|
2020-11-27 22:16:13 +00:00
|
|
|
|
|
|
|
if lastHead.slot.epoch != newHead.slot.epoch:
|
|
|
|
# Epoch updated - in theory, these could happen when the wall clock
|
|
|
|
# changes epoch, even if there is no new block / head, but we'll delay
|
|
|
|
# updating them until a block confirms the change
|
|
|
|
beacon_current_justified_epoch.set(
|
2021-04-08 08:24:25 +00:00
|
|
|
getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, current_justified_checkpoint).epoch.toGaugeValue)
|
2020-11-27 22:16:13 +00:00
|
|
|
beacon_current_justified_root.set(
|
2021-04-08 08:24:25 +00:00
|
|
|
getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, current_justified_checkpoint).root.toGaugeValue)
|
2020-11-27 22:16:13 +00:00
|
|
|
beacon_previous_justified_epoch.set(
|
2021-04-08 08:24:25 +00:00
|
|
|
getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, previous_justified_checkpoint).epoch.toGaugeValue)
|
2020-11-27 22:16:13 +00:00
|
|
|
beacon_previous_justified_root.set(
|
2021-04-08 08:24:25 +00:00
|
|
|
getStateField(
|
2021-06-11 17:51:46 +00:00
|
|
|
dag.headState.data, previous_justified_checkpoint).root.toGaugeValue)
|
2020-11-27 22:16:13 +00:00
|
|
|
|
|
|
|
let epochRef = getEpochRef(dag, newHead, newHead.slot.epoch)
|
|
|
|
beacon_active_validators.set(
|
2021-03-01 19:55:25 +00:00
|
|
|
epochRef.shuffled_active_validator_indices.lenu64().toGaugeValue)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
|
|
|
if finalizedHead != dag.finalizedHead:
|
2021-03-09 14:36:17 +00:00
|
|
|
notice "Reached new finalization checkpoint",
|
|
|
|
newFinalizedHead = shortLog(finalizedHead),
|
|
|
|
oldFinalizedHead = shortLog(dag.finalizedHead)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-07-25 19:41:12 +00:00
|
|
|
dag.finalizedHead = finalizedHead
|
|
|
|
|
2021-06-11 17:51:46 +00:00
|
|
|
beacon_finalized_epoch.set(getStateField(
|
|
|
|
dag.headState.data, finalized_checkpoint).epoch.toGaugeValue)
|
|
|
|
beacon_finalized_root.set(getStateField(
|
|
|
|
dag.headState.data, finalized_checkpoint).root.toGaugeValue)
|
2020-11-27 22:16:13 +00:00
|
|
|
|
2021-04-01 11:26:17 +00:00
|
|
|
# Pruning the block dag is required every time the finalized head changes
|
|
|
|
# in order to clear out blocks that are no longer viable and should
|
|
|
|
# therefore no longer be considered as part of the chain we're following
|
|
|
|
dag.pruneBlocksDAG()
|
2021-03-09 14:36:17 +00:00
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
# Send notification about new finalization point via callback.
|
|
|
|
if not(isNil(dag.onFinHappened)):
|
|
|
|
let epoch = getStateField(
|
|
|
|
dag.headState.data, finalized_checkpoint).epoch
|
|
|
|
let blckRoot = getStateField(
|
|
|
|
dag.headState.data, finalized_checkpoint).root
|
|
|
|
let data = FinalizationInfoObject.init(blckRoot,
|
|
|
|
getStateRoot(dag.headState.data),
|
|
|
|
epoch)
|
|
|
|
dag.onFinHappened(data)
|
|
|
|
|
2020-07-31 14:49:06 +00:00
|
|
|
proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): bool =
|
2020-05-19 14:18:07 +00:00
|
|
|
let
|
|
|
|
headBlockRoot = db.getHeadBlock()
|
|
|
|
tailBlockRoot = db.getTailBlock()
|
|
|
|
|
|
|
|
if not (headBlockRoot.isSome() and tailBlockRoot.isSome()):
|
|
|
|
return false
|
|
|
|
|
|
|
|
let
|
2021-08-01 08:20:43 +00:00
|
|
|
headBlockPhase0 = db.getBlock(headBlockRoot.get())
|
|
|
|
headBlockAltair = db.getAltairBlock(headBlockRoot.get())
|
2020-05-19 14:18:07 +00:00
|
|
|
tailBlock = db.getBlock(tailBlockRoot.get())
|
|
|
|
|
2021-08-01 08:20:43 +00:00
|
|
|
if not ((headBlockPhase0.isSome() or headBlockAltair.isSome()) and
|
|
|
|
tailBlock.isSome()):
|
2020-05-19 14:18:07 +00:00
|
|
|
return false
|
|
|
|
|
|
|
|
if not db.containsState(tailBlock.get().message.state_root):
|
|
|
|
return false
|
|
|
|
|
2020-07-21 16:35:43 +00:00
|
|
|
true
|
2020-05-19 14:18:07 +00:00
|
|
|
|
|
|
|
proc preInit*(
|
2020-09-22 20:42:42 +00:00
|
|
|
T: type ChainDAGRef, db: BeaconChainDB,
|
2021-06-03 09:42:25 +00:00
|
|
|
genesisState, tailState: var phase0.BeaconState, tailBlock: phase0.TrustedSignedBeaconBlock) =
|
2020-07-31 14:49:06 +00:00
|
|
|
# write a genesis state, the way the ChainDAGRef expects it to be stored in
|
2020-05-19 14:18:07 +00:00
|
|
|
# database
|
2020-07-30 19:18:17 +00:00
|
|
|
# TODO probably should just init a block pool with the freshly written
|
2020-05-19 14:18:07 +00:00
|
|
|
# state - but there's more refactoring needed to make it nice - doing
|
|
|
|
# a minimal patch for now..
|
2020-09-22 20:42:42 +00:00
|
|
|
doAssert tailBlock.message.state_root == hash_tree_root(tailState)
|
2020-05-19 14:18:07 +00:00
|
|
|
notice "New database from snapshot",
|
2020-09-22 20:42:42 +00:00
|
|
|
blockRoot = shortLog(tailBlock.root),
|
|
|
|
stateRoot = shortLog(tailBlock.message.state_root),
|
|
|
|
fork = tailState.fork,
|
|
|
|
validators = tailState.validators.len()
|
|
|
|
|
|
|
|
db.putState(tailState)
|
|
|
|
db.putBlock(tailBlock)
|
|
|
|
db.putTailBlock(tailBlock.root)
|
|
|
|
db.putHeadBlock(tailBlock.root)
|
|
|
|
db.putStateRoot(tailBlock.root, tailState.slot, tailBlock.message.state_root)
|
|
|
|
|
|
|
|
if tailState.slot == GENESIS_SLOT:
|
|
|
|
db.putGenesisBlockRoot(tailBlock.root)
|
|
|
|
else:
|
|
|
|
doAssert genesisState.slot == GENESIS_SLOT
|
|
|
|
db.putState(genesisState)
|
|
|
|
let genesisBlock = get_initial_beacon_block(genesisState)
|
|
|
|
db.putBlock(genesisBlock)
|
|
|
|
db.putStateRoot(genesisBlock.root, GENESIS_SLOT, genesisBlock.message.state_root)
|
|
|
|
db.putGenesisBlockRoot(genesisBlock.root)
|
|
|
|
|
2021-06-01 12:40:13 +00:00
|
|
|
func setTailState*(dag: ChainDAGRef,
|
2021-06-03 09:42:25 +00:00
|
|
|
checkpointState: phase0.BeaconState,
|
|
|
|
checkpointBlock: phase0.TrustedSignedBeaconBlock) =
|
2020-11-10 18:41:04 +00:00
|
|
|
# TODO(zah)
|
2020-09-22 20:42:42 +00:00
|
|
|
# Delete all records up to the tail node. If the tail node is not
|
|
|
|
# in the database, init the dabase in a way similar to `preInit`.
|
|
|
|
discard
|
|
|
|
|
|
|
|
proc getGenesisBlockData*(dag: ChainDAGRef): BlockData =
|
2020-10-06 15:32:17 +00:00
|
|
|
dag.get(dag.genesis)
|
2020-09-22 20:42:42 +00:00
|
|
|
|
2021-06-01 12:40:13 +00:00
|
|
|
func getGenesisBlockSlot*(dag: ChainDAGRef): BlockSlot =
  ## The genesis block reference paired with `GENESIS_SLOT` — the canonical
  ## starting point of the chain held by this DAG.
  BlockSlot(blck: dag.genesis, slot: GENESIS_SLOT)
|
2020-05-19 14:18:07 +00:00
|
|
|
|
2020-05-22 14:21:22 +00:00
|
|
|
proc getProposer*(
    dag: ChainDAGRef, head: BlockRef, slot: Slot): Option[ValidatorIndex] =
  ## Return the validator index expected to propose a block at `slot` on the
  ## chain ending at `head`, or `none(ValidatorIndex)` when no proposer is
  ## assigned or the proposer's key is missing from the validator key cache.
  let
    # Hoisted: the original computed `slot.compute_epoch_at_slot()` twice.
    epoch = slot.compute_epoch_at_slot()
    epochRef = dag.getEpochRef(head, epoch)
    slotInEpoch = slot - epoch.compute_start_slot_at_epoch()

  let proposer = epochRef.beacon_proposers[slotInEpoch]
  if proposer.isSome():
    if proposer.get().uint64 >= dag.db.immutableValidators.lenu64():
      # Sanity check - it should never happen that the key cache doesn't contain
      # a key for the selected proposer - that would mean that we somehow
      # created validators in the state without updating the cache!
      warn "Proposer key not found",
        keys = dag.db.immutableValidators.lenu64(), proposer = proposer.get()
      return none(ValidatorIndex)

  proposer
|