# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/[os, random, terminal, times],
  chronos, chronicles,
  metrics, metrics/chronos_httpserver,
  stew/[byteutils, io2],
  eth/p2p/discoveryv5/[enr, random2],
  ./consensus_object_pools/blob_quarantine,
  ./consensus_object_pools/data_column_quarantine,
  ./consensus_object_pools/vanity_logs/vanity_logs,
  ./networking/[topic_params, network_metadata_downloads, eth2_network],
  ./rpc/[rest_api, state_ttl_cache],
  ./spec/datatypes/[altair, bellatrix, phase0],
  ./spec/[deposit_snapshots, eip7594_helpers, engine_authentication, weak_subjectivity],
  ./sync/[sync_protocol, light_client_protocol],
  ./validators/[keystore_management, beacon_validators],
  "."/[
    beacon_node, beacon_node_light_client, deposits,
    nimbus_binary_common, statusbar, trusted_node_sync, wallets]

when defined(posix):
  import system/ansi_c

from ./spec/datatypes/deneb import SignedBeaconBlock
from ./spec/datatypes/electra import SignedBeaconBlock

from
  libp2p/protocols/pubsub/gossipsub
import
  TopicParams, validateParameters, init

logScope: topics = "beacnde"

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_slot, "Latest slot of the beacon chain state"
declareGauge beacon_current_epoch, "Current epoch"

# Finalization tracking
declareGauge finalization_delay,
  "Epoch delay between scheduled epoch and finalized epoch"

declareGauge ticks_delay,
  "How long it takes to run the onSecond loop"

declareGauge next_action_wait,
  "Seconds until the next attestation will be sent"

declareGauge next_proposal_wait,
  "Seconds until the next proposal will be sent, or Inf if not known"

declareGauge sync_committee_active,
  "1 if there are current sync committee duties, 0 otherwise"

declareCounter db_checkpoint_seconds,
  "Time spent checkpointing the database to clear the WAL file"

proc fetchGenesisState(
    metadata: Eth2NetworkMetadata,
    genesisState = none(InputFile),
    genesisStateUrl = none(Uri)
): Future[ref ForkedHashedBeaconState] {.async: (raises: []).} =
  let genesisBytes =
    if metadata.genesis.kind != BakedIn and genesisState.isSome:
      let res = io2.readAllBytes(genesisState.get.string)
      res.valueOr:
        error "Failed to read genesis state file", err = res.error.ioErrorMsg
        quit 1
    elif metadata.hasGenesis:
      try:
        if metadata.genesis.kind == BakedInUrl:
          info "Obtaining genesis state",
            sourceUrl = $genesisStateUrl
              .get(parseUri metadata.genesis.url)
        await metadata.fetchGenesisBytes(genesisStateUrl)
      except CatchableError as err:
        error "Failed to obtain genesis state",
          source = metadata.genesis.sourceDesc,
          err = err.msg
        quit 1
    else:
      @[]

  if genesisBytes.len > 0:
    try:
      newClone readSszForkedHashedBeaconState(metadata.cfg, genesisBytes)
    except CatchableError as err:
      error "Invalid genesis state",
        size = genesisBytes.len,
        digest = eth2digest(genesisBytes),
        err = err.msg
      quit 1
  else:
    nil
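
# Usage sketch (for illustration; mirrors the call sites further down in this
# module):
#   let genesisState = await fetchGenesisState(
#     metadata, config.genesisState, config.genesisStateUrl)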

proc doRunTrustedNodeSync(
    db: BeaconChainDB,
    metadata: Eth2NetworkMetadata,
    databaseDir: string,
    eraDir: string,
    restUrl: string,
    stateId: Option[string],
    trustedBlockRoot: Option[Eth2Digest],
    backfill: bool,
    reindex: bool,
    downloadDepositSnapshot: bool,
    genesisState: ref ForkedHashedBeaconState) {.async.} =
  let syncTarget =
    if stateId.isSome:
      if trustedBlockRoot.isSome:
        warn "Ignoring `trustedBlockRoot`, `stateId` is set",
          stateId, trustedBlockRoot
      TrustedNodeSyncTarget(
        kind: TrustedNodeSyncKind.StateId,
        stateId: stateId.get)
    elif trustedBlockRoot.isSome:
      TrustedNodeSyncTarget(
        kind: TrustedNodeSyncKind.TrustedBlockRoot,
        trustedBlockRoot: trustedBlockRoot.get)
    else:
      TrustedNodeSyncTarget(
        kind: TrustedNodeSyncKind.StateId,
        stateId: "finalized")

  await db.doTrustedNodeSync(
    metadata.cfg,
    databaseDir,
    eraDir,
    restUrl,
    syncTarget,
    backfill,
    reindex,
    downloadDepositSnapshot,
    genesisState)
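
# Sync-target precedence in the proc above: `stateId` wins over
# `trustedBlockRoot`; with neither set, the remote "finalized" state is used.
# Hypothetical example call (argument values are illustrative only):
#   await db.doRunTrustedNodeSync(
#     metadata, config.databaseDir, config.eraDir,
#     restUrl = "http://127.0.0.1:5052",
#     stateId = some("finalized"),
#     trustedBlockRoot = none(Eth2Digest),
#     backfill = false, reindex = false,
#     downloadDepositSnapshot = false, genesisState)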

func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
  case stdoutKind
  of StdoutLogKind.Auto: raiseAssert "inadmissible here"
  of StdoutLogKind.Colors:
    VanityLogs(
      onMergeTransitionBlock: bellatrixColor,
      onFinalizedMergeTransitionBlock: bellatrixBlink,
      onUpgradeToCapella: capellaColor,
      onKnownBlsToExecutionChange: capellaBlink,
      onUpgradeToDeneb: denebColor)
  of StdoutLogKind.NoColors:
    VanityLogs(
      onMergeTransitionBlock: bellatrixMono,
      onFinalizedMergeTransitionBlock: bellatrixMono,
      onUpgradeToCapella: capellaMono,
      onKnownBlsToExecutionChange: capellaMono,
      onUpgradeToDeneb: denebMono)
  of StdoutLogKind.Json, StdoutLogKind.None:
    VanityLogs(
      onMergeTransitionBlock:
        (proc() = notice "🐼 Proof of Stake Activated 🐼"),
      onFinalizedMergeTransitionBlock:
        (proc() = notice "🐼 Proof of Stake Finalized 🐼"),
      onUpgradeToCapella:
        (proc() = notice "🦉 Withdrowls now available 🦉"),
      onKnownBlsToExecutionChange:
        (proc() = notice "🦉 BLS to execution changed 🦉"),
      onUpgradeToDeneb:
        (proc() = notice "🐟 Proto-Danksharding is ON 🐟"))

func getVanityMascot(consensusFork: ConsensusFork): string =
  case consensusFork
  of ConsensusFork.Electra:
    " "
  of ConsensusFork.Deneb:
    "🐟"
  of ConsensusFork.Capella:
    "🦉"
  of ConsensusFork.Bellatrix:
    "🐼"
  of ConsensusFork.Altair:
    "✨"
  of ConsensusFork.Phase0:
    "🦏"

proc loadChainDag(
    config: BeaconNodeConf,
    cfg: RuntimeConfig,
    db: BeaconChainDB,
    eventBus: EventBus,
    validatorMonitor: ref ValidatorMonitor,
    networkGenesisValidatorsRoot: Opt[Eth2Digest]): ChainDAGRef =
  info "Loading block DAG from database", path = config.databaseDir

  var dag: ChainDAGRef
  proc onLightClientFinalityUpdate(data: ForkedLightClientFinalityUpdate) =
    if dag == nil: return
    withForkyFinalityUpdate(data):
      when lcDataFork > LightClientDataFork.None:
        let contextFork =
          dag.cfg.consensusForkAtEpoch(forkyFinalityUpdate.contextEpoch)
        eventBus.finUpdateQueue.emit(
          RestVersioned[ForkedLightClientFinalityUpdate](
            data: data,
            jsonVersion: contextFork,
            sszContext: dag.forkDigests[].atConsensusFork(contextFork)))

  proc onLightClientOptimisticUpdate(data: ForkedLightClientOptimisticUpdate) =
    if dag == nil: return
    withForkyOptimisticUpdate(data):
      when lcDataFork > LightClientDataFork.None:
        let contextFork =
          dag.cfg.consensusForkAtEpoch(forkyOptimisticUpdate.contextEpoch)
        eventBus.optUpdateQueue.emit(
          RestVersioned[ForkedLightClientOptimisticUpdate](
            data: data,
            jsonVersion: contextFork,
            sszContext: dag.forkDigests[].atConsensusFork(contextFork)))

  let
    chainDagFlags =
      if config.strictVerification: {strictVerification}
      else: {}
    onLightClientFinalityUpdateCb =
      if config.lightClientDataServe: onLightClientFinalityUpdate
      else: nil
    onLightClientOptimisticUpdateCb =
      if config.lightClientDataServe: onLightClientOptimisticUpdate
      else: nil

  dag = ChainDAGRef.init(
    cfg, db, validatorMonitor, chainDagFlags, config.eraDir,
    vanityLogs = getVanityLogs(detectTTY(config.logStdout)),
    lcDataConfig = LightClientDataConfig(
      serve: config.lightClientDataServe,
      importMode: config.lightClientDataImportMode,
      maxPeriods: config.lightClientDataMaxPeriods,
      onLightClientFinalityUpdate: onLightClientFinalityUpdateCb,
      onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb))

  if networkGenesisValidatorsRoot.isSome:
    let databaseGenesisValidatorsRoot =
      getStateField(dag.headState, genesis_validators_root)
    if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot:
      fatal "The specified --data-dir contains data for a different network",
        networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get,
        databaseGenesisValidatorsRoot,
        dataDir = config.dataDir
      quit 1

  # The first pruning after restart may take a while..
  if config.historyMode == HistoryMode.Prune:
    dag.pruneHistory(true)

  dag

proc checkWeakSubjectivityCheckpoint(
    dag: ChainDAGRef,
    wsCheckpoint: Checkpoint,
    beaconClock: BeaconClock) =
  let
    currentSlot = beaconClock.now.slotOrZero
    isCheckpointStale = not is_within_weak_subjectivity_period(
      dag.cfg, currentSlot, dag.headState, wsCheckpoint)

  if isCheckpointStale:
    error "Weak subjectivity checkpoint is stale",
      currentSlot, checkpoint = wsCheckpoint,
      headStateSlot = getStateField(dag.headState, slot)
    quit 1
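
# Hedged sketch of the intended call site (the option name below is an
# assumption; this proc is meant to run at startup when a weak subjectivity
# checkpoint is configured):
#   if config.weakSubjectivityCheckpoint.isSome:
#     dag.checkWeakSubjectivityCheckpoint(
#       config.weakSubjectivityCheckpoint.get, beaconClock)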

from ./spec/state_transition_block import kzg_commitment_to_versioned_hash

proc initFullNode(
    node: BeaconNode,
    rng: ref HmacDrbgContext,
    dag: ChainDAGRef,
    taskpool: TaskPoolPtr,
    getBeaconTime: GetBeaconTimeFn) {.async.} =
  template config(): auto = node.config

  proc onAttestationReceived(data: phase0.Attestation) =
    node.eventBus.attestQueue.emit(data)
  proc onSyncContribution(data: SignedContributionAndProof) =
    node.eventBus.contribQueue.emit(data)
  proc onVoluntaryExitAdded(data: SignedVoluntaryExit) =
    node.eventBus.exitQueue.emit(data)
  proc onBLSToExecutionChangeAdded(data: SignedBLSToExecutionChange) =
    node.eventBus.blsToExecQueue.emit(data)
  proc onProposerSlashingAdded(data: ProposerSlashing) =
    node.eventBus.propSlashQueue.emit(data)
  proc onAttesterSlashingAdded(data: phase0.AttesterSlashing) =
    node.eventBus.attSlashQueue.emit(data)
  proc onBlobSidecarAdded(data: BlobSidecar) =
    node.eventBus.blobSidecarQueue.emit(
      BlobSidecarInfoObject(
        block_root: hash_tree_root(data.signed_block_header.message),
        index: data.index,
        slot: data.signed_block_header.message.slot,
        kzg_commitment: data.kzg_commitment,
        versioned_hash:
          data.kzg_commitment.kzg_commitment_to_versioned_hash.to0xHex))
  proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
    let optimistic =
      if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
        some node.dag.is_optimistic(data.toBlockId())
      else:
        none[bool]()
    node.eventBus.blocksQueue.emit(
      EventBeaconBlockObject.init(data, optimistic))
  proc onHeadChanged(data: HeadChangeInfoObject) =
    let eventData =
      if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
        var res = data
        res.optimistic = some node.dag.is_optimistic(
          BlockId(slot: data.slot, root: data.block_root))
        res
      else:
        data
    node.eventBus.headQueue.emit(eventData)
  proc onChainReorg(data: ReorgInfoObject) =
    let eventData =
      if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
        var res = data
        res.optimistic = some node.dag.is_optimistic(
          BlockId(slot: data.slot, root: data.new_head_block))
        res
      else:
        data
    node.eventBus.reorgQueue.emit(eventData)

  proc makeOnFinalizationCb(
      # This `nimcall` function helps with keeping track of what
      # needs to be captured by the onFinalization closure.
      eventBus: EventBus,
      elManager: ELManager): OnFinalizedCallback {.nimcall.} =
    static: doAssert (elManager is ref)
    return proc(dag: ChainDAGRef, data: FinalizationInfoObject) =
      if elManager != nil:
        let finalizedEpochRef = dag.getFinalizedEpochRef()
        discard trackFinalizedState(elManager,
                                    finalizedEpochRef.eth1_data,
                                    finalizedEpochRef.eth1_deposit_index)
      node.updateLightClientFromDag()

      let eventData =
        if node.currentSlot().epoch() >= dag.cfg.BELLATRIX_FORK_EPOCH:
          var res = data
          # `slot` in this `BlockId` may be higher than block's actual slot,
          # this is alright for the purpose of calling `is_optimistic`.
          res.optimistic = some node.dag.is_optimistic(
            BlockId(slot: data.epoch.start_slot, root: data.block_root))
          res
        else:
          data
      eventBus.finalQueue.emit(eventData)
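
  # Note: because `makeOnFinalizationCb` is `{.nimcall.}`, it cannot capture
  # its enclosing environment; everything the returned closure needs
  # (`eventBus`, `elManager`) must be passed in explicitly, which keeps the
  # captured state easy to audit.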

  func getLocalHeadSlot(): Slot =
    dag.head.slot

  proc getLocalWallSlot(): Slot =
    node.beaconClock.now.slotOrZero

  func getFirstSlotAtFinalizedEpoch(): Slot =
    dag.finalizedHead.slot

  func getBackfillSlot(): Slot =
    if dag.backfill.parent_root != dag.tail.root:
      dag.backfill.slot
    else:
      dag.tail.slot

  func getFrontfillSlot(): Slot =
    max(dag.frontfill.get(BlockId()).slot, dag.horizon)

  let
    quarantine = newClone(
      Quarantine.init())
    attestationPool = newClone(AttestationPool.init(
      dag, quarantine, config.forkChoiceVersion.get, onAttestationReceived))
    syncCommitteeMsgPool = newClone(
      SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution))
    lightClientPool = newClone(
      LightClientPool())
    validatorChangePool = newClone(ValidatorChangePool.init(
      dag, attestationPool, onVoluntaryExitAdded, onBLSToExecutionChangeAdded,
      onProposerSlashingAdded, onAttesterSlashingAdded))
    blobQuarantine = newClone(BlobQuarantine.init(onBlobSidecarAdded))
    dataColumnQuarantine = newClone(DataColumnQuarantine.init())
    consensusManager = ConsensusManager.new(
      dag, attestationPool, quarantine, node.elManager,
      ActionTracker.init(node.network.nodeId, config.subscribeAllSubnets),
      node.dynamicFeeRecipientsStore, config.validatorsDir,
      config.defaultFeeRecipient, config.suggestedGasLimit)
    blockProcessor = BlockProcessor.new(
      config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
      rng, taskpool, consensusManager, node.validatorMonitor,
      blobQuarantine, dataColumnQuarantine, getBeaconTime)
    blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
        blobs: Opt[BlobSidecars], data_columns: Opt[DataColumnSidecars],
        maybeFinalized: bool):
        Future[Result[void, VerifierError]] {.async: (raises: [CancelledError], raw: true).} =
      # The design with a callback for block verification is unusual compared
      # to the rest of the application, but fits with the general approach
      # taken in the sync/request managers - this is an architectural compromise
      # that should probably be reimagined more holistically in the future.
      blockProcessor[].addBlock(
        MsgSource.gossip, signedBlock, blobs, data_columns,
        maybeFinalized = maybeFinalized)
    rmanBlockVerifier = proc(signedBlock: ForkedSignedBeaconBlock,
        maybeFinalized: bool):
        Future[Result[void, VerifierError]] {.async: (raises: [CancelledError]).} =
      withBlck(signedBlock):
        # when consensusFork >= ConsensusFork.Deneb:
        #   if not blobQuarantine[].hasBlobs(forkyBlck):
        #     # We don't have all the blobs for this block, so we have
        #     # to put it in blobless quarantine.
        #     if not quarantine[].addBlobless(dag.finalizedHead.slot, forkyBlck):
        #       err(VerifierError.UnviableFork)
        #     else:
        #       err(VerifierError.MissingParent)
        #   else:
        #     let blobs = blobQuarantine[].popBlobs(forkyBlck.root, forkyBlck)
        #     await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
        #       Opt.some(blobs), Opt.none(DataColumnSidecars),
        #       maybeFinalized = maybeFinalized)
        # else:
        #   await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
        #     Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
        #     maybeFinalized = maybeFinalized)
        when consensusFork >= ConsensusFork.Deneb:
          if not dataColumnQuarantine[].checkForInitialDcSidecars(forkyBlck):
            # We don't have all the data columns for this block, so we have
            # to put it in columnless quarantine.
            if not quarantine[].addColumnless(dag.finalizedHead.slot, forkyBlck):
              err(VerifierError.UnviableFork)
            else:
              err(VerifierError.MissingParent)
          else:
            let data_columns = dataColumnQuarantine[].popDataColumns(
              forkyBlck.root, forkyBlck)
            await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
              Opt.none(BlobSidecars), Opt.some(data_columns),
              maybeFinalized = maybeFinalized)
        else:
          await blockProcessor[].addBlock(MsgSource.gossip, signedBlock,
            Opt.none(BlobSidecars), Opt.none(DataColumnSidecars),
            maybeFinalized = maybeFinalized)
    rmanBlockLoader = proc(
        blockRoot: Eth2Digest): Opt[ForkedTrustedSignedBeaconBlock] =
      dag.getForkedBlock(blockRoot)
    rmanBlobLoader = proc(
        blobId: BlobIdentifier): Opt[ref BlobSidecar] =
      var blob_sidecar = BlobSidecar.new()
      if dag.db.getBlobSidecar(blobId.block_root, blobId.index, blob_sidecar[]):
        Opt.some blob_sidecar
      else:
        Opt.none(ref BlobSidecar)
    rmanDataColumnLoader = proc(
        columnId: DataColumnIdentifier): Opt[ref DataColumnSidecar] =
      var data_column_sidecar = DataColumnSidecar.new()
      if dag.db.getDataColumnSidecar(
          columnId.block_root, columnId.index, data_column_sidecar[]):
        Opt.some data_column_sidecar
      else:
        Opt.none(ref DataColumnSidecar)
    processor = Eth2Processor.new(
      config.doppelgangerDetection,
      blockProcessor, node.validatorMonitor, dag, attestationPool,
      validatorChangePool, node.attachedValidators, syncCommitteeMsgPool,
      lightClientPool, quarantine, blobQuarantine, dataColumnQuarantine,
      rng, getBeaconTime, taskpool)
    router = (ref MessageRouter)(
      processor: processor,
      network: node.network)

  var supernode = node.config.subscribeAllSubnets
  let
    syncManager = newSyncManager[Peer, PeerId](
      node.network.peerPool,
      dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
      supernode, node.network.nodeId, SyncQueueKind.Forward, getLocalHeadSlot,
      getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
      getFrontfillSlot, dag.tail.slot, blockVerifier)
    backfiller = newSyncManager[Peer, PeerId](
      node.network.peerPool,
      dag.cfg.DENEB_FORK_EPOCH, dag.cfg.MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS,
      supernode, node.network.nodeId, SyncQueueKind.Backward, getLocalHeadSlot,
      getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
      getFrontfillSlot, dag.backfill.slot, blockVerifier,
      maxHeadAge = 0)
    requestManager = RequestManager.init(
      node.network, supernode, dag.cfg.DENEB_FORK_EPOCH, getBeaconTime,
      (proc(): bool = syncManager.inProgress),
      quarantine, blobQuarantine, dataColumnQuarantine, rmanBlockVerifier,
      rmanBlockLoader, rmanBlobLoader, rmanDataColumnLoader)

  if node.config.lightClientDataServe:
    proc scheduleSendingLightClientUpdates(slot: Slot) =
      if node.lightClientPool[].broadcastGossipFut != nil:
        return
      if slot <= node.lightClientPool[].latestBroadcastedSlot:
        return
      node.lightClientPool[].latestBroadcastedSlot = slot

      template fut(): auto = node.lightClientPool[].broadcastGossipFut
      fut = node.handleLightClientUpdates(slot)
      fut.addCallback do (p: pointer) {.gcsafe.}:
        fut = nil

    router.onSyncCommitteeMessage = scheduleSendingLightClientUpdates

  dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.elManager)
  dag.setBlockCb(onBlockAdded)
  dag.setHeadCb(onHeadChanged)
  dag.setReorgCb(onChainReorg)

  node.dag = dag
  node.blobQuarantine = blobQuarantine
  node.dataColumnQuarantine = dataColumnQuarantine
  node.quarantine = quarantine
  node.attestationPool = attestationPool
  node.syncCommitteeMsgPool = syncCommitteeMsgPool
  node.lightClientPool = lightClientPool
  node.validatorChangePool = validatorChangePool
  node.processor = processor
  node.blockProcessor = blockProcessor
  node.consensusManager = consensusManager
  node.requestManager = requestManager
  node.syncManager = syncManager
  node.backfiller = backfiller
  node.router = router

  await node.addValidators()

  block:
    # Add in-process validators to the list of "known" validators such that
    # we start with a reasonable ENR
    let wallSlot = node.beaconClock.now().slotOrZero()
    for validator in node.attachedValidators[].validators.values():
      if config.validatorMonitorAuto:
        node.validatorMonitor[].addMonitor(validator.pubkey, validator.index)

      if validator.index.isSome():
        withState(dag.headState):
          let idx = validator.index.get()
          if distinctBase(idx) <= forkyState.data.validators.lenu64:
            template v: auto = forkyState.data.validators.item(idx)
            if is_active_validator(v, wallSlot.epoch) or
                is_active_validator(v, wallSlot.epoch + 1):
              node.consensusManager[].actionTracker.knownValidators[idx] = wallSlot
            elif is_exited_validator(v, wallSlot.epoch):
              notice "Ignoring exited validator",
                index = idx,
                pubkey = shortLog(v.pubkey)
    let stabilitySubnets =
      node.consensusManager[].actionTracker.stabilitySubnets(wallSlot)
    # Here, we also set the correct ENR should we be in all subnets mode!
    node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  node.network.registerProtocol(
    PeerSync, PeerSync.NetworkState.init(
      node.dag,
      node.beaconClock.getBeaconTimeFn(),
    ))
  node.network.registerProtocol(
    BeaconSync, BeaconSync.NetworkState.init(node.dag))
  if node.dag.lcDataStore.serve:
    node.network.registerProtocol(
      LightClientSync, LightClientSync.NetworkState.init(node.dag))

  node.updateValidatorMetrics()

const
  SlashingDbName = "slashing_protection"
    # changing this requires physical file rename as well or history is lost.
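    # (Editorial note: the slashing-protection database derives its on-disk
    #  file name from this constant - e.g. a "slashing_protection.sqlite3"
    #  file under the validators directory; the exact path layout is an
    #  assumption here.)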

proc init*(T: type BeaconNode,
           rng: ref HmacDrbgContext,
           config: BeaconNodeConf,
           metadata: Eth2NetworkMetadata): Future[BeaconNode]
          {.async.} =
  var taskpool: TaskPoolPtr

  template cfg: auto = metadata.cfg
  template eth1Network: auto = metadata.eth1Network

  try:
    if config.numThreads < 0:
      fatal "The number of threads --numThreads cannot be negative."
      quit 1
    elif config.numThreads == 0:
      taskpool = TaskPoolPtr.new(numThreads = min(countProcessors(), 16))
    else:
      taskpool = TaskPoolPtr.new(numThreads = config.numThreads)

    info "Threadpool started", numThreads = taskpool.numThreads
  except Exception:
    raise newException(Defect, "Failure in taskpool initialization.")
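
  # Worked example of the branch above: with `--num-threads=0` on a 32-core
  # machine, min(countProcessors(), 16) = 16 worker threads are created;
  # with `--num-threads=4`, exactly 4 are created.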

  if metadata.genesis.kind == BakedIn:
    if config.genesisState.isSome:
      warn "The --genesis-state option has no effect on networks with built-in genesis state"

    if config.genesisStateUrl.isSome:
      warn "The --genesis-state-url option has no effect on networks with built-in genesis state"

  let
    eventBus = EventBus(
      headQueue: newAsyncEventQueue[HeadChangeInfoObject](),
      blocksQueue: newAsyncEventQueue[EventBeaconBlockObject](),
      attestQueue: newAsyncEventQueue[phase0.Attestation](),
      exitQueue: newAsyncEventQueue[SignedVoluntaryExit](),
      blsToExecQueue: newAsyncEventQueue[SignedBLSToExecutionChange](),
      propSlashQueue: newAsyncEventQueue[ProposerSlashing](),
      attSlashQueue: newAsyncEventQueue[AttesterSlashing](),
      blobSidecarQueue: newAsyncEventQueue[BlobSidecarInfoObject](),
      finalQueue: newAsyncEventQueue[FinalizationInfoObject](),
      reorgQueue: newAsyncEventQueue[ReorgInfoObject](),
      contribQueue: newAsyncEventQueue[SignedContributionAndProof](),
      finUpdateQueue: newAsyncEventQueue[
        RestVersioned[ForkedLightClientFinalityUpdate]](),
      optUpdateQueue: newAsyncEventQueue[
        RestVersioned[ForkedLightClientOptimisticUpdate]]())
    db = BeaconChainDB.new(config.databaseDir, cfg, inMemory = false)

  if config.externalBeaconApiUrl.isSome and ChainDAGRef.isInitialized(db).isErr:
    var genesisState: ref ForkedHashedBeaconState
    let trustedBlockRoot =
      if config.trustedStateRoot.isSome or config.trustedBlockRoot.isSome:
        config.trustedBlockRoot
      elif cfg.ALTAIR_FORK_EPOCH == GENESIS_EPOCH:
        # Sync can be bootstrapped from the genesis block root
        genesisState = await fetchGenesisState(
          metadata, config.genesisState, config.genesisStateUrl)
        if genesisState != nil:
          let genesisBlockRoot = get_initial_beacon_block(genesisState[]).root
          notice "Neither `--trusted-block-root` nor `--trusted-state-root` " &
            "provided with `--external-beacon-api-url`, " &
            "falling back to genesis block root",
            externalBeaconApiUrl = config.externalBeaconApiUrl.get,
            trustedBlockRoot = config.trustedBlockRoot,
            trustedStateRoot = config.trustedStateRoot,
            genesisBlockRoot = $genesisBlockRoot
          some genesisBlockRoot
        else:
          none[Eth2Digest]()
      else:
        none[Eth2Digest]()
    if config.trustedStateRoot.isNone and trustedBlockRoot.isNone:
      warn "Ignoring `--external-beacon-api-url`, neither " &
        "`--trusted-block-root` nor `--trusted-state-root` provided",
        externalBeaconApiUrl = config.externalBeaconApiUrl.get,
        trustedBlockRoot = config.trustedBlockRoot,
        trustedStateRoot = config.trustedStateRoot
    else:
      if genesisState == nil:
        genesisState = await fetchGenesisState(
          metadata, config.genesisState, config.genesisStateUrl)
      await db.doRunTrustedNodeSync(
        metadata,
        config.databaseDir,
        config.eraDir,
        config.externalBeaconApiUrl.get,
        config.trustedStateRoot.map do (x: Eth2Digest) -> string:
          "0x" & x.data.toHex,
        trustedBlockRoot,
        backfill = false,
        reindex = false,
        downloadDepositSnapshot = false,
        genesisState)

  if config.finalizedCheckpointBlock.isSome:
    warn "--finalized-checkpoint-block has been deprecated, ignoring"

  let checkpointState = if config.finalizedCheckpointState.isSome:
    let checkpointStatePath = config.finalizedCheckpointState.get.string
    let tmp = try:
      newClone(readSszForkedHashedBeaconState(
        cfg, readAllBytes(checkpointStatePath).tryGet()))
    except SszError as err:
      fatal "Checkpoint state loading failed",
        err = formatMsg(err, checkpointStatePath)
      quit 1
    except CatchableError as err:
      fatal "Failed to read checkpoint state file", err = err.msg
      quit 1

    if not getStateField(tmp[], slot).is_epoch:
      fatal "--finalized-checkpoint-state must point to a state for an epoch slot",
        slot = getStateField(tmp[], slot)
      quit 1
    tmp
  else:
    nil

  if config.finalizedDepositTreeSnapshot.isSome:
    let
      depositTreeSnapshotPath = config.finalizedDepositTreeSnapshot.get.string
      snapshot =
        try:
          SSZ.loadFile(depositTreeSnapshotPath, DepositTreeSnapshot)
        except SszError as err:
          fatal "Deposit tree snapshot loading failed",
            err = formatMsg(err, depositTreeSnapshotPath)
          quit 1
        except CatchableError as err:
          fatal "Failed to read deposit tree snapshot file", err = err.msg
          quit 1
      depositContractSnapshot = DepositContractSnapshot.init(snapshot).valueOr:
        fatal "Invalid deposit tree snapshot file"
        quit 1
    db.putDepositContractSnapshot(depositContractSnapshot)

  let engineApiUrls = config.engineApiUrls

  if engineApiUrls.len == 0:
    notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)"

  var networkGenesisValidatorsRoot = metadata.bakedGenesisValidatorsRoot

  if not ChainDAGRef.isInitialized(db).isOk():
    let genesisState =
      if checkpointState != nil and
          getStateField(checkpointState[], slot) == 0:
        checkpointState
      else:
        await fetchGenesisState(
          metadata, config.genesisState, config.genesisStateUrl)
if genesisState = = nil and checkpointState = = nil :
fatal " No database and no genesis snapshot found. Please supply a genesis.ssz " &
" with the network configuration "
quit 1
if not genesisState . isNil and not checkpointState . isNil :
if getStateField ( genesisState [ ] , genesis_validators_root ) ! =
getStateField ( checkpointState [ ] , genesis_validators_root ) :
fatal " Checkpoint state does not match genesis - check the --network parameter " ,
rootFromGenesis = getStateField (
genesisState [ ] , genesis_validators_root ) ,
rootFromCheckpoint = getStateField (
checkpointState [ ] , genesis_validators_root )
quit 1
try :
# Always store genesis state if we have it - this allows reindexing and
# answering genesis queries
if not genesisState . isNil :
ChainDAGRef . preInit ( db , genesisState [ ] )
networkGenesisValidatorsRoot =
Opt . some ( getStateField ( genesisState [ ] , genesis_validators_root ) )
if not checkpointState . isNil :
if genesisState . isNil or
getStateField ( checkpointState [ ] , slot ) ! = GENESIS_SLOT :
ChainDAGRef . preInit ( db , checkpointState [ ] )
doAssert ChainDAGRef . isInitialized ( db ) . isOk ( ) , " preInit should have initialized db "
except CatchableError as exc :
error " Failed to initialize database " , err = exc . msg
quit 1
else :
if not checkpointState . isNil :
fatal " A database already exists, cannot start from given checkpoint " ,
dataDir = config . dataDir
quit 1
# Doesn't use std/random directly, but dependencies might
randomize ( rng [ ] . rand ( high ( int ) ) )
# The validatorMonitorTotals flag has been deprecated and should eventually be
# removed - until then, it's given priority if set so as not to needlessly
# break existing setups
let
validatorMonitor = newClone ( ValidatorMonitor . init (
2023-01-16 10:28:35 +00:00
config . validatorMonitorAuto ,
config . validatorMonitorTotals . get (
not config . validatorMonitorDetails ) ) )
2021-12-20 19:20:31 +00:00
for key in config . validatorMonitorPubkeys :
2022-08-19 21:51:30 +00:00
validatorMonitor [ ] . addMonitor ( key , Opt . none ( ValidatorIndex ) )
2021-12-20 19:20:31 +00:00
2020-07-07 23:02:14 +00:00
let
2022-03-19 16:48:24 +00:00
dag = loadChainDag (
config , cfg , db , eventBus ,
2023-01-31 12:35:01 +00:00
validatorMonitor , networkGenesisValidatorsRoot )
2022-06-15 02:38:27 +00:00
genesisTime = getStateField ( dag . headState , genesis_time )
2024-01-06 14:26:56 +00:00
beaconClock = BeaconClock . init ( genesisTime ) . valueOr :
fatal " Invalid genesis time in state " , genesisTime
quit 1
2022-03-31 14:43:05 +00:00
getBeaconTime = beaconClock . getBeaconTimeFn ( )
2021-08-20 08:58:15 +00:00
2021-02-22 16:17:48 +00:00
if config . weakSubjectivityCheckpoint . isSome :
2022-03-19 16:48:24 +00:00
dag . checkWeakSubjectivityCheckpoint (
config . weakSubjectivityCheckpoint . get , beaconClock )
2020-09-22 20:42:42 +00:00
2023-03-05 01:40:21 +00:00
let elManager = ELManager . new (
cfg ,
metadata . depositContractBlock ,
metadata . depositContractBlockHash ,
db ,
engineApiUrls ,
eth1Network )
2020-11-12 16:21:04 +00:00
2022-09-29 06:29:49 +00:00
if config . rpcEnabled . isSome :
2022-05-24 07:23:48 +00:00
warn " Nimbus ' s JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it. "
2020-03-16 22:28:54 +00:00
2021-03-17 18:46:45 +00:00
let restServer = if config . restEnabled :
2022-08-19 10:30:07 +00:00
RestServerRef . init ( config . restAddress , config . restPort ,
2023-06-04 16:46:00 +00:00
config . restAllowedOrigin ,
2022-08-19 10:30:07 +00:00
validateBeaconApiQueries ,
config )
2021-12-22 12:37:31 +00:00
else :
nil
2020-04-15 02:41:22 +00:00
let
2021-02-22 16:17:48 +00:00
netKeys = getPersistentNetKeys ( rng [ ] , config )
nickname = if config . nodeName = = " auto " : shortForm ( netKeys )
else : config . nodeName
2021-07-07 09:09:47 +00:00
network = createEth2Node (
2021-08-19 10:45:31 +00:00
rng , config , netKeys , cfg , dag . forkDigests , getBeaconTime ,
2022-03-16 07:20:40 +00:00
getStateField ( dag . headState , genesis_validators_root ) )
2021-02-22 16:17:48 +00:00
2021-05-04 13:17:28 +00:00
case config . slashingDbKind
of SlashingDbKind . v2 :
discard
of SlashingDbKind . v1 :
error " Slashing DB v1 is no longer supported for writing "
quit 1
of SlashingDbKind . both :
warn " Slashing DB v1 deprecated, writing only v2 "
info " Loading slashing protection database (v2) " ,
path = config . validatorsDir ( )
2022-11-20 13:55:43 +00:00
proc getValidatorAndIdx ( pubkey : ValidatorPubKey ) : Opt [ ValidatorAndIndex ] =
2022-08-19 10:30:07 +00:00
withState ( dag . headState ) :
2022-11-20 13:55:43 +00:00
getValidator ( forkyState ( ) . data . validators . asSeq ( ) , pubkey )
2022-08-19 10:30:07 +00:00
2024-02-26 08:48:07 +00:00
func getCapellaForkVersion ( ) : Opt [ Version ] =
Opt . some ( cfg . CAPELLA_FORK_VERSION )
func getDenebForkEpoch ( ) : Opt [ Epoch ] =
Opt . some ( cfg . DENEB_FORK_EPOCH )
2024-05-15 13:30:49 +00:00
func getElectraForkEpoch ( ) : Opt [ Epoch ] =
Opt . some ( cfg . ELECTRA_FORK_EPOCH )
2023-06-14 06:46:01 +00:00
proc getForkForEpoch ( epoch : Epoch ) : Opt [ Fork ] =
Opt . some ( dag . forkAtEpoch ( epoch ) )
proc getGenesisRoot ( ) : Eth2Digest =
getStateField ( dag . headState , genesis_validators_root )
2021-05-04 13:17:28 +00:00
let
2023-09-21 18:07:02 +00:00
keystoreCache = KeystoreCacheRef . init ( )
2021-02-22 16:17:48 +00:00
slashingProtectionDB =
2021-05-04 13:17:28 +00:00
SlashingProtectionDB . init (
2022-03-16 07:20:40 +00:00
getStateField ( dag . headState , genesis_validators_root ) ,
2021-05-19 06:38:13 +00:00
config . validatorsDir ( ) , SlashingDbName )
2022-12-09 16:05:55 +00:00
validatorPool = newClone ( ValidatorPool . init (
slashingProtectionDB , config . doppelgangerDetection ) )
2021-03-11 10:10:57 +00:00
2022-08-19 10:30:07 +00:00
keymanagerInitResult = initKeymanagerServer ( config , restServer )
keymanagerHost = if keymanagerInitResult . server ! = nil :
newClone KeymanagerHost . init (
validatorPool ,
2023-09-21 18:07:02 +00:00
keystoreCache ,
2022-08-19 10:30:07 +00:00
rng ,
keymanagerInitResult . token ,
config . validatorsDir ,
config . secretsDir ,
config . defaultFeeRecipient ,
2023-02-15 15:10:31 +00:00
config . suggestedGasLimit ,
2024-03-14 03:44:00 +00:00
config . defaultGraffitiBytes ,
2023-06-25 12:00:17 +00:00
config . getPayloadBuilderAddress ,
2022-11-20 13:55:43 +00:00
getValidatorAndIdx ,
2023-06-14 06:46:01 +00:00
getBeaconTime ,
2024-02-26 08:48:07 +00:00
getCapellaForkVersion ,
getDenebForkEpoch ,
2023-06-14 06:46:01 +00:00
getForkForEpoch ,
getGenesisRoot )
2022-08-19 10:30:07 +00:00
else : nil
2022-03-21 16:52:15 +00:00
stateTtlCache =
if config . restCacheSize > 0 :
StateTtlCache . init (
cacheSize = config . restCacheSize ,
cacheTtl = chronos . seconds ( config . restCacheTtl ) )
else :
nil
2022-01-31 17:28:26 +00:00
2023-05-25 15:38:56 +00:00
if config . payloadBuilderEnable :
2023-02-23 09:46:18 +00:00
info " Using external payload builder " ,
payloadBuilderUrl = config . payloadBuilderUrl
2022-06-07 17:01:11 +00:00
let node = BeaconNode (
2020-01-17 13:44:01 +00:00
nickname : nickname ,
2021-12-20 11:21:17 +00:00
graffitiBytes : if config . graffiti . isSome : config . graffiti . get
2020-06-29 17:30:19 +00:00
else : defaultGraffitiBytes ( ) ,
2020-01-17 13:44:01 +00:00
network : network ,
2020-02-05 20:40:14 +00:00
netKeys : netKeys ,
2020-01-17 13:44:01 +00:00
db : db ,
2021-02-22 16:17:48 +00:00
config : config ,
2021-11-01 14:50:24 +00:00
attachedValidators : validatorPool ,
2023-03-05 01:40:21 +00:00
elManager : elManager ,
2021-03-17 18:46:45 +00:00
restServer : restServer ,
2022-08-19 10:30:07 +00:00
keymanagerHost : keymanagerHost ,
keymanagerServer : keymanagerInitResult . server ,
2023-09-21 18:07:02 +00:00
keystoreCache : keystoreCache ,
2021-09-22 12:17:15 +00:00
eventBus : eventBus ,
2021-12-21 14:24:23 +00:00
gossipState : { } ,
2022-08-25 03:53:59 +00:00
blocksGossipState : { } ,
2021-09-17 00:13:52 +00:00
beaconClock : beaconClock ,
2022-01-05 14:49:10 +00:00
validatorMonitor : validatorMonitor ,
2022-06-15 02:38:27 +00:00
stateTtlCache : stateTtlCache ,
2023-02-23 02:10:07 +00:00
dynamicFeeRecipientsStore : newClone ( DynamicFeeRecipientsStore . init ( ) ) )
2020-08-20 16:30:47 +00:00
2022-06-07 17:01:11 +00:00
node . initLightClient (
rng , cfg , dag . forkDigests , getBeaconTime , dag . genesis_validators_root )
2024-01-13 10:53:53 +00:00
await node . initFullNode ( rng , dag , taskpool , getBeaconTime )
2020-12-16 13:03:04 +00:00
2022-06-07 17:01:11 +00:00
node . updateLightClientFromDag ( )
2022-02-21 11:55:56 +00:00
node
2019-09-07 17:48:05 +00:00
2022-08-01 06:41:47 +00:00
func verifyFinalization ( node : BeaconNode , slot : Slot ) =
2020-05-13 08:36:33 +00:00
# Epoch must be >= 4 to check finalization
const SETTLING_TIME_OFFSET = 1 'u64
2022-01-11 10:01:54 +00:00
let epoch = slot . epoch ( )
2020-05-13 08:36:33 +00:00
# Don't static-assert this -- if this isn't called, don't require it
doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET
# Intentionally, loudly assert. Point is to fail visibly and unignorably
# during testing.
if epoch > = 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET :
let finalizedEpoch =
2022-01-11 10:01:54 +00:00
node . dag . finalizedHead . slot . epoch ( )
2020-05-13 08:36:33 +00:00
# Finalization rule 234, that has the most lag slots among the cases, sets
# state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
# and then state.slot gets incremented, to increase the maximum offset, if
# finalization occurs every slot, to 4 slots vs scheduledSlot.
doAssert finalizedEpoch + 4 > = epoch
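    # e.g. at wall-clock epoch 10 this tolerates a finalized epoch as low as 6
    # before the assert fires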
2024-01-13 09:52:13 +00:00
from std / sequtils import toSeq
2021-05-11 20:03:40 +00:00
func subnetLog ( v : BitArray ) : string =
$ toSeq ( v . oneIndices ( ) )
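  # e.g. a BitArray with bits 1 and 5 set renders as "@[1, 5]"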
2022-01-24 20:40:59 +00:00
func forkDigests ( node : BeaconNode ) : auto =
2023-01-28 19:53:41 +00:00
let forkDigestsArray : array [ ConsensusFork , auto ] = [
2022-01-24 20:40:59 +00:00
node . dag . forkDigests . phase0 ,
node . dag . forkDigests . altair ,
2022-11-02 16:23:30 +00:00
node . dag . forkDigests . bellatrix ,
2022-12-07 16:47:23 +00:00
node . dag . forkDigests . capella ,
2024-04-03 14:43:43 +00:00
node . dag . forkDigests . deneb ,
node . dag . forkDigests . electra ]
2022-01-24 20:40:59 +00:00
forkDigestsArray
2023-12-16 02:27:06 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attestation-subnet-subscription
2021-10-18 09:11:44 +00:00
proc updateAttestationSubnetHandlers ( node : BeaconNode , slot : Slot ) =
2021-12-21 14:24:23 +00:00
if node . gossipState . card = = 0 :
2024-01-12 20:40:34 +00:00
# When disconnected, updateBlocksGossipStatus is responsible for all things
2021-10-18 09:11:44 +00:00
# subnets - in particular, it will remove subscriptions on the edge where
# we enter the disconnected state.
return
2020-08-12 17:48:31 +00:00
2021-01-19 17:44:03 +00:00
let
2022-09-07 18:34:52 +00:00
aggregateSubnets =
node . consensusManager [ ] . actionTracker . aggregateSubnets ( slot )
stabilitySubnets =
node . consensusManager [ ] . actionTracker . stabilitySubnets ( slot )
2021-10-18 09:11:44 +00:00
subnets = aggregateSubnets + stabilitySubnets
2021-01-19 17:44:03 +00:00
2021-10-18 09:11:44 +00:00
node . network . updateStabilitySubnetMetadata ( stabilitySubnets )
2020-12-09 09:13:51 +00:00
2021-10-18 09:11:44 +00:00
# Now we know what we should be subscribed to - make it so
2021-01-19 17:44:03 +00:00
let
2022-09-07 18:34:52 +00:00
prevSubnets = node . consensusManager [ ] . actionTracker . subscribedSubnets
2021-10-18 09:11:44 +00:00
unsubscribeSubnets = prevSubnets - subnets
subscribeSubnets = subnets - prevSubnets
# Remember what we subscribed to, so we can unsubscribe later
2022-09-07 18:34:52 +00:00
node . consensusManager [ ] . actionTracker . subscribedSubnets = subnets
2021-01-19 17:44:03 +00:00
2022-01-24 20:40:59 +00:00
let forkDigests = node . forkDigests ( )
2021-12-21 14:24:23 +00:00
for gossipFork in node . gossipState :
let forkDigest = forkDigests [ gossipFork ]
node . network . unsubscribeAttestationSubnets ( unsubscribeSubnets , forkDigest )
node . network . subscribeAttestationSubnets ( subscribeSubnets , forkDigest )
2021-01-19 17:44:03 +00:00
debug " Attestation subnets " ,
2021-10-18 09:11:44 +00:00
slot , epoch = slot . epoch , gossipState = node . gossipState ,
2021-05-11 20:03:40 +00:00
stabilitySubnets = subnetLog ( stabilitySubnets ) ,
2021-10-18 09:11:44 +00:00
aggregateSubnets = subnetLog ( aggregateSubnets ) ,
prevSubnets = subnetLog ( prevSubnets ) ,
2021-05-11 20:03:40 +00:00
subscribeSubnets = subnetLog ( subscribeSubnets ) ,
2021-12-21 14:24:23 +00:00
unsubscribeSubnets = subnetLog ( unsubscribeSubnets ) ,
gossipState = node . gossipState
2021-05-11 20:03:40 +00:00
2022-08-25 03:53:59 +00:00
proc updateBlocksGossipStatus * (
node : BeaconNode , slot : Slot , dagIsBehind : bool ) =
template cfg ( ) : auto = node . dag . cfg
let
isBehind =
if node . shouldSyncOptimistically ( slot ) :
# If optimistic sync is active, always subscribe to blocks gossip
false
else :
# Use DAG status to determine whether to subscribe for blocks gossip
dagIsBehind
targetGossipState = getTargetGossipState (
2022-12-04 07:42:03 +00:00
slot . epoch , cfg . ALTAIR_FORK_EPOCH , cfg . BELLATRIX_FORK_EPOCH ,
2024-05-15 13:30:49 +00:00
cfg . CAPELLA_FORK_EPOCH , cfg . DENEB_FORK_EPOCH , cfg . ELECTRA_FORK_EPOCH ,
isBehind )
2022-08-25 03:53:59 +00:00
template currentGossipState ( ) : auto = node . blocksGossipState
if currentGossipState = = targetGossipState :
return
if currentGossipState . card = = 0 and targetGossipState . card > 0 :
debug " Enabling blocks topic subscriptions " ,
wallSlot = slot , targetGossipState
elif currentGossipState . card > 0 and targetGossipState . card = = 0 :
debug " Disabling blocks topic subscriptions " ,
wallSlot = slot
else :
# Individual forks added / removed
discard
let
newGossipForks = targetGossipState - currentGossipState
oldGossipForks = currentGossipState - targetGossipState
for gossipFork in oldGossipForks :
2023-03-11 14:39:29 +00:00
let forkDigest = node . dag . forkDigests [ ] . atConsensusFork ( gossipFork )
2023-03-01 20:30:20 +00:00
node . network . unsubscribe ( getBeaconBlocksTopic ( forkDigest ) )
2022-08-25 03:53:59 +00:00
for gossipFork in newGossipForks :
2023-03-11 14:39:29 +00:00
let forkDigest = node . dag . forkDigests [ ] . atConsensusFork ( gossipFork )
2022-08-25 03:53:59 +00:00
node . network . subscribe (
2023-03-01 20:30:20 +00:00
getBeaconBlocksTopic ( forkDigest ) , blocksTopicParams ,
2022-08-25 03:53:59 +00:00
enableTopicMetrics = true )
node . blocksGossipState = targetGossipState
2021-08-09 12:54:45 +00:00
2021-12-21 14:24:23 +00:00
proc addPhase0MessageHandlers (
node : BeaconNode , forkDigest : ForkDigest , slot : Slot ) =
2021-08-09 12:54:45 +00:00
node . network . subscribe ( getAttesterSlashingsTopic ( forkDigest ) , basicParams )
node . network . subscribe ( getProposerSlashingsTopic ( forkDigest ) , basicParams )
node . network . subscribe ( getVoluntaryExitsTopic ( forkDigest ) , basicParams )
2021-12-21 14:24:23 +00:00
node . network . subscribe (
getAggregateAndProofsTopic ( forkDigest ) , aggregateTopicParams ,
enableTopicMetrics = true )
2021-08-09 12:54:45 +00:00
2021-10-18 09:11:44 +00:00
# updateAttestationSubnetHandlers subscribes attestation subnets
2021-08-09 12:54:45 +00:00
2021-08-18 12:30:05 +00:00
proc removePhase0MessageHandlers ( node : BeaconNode , forkDigest : ForkDigest ) =
2021-08-09 12:54:45 +00:00
node . network . unsubscribe ( getVoluntaryExitsTopic ( forkDigest ) )
node . network . unsubscribe ( getProposerSlashingsTopic ( forkDigest ) )
node . network . unsubscribe ( getAttesterSlashingsTopic ( forkDigest ) )
node . network . unsubscribe ( getAggregateAndProofsTopic ( forkDigest ) )
2020-12-24 08:48:52 +00:00
2022-01-08 23:28:49 +00:00
for subnet_id in SubnetId :
node . network . unsubscribe ( getAttestationTopic ( forkDigest , subnet_id ) )
2020-09-15 12:40:43 +00:00
2022-09-07 18:34:52 +00:00
node . consensusManager [ ] . actionTracker . subscribedSubnets = default ( AttnetBits )
2021-10-18 09:11:44 +00:00
2022-01-24 20:40:59 +00:00
func hasSyncPubKey ( node : BeaconNode , epoch : Epoch ) : auto =
  # Only used to determine which gossip topics to subscribe to
2022-02-04 12:34:03 +00:00
if node . config . subscribeAllSubnets :
2022-01-24 20:40:59 +00:00
( func ( pubkey : ValidatorPubKey ) : bool {. closure . } = true )
else :
( func ( pubkey : ValidatorPubKey ) : bool =
2022-11-08 11:43:38 +00:00
node . consensusManager [ ] . actionTracker . hasSyncDuty ( pubkey , epoch ) or
2022-08-19 10:30:07 +00:00
pubkey in node . attachedValidators [ ] . validators )
2022-01-24 20:40:59 +00:00
2022-11-08 11:43:38 +00:00
func getCurrentSyncCommiteeSubnets ( node : BeaconNode , epoch : Epoch ) : SyncnetBits =
2022-07-12 09:00:39 +00:00
let syncCommittee = withState ( node . dag . headState ) :
2023-03-11 00:35:52 +00:00
when consensusFork > = ConsensusFork . Altair :
2022-09-13 11:53:12 +00:00
forkyState . data . current_sync_committee
2022-07-12 09:00:39 +00:00
else :
return static ( default ( SyncnetBits ) )
2022-11-08 11:43:38 +00:00
getSyncSubnets ( node . hasSyncPubKey ( epoch ) , syncCommittee )
func getNextSyncCommitteeSubnets ( node : BeaconNode , epoch : Epoch ) : SyncnetBits =
let syncCommittee = withState ( node . dag . headState ) :
2023-03-11 00:35:52 +00:00
when consensusFork > = ConsensusFork . Altair :
2022-11-08 11:43:38 +00:00
forkyState . data . next_sync_committee
else :
return static ( default ( SyncnetBits ) )
getSyncSubnets (
node . hasSyncPubKey ( ( epoch . sync_committee_period + 1 ) . start_slot ( ) . epoch ) ,
syncCommittee )
func getSyncCommitteeSubnets ( node : BeaconNode , epoch : Epoch ) : SyncnetBits =
let
subnets = node . getCurrentSyncCommiteeSubnets ( epoch )
epochsToSyncPeriod = nearSyncCommitteePeriod ( epoch )
# The end-slot tracker might call this when it's theoretically applicable,
# but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync
# committee period begins, in which case `epochsToNextSyncPeriod` is none.
if epochsToSyncPeriod . isNone or
2023-02-16 09:32:12 +00:00
node . dag . cfg . consensusForkAtEpoch ( epoch + epochsToSyncPeriod . get ) <
2023-01-28 19:53:41 +00:00
ConsensusFork . Altair :
2022-11-08 11:43:38 +00:00
return subnets
subnets + node . getNextSyncCommitteeSubnets ( epoch )
2022-07-12 09:00:39 +00:00
2022-11-24 14:38:07 +00:00
proc addAltairMessageHandlers (
node : BeaconNode , forkDigest : ForkDigest , slot : Slot ) =
2021-11-14 08:00:25 +00:00
node . addPhase0MessageHandlers ( forkDigest , slot )
2020-09-15 12:40:43 +00:00
2022-01-24 20:40:59 +00:00
# If this comes online near sync committee period, it'll immediately get
# replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
2022-11-24 14:38:07 +00:00
let syncnets = node . getSyncCommitteeSubnets ( slot . epoch )
2021-08-29 05:58:27 +00:00
2022-01-08 23:28:49 +00:00
for subcommitteeIdx in SyncSubcommitteeIndex :
2022-11-08 11:43:38 +00:00
if syncnets [ subcommitteeIdx ] :
2022-01-24 20:40:59 +00:00
node . network . subscribe (
getSyncCommitteeTopic ( forkDigest , subcommitteeIdx ) , basicParams )
2021-08-29 05:58:27 +00:00
2021-11-14 08:00:25 +00:00
node . network . subscribe (
getSyncCommitteeContributionAndProofTopic ( forkDigest ) , basicParams )
2022-01-24 20:40:59 +00:00
2022-11-08 11:43:38 +00:00
node . network . updateSyncnetsMetadata ( syncnets )
2021-08-29 05:58:27 +00:00
2022-11-24 14:38:07 +00:00
proc addCapellaMessageHandlers (
node : BeaconNode , forkDigest : ForkDigest , slot : Slot ) =
node . addAltairMessageHandlers ( forkDigest , slot )
node . network . subscribe ( getBlsToExecutionChangeTopic ( forkDigest ) , basicParams )
proc fetchCustodySubnetCount * ( node : BeaconNode ) : uint64 =
  if node . config . subscribeAllSubnets :
    DATA_COLUMN_SIDECAR_SUBNET_COUNT . uint64
  else :
    CUSTODY_REQUIREMENT . uint64
proc addDenebMessageHandlers (
node : BeaconNode , forkDigest : ForkDigest , slot : Slot ) =
node . addCapellaMessageHandlers ( forkDigest , slot )
  let
    targetSubnets = node . fetchCustodySubnetCount ( )
    custody_subnets =
      node . network . nodeId . get_custody_column_subnets ( max ( SAMPLES_PER_SLOT . uint8 ,
        targetSubnets . uint8 ) )

  debug " Data column custody subnets " ,
    targetSubnets , custodySubnets = custody_subnets . get
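  # Illustrative sizing (assuming the draft PeerDAS constants
  # SAMPLES_PER_SLOT = 8 and CUSTODY_REQUIREMENT = 4): a node that does not
  # subscribe to all subnets custodies max(8, 4) = 8 column subnets, while
  # --subscribe-all-subnets raises the target to DATA_COLUMN_SIDECAR_SUBNET_COUNT.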
for i in 0 'u64 .. < DATA_COLUMN_SIDECAR_SUBNET_COUNT :
2024-08-28 15:08:19 +00:00
if i in custody_subnets . get :
let topic = getDataColumnSidecarTopic ( forkDigest , i )
debugEcho " Topic "
debugEcho topic
node . network . subscribe ( topic , basicParams )
  if node . config . subscribeAllSubnets :
    node . network . loadCscnetsMetadata ( DATA_COLUMN_SIDECAR_SUBNET_COUNT . uint8 )
  else :
    let csc = node . config . custodySubnetCount
    if csc . isSome and csc . get < DATA_COLUMN_SIDECAR_SUBNET_COUNT :
      node . network . loadCscnetsMetadata ( csc . get . uint8 )
    else :
      node . network . loadCscnetsMetadata ( CUSTODY_REQUIREMENT . uint8 )
proc addElectraMessageHandlers (
node : BeaconNode , forkDigest : ForkDigest , slot : Slot ) =
node . addDenebMessageHandlers ( forkDigest , slot )
2021-11-14 08:00:25 +00:00
proc removeAltairMessageHandlers ( node : BeaconNode , forkDigest : ForkDigest ) =
node . removePhase0MessageHandlers ( forkDigest )
2021-08-09 12:54:45 +00:00
2022-01-08 23:28:49 +00:00
for subcommitteeIdx in SyncSubcommitteeIndex :
2021-08-29 05:58:27 +00:00
closureScope :
2022-01-08 23:28:49 +00:00
let idx = subcommitteeIdx
2021-11-14 08:00:25 +00:00
node . network . unsubscribe ( getSyncCommitteeTopic ( forkDigest , idx ) )
node . network . unsubscribe (
getSyncCommitteeContributionAndProofTopic ( forkDigest ) )
2022-11-24 14:38:07 +00:00
proc removeCapellaMessageHandlers ( node : BeaconNode , forkDigest : ForkDigest ) =
node . removeAltairMessageHandlers ( forkDigest )
node . network . unsubscribe ( getBlsToExecutionChangeTopic ( forkDigest ) )
2023-08-23 19:31:41 +00:00
proc removeDenebMessageHandlers ( node : BeaconNode , forkDigest : ForkDigest ) =
node . removeCapellaMessageHandlers ( forkDigest )
2024-08-24 14:52:14 +00:00
let targetSubnets = node . fetchCustodySubnetCount ( )
for topic in dataColumnSidecarTopics ( forkDigest , targetSubnets ) :
node . network . unsubscribe ( topic )
2023-08-23 19:31:41 +00:00
2024-05-14 04:12:35 +00:00
proc removeElectraMessageHandlers ( node : BeaconNode , forkDigest : ForkDigest ) =
node . removeDenebMessageHandlers ( forkDigest )
2022-11-08 11:43:38 +00:00
proc updateSyncCommitteeTopics ( node : BeaconNode , slot : Slot ) =
template lastSyncUpdate : untyped =
node . consensusManager [ ] . actionTracker . lastSyncUpdate
if lastSyncUpdate = = Opt . some ( slot . sync_committee_period ( ) ) and
nearSyncCommitteePeriod ( slot . epoch ) . isNone ( ) :
# No need to update unless we're close to the next sync committee period or
# new validators were registered with the action tracker
# TODO we _could_ skip running this in some of the "near" slots, but..
return
lastSyncUpdate = Opt . some ( slot . sync_committee_period ( ) )
2022-01-24 20:40:59 +00:00
2022-11-08 11:43:38 +00:00
let syncnets = node . getSyncCommitteeSubnets ( slot . epoch )
debug " Updating sync committee subnets " ,
syncnets ,
2022-01-24 20:40:59 +00:00
metadata_syncnets = node . network . metadata . syncnets ,
gossipState = node . gossipState
# Assume that different gossip fork sync committee setups are in sync; this
# only remains relevant, currently, for one gossip transition epoch, so the
# consequences of this not being true aren't exceptionally dire, while this
  # allows for bookkeeping simplification.
2022-11-08 11:43:38 +00:00
if syncnets = = node . network . metadata . syncnets :
2022-01-24 20:40:59 +00:00
return
let
2022-11-08 11:43:38 +00:00
newSyncnets =
syncnets - node . network . metadata . syncnets
oldSyncnets =
node . network . metadata . syncnets - syncnets
2022-01-24 20:40:59 +00:00
forkDigests = node . forkDigests ( )
for subcommitteeIdx in SyncSubcommitteeIndex :
2022-11-08 11:43:38 +00:00
doAssert not ( newSyncnets [ subcommitteeIdx ] and
oldSyncnets [ subcommitteeIdx ] )
2022-01-24 20:40:59 +00:00
for gossipFork in node . gossipState :
template topic ( ) : auto =
getSyncCommitteeTopic ( forkDigests [ gossipFork ] , subcommitteeIdx )
2022-11-08 11:43:38 +00:00
if oldSyncnets [ subcommitteeIdx ] :
2022-01-24 20:40:59 +00:00
node . network . unsubscribe ( topic )
2022-11-08 11:43:38 +00:00
elif newSyncnets [ subcommitteeIdx ] :
2022-01-24 20:40:59 +00:00
node . network . subscribe ( topic , basicParams )
2022-11-08 11:43:38 +00:00
node . network . updateSyncnetsMetadata ( syncnets )
2021-08-29 05:58:27 +00:00
2023-02-20 11:28:56 +00:00
proc doppelgangerChecked ( node : BeaconNode , epoch : Epoch ) =
2022-12-09 16:05:55 +00:00
if not node . processor [ ] . doppelgangerDetectionEnabled :
return
# broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring
# gossip - it is only viable to assert liveness in epochs where gossip is
# active
if epoch > node . processor [ ] . doppelgangerDetection . broadcastStartEpoch :
for validator in node . attachedValidators [ ] :
2023-02-20 11:28:56 +00:00
validator . doppelgangerChecked ( epoch - 1 )
2022-12-09 16:05:55 +00:00
2023-10-10 00:02:07 +00:00
from . / spec / state_transition_epoch import effective_balance_might_update
proc maybeUpdateActionTrackerNextEpoch (
node : BeaconNode , forkyState : ForkyHashedBeaconState , nextEpoch : Epoch ) =
if node . consensusManager [ ] . actionTracker . needsUpdate (
forkyState , nextEpoch ) :
template epochRefFallback ( ) =
let epochRef =
node . dag . getEpochRef ( node . dag . head , nextEpoch , false ) . expect (
" Getting head EpochRef should never fail " )
node . consensusManager [ ] . actionTracker . updateActions (
epochRef . shufflingRef , epochRef . beacon_proposers )
when forkyState is phase0 . HashedBeaconState :
# The previous_epoch_participation-based logic requires Altair or newer
epochRefFallback ( )
else :
let
shufflingRef = node . dag . getShufflingRef ( node . dag . head , nextEpoch , false ) . valueOr :
# epochRefFallback() won't work in this case either
return
nextEpochProposers = get_beacon_proposer_indices (
forkyState . data , shufflingRef . shuffled_active_validator_indices ,
nextEpoch )
nextEpochFirstProposer = nextEpochProposers [ 0 ] . valueOr :
# All proposers except the first can be more straightforwardly and
# efficiently (re)computed correctly once in that epoch.
epochRefFallback ( )
return
# Has to account for potential epoch transition TIMELY_SOURCE_FLAG_INDEX,
# TIMELY_TARGET_FLAG_INDEX, and inactivity penalties, resulting from spec
# functions get_flag_index_deltas() and get_inactivity_penalty_deltas().
#
# There are no penalties associated with TIMELY_HEAD_FLAG_INDEX, but a
      # reward exists. effective_balance == MAX_EFFECTIVE_BALANCE.Gwei ensures
      # that even so, the effective balance cannot change as a result.
#
# It's not truly necessary to avoid all rewards and penalties, but only
      # to bound them to ensure they won't unexpectedly alter effective balance
# during the upcoming epoch transition.
#
      # During the genesis epoch, the check for epoch participation is against
      # the current, not the previous, epoch, so it could otherwise end up
      # checking whether a validator has participated in an epoch that has not
      # happened yet.
#
# Because process_rewards_and_penalties() in epoch processing happens
# before the current/previous participation swap, previous is correct
# even here, and consistent with what the epoch transition uses.
#
# Whilst slashing, proposal, and sync committee rewards and penalties do
# update the balances as they occur, they don't update effective_balance
# until the end of epoch, so detect via effective_balance_might_update.
#
# On EF mainnet epoch 233906, this matches 99.5% of active validators;
# with Holesky epoch 2041, 83% of active validators.
let
participation_flags =
forkyState . data . previous_epoch_participation . item (
nextEpochFirstProposer )
effective_balance = forkyState . data . validators . item (
nextEpochFirstProposer ) . effective_balance
if participation_flags . has_flag ( TIMELY_SOURCE_FLAG_INDEX ) and
participation_flags . has_flag ( TIMELY_TARGET_FLAG_INDEX ) and
2024-03-19 13:22:07 +00:00
effective_balance = = MAX_EFFECTIVE_BALANCE . Gwei and
2023-10-10 00:02:07 +00:00
forkyState . data . slot . epoch ! = GENESIS_EPOCH and
forkyState . data . inactivity_scores . item (
nextEpochFirstProposer ) = = 0 and
not effective_balance_might_update (
forkyState . data . balances . item ( nextEpochFirstProposer ) ,
effective_balance ) :
node . consensusManager [ ] . actionTracker . updateActions (
shufflingRef , nextEpochProposers )
else :
epochRefFallback ( )
2021-10-18 09:11:44 +00:00
proc updateGossipStatus ( node : BeaconNode , slot : Slot ) {. async . } =
## Subscribe to subnets that we are providing stability for or aggregating
## and unsubscribe from the ones that are no longer relevant.
# Let the tracker know what duties are approaching - this will tell us how
# many stability subnets we need to be subscribed to and what subnets we'll
# soon be aggregating - in addition to the in-beacon-node duties, there may
# also be duties coming from the validator client, but we don't control when
# these arrive
await node . registerDuties ( slot )
# We start subscribing to gossip before we're fully synced - this allows time
# to subscribe before the sync end game
2020-12-01 10:43:02 +00:00
const
TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64
HYSTERESIS_BUFFER = 16
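  # With these constants the node only counts as "behind" for gossip purposes
  # once its head is more than 64 + 16 = 80 slots behind the wall clock; the
  # 16-slot buffer leaves time to subscribe before the sync end game completes.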
2024-05-15 13:30:49 +00:00
static : doAssert high ( ConsensusFork ) = = ConsensusFork . Electra
2020-12-01 10:43:02 +00:00
let
2021-10-18 09:11:44 +00:00
head = node . dag . head
headDistance =
if slot > head . slot : ( slot - head . slot ) . uint64
else : 0 'u64
2022-06-07 17:01:11 +00:00
isBehind =
2024-03-25 18:09:31 +00:00
headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER
2021-08-09 12:54:45 +00:00
targetGossipState =
2021-12-21 14:24:23 +00:00
getTargetGossipState (
slot . epoch ,
node . dag . cfg . ALTAIR_FORK_EPOCH ,
2022-02-02 13:06:55 +00:00
node . dag . cfg . BELLATRIX_FORK_EPOCH ,
2022-12-04 07:42:03 +00:00
node . dag . cfg . CAPELLA_FORK_EPOCH ,
2023-02-15 14:44:09 +00:00
node . dag . cfg . DENEB_FORK_EPOCH ,
2024-05-15 13:30:49 +00:00
node . dag . cfg . ELECTRA_FORK_EPOCH ,
2022-06-07 17:01:11 +00:00
isBehind )
2021-12-21 14:24:23 +00:00
doAssert targetGossipState . card < = 2
let
newGossipForks = targetGossipState - node . gossipState
oldGossipForks = node . gossipState - targetGossipState
doAssert newGossipForks . card < = 2
doAssert oldGossipForks . card < = 2
2021-08-09 12:54:45 +00:00
2021-12-21 14:24:23 +00:00
func maxGossipFork ( gossipState : GossipState ) : int =
var res = - 1
for gossipFork in gossipState :
res = max ( res , gossipFork . int )
res
if maxGossipFork ( targetGossipState ) < maxGossipFork ( node . gossipState ) and
targetGossipState ! = { } :
warn " Unexpected clock regression during transition " ,
targetGossipState ,
gossipState = node . gossipState
if node . gossipState . card = = 0 and targetGossipState . card > 0 :
2021-08-09 12:54:45 +00:00
# We are synced, so we will connect
2020-12-01 10:43:02 +00:00
debug " Enabling topic subscriptions " ,
wallSlot = slot ,
2021-10-18 09:11:44 +00:00
headSlot = head . slot ,
headDistance , targetGossipState
2020-12-01 10:43:02 +00:00
2022-01-03 21:18:49 +00:00
node . processor [ ] . setupDoppelgangerDetection ( slot )
2021-08-09 12:54:45 +00:00
2021-10-18 09:11:44 +00:00
# Specially when waiting for genesis, we'll already be synced on startup -
# it might also happen on a sufficiently fast restart
# We "know" the actions for the current and the next epoch
2022-03-16 07:20:40 +00:00
withState ( node . dag . headState ) :
2022-09-13 11:53:12 +00:00
if node . consensusManager [ ] . actionTracker . needsUpdate (
forkyState , slot . epoch ) :
2022-03-15 08:24:55 +00:00
let epochRef = node . dag . getEpochRef ( head , slot . epoch , false ) . expect (
" Getting head EpochRef should never fail " )
2023-10-10 00:02:07 +00:00
node . consensusManager [ ] . actionTracker . updateActions (
epochRef . shufflingRef , epochRef . beacon_proposers )
2022-02-04 11:25:32 +00:00
2023-10-10 00:02:07 +00:00
node . maybeUpdateActionTrackerNextEpoch ( forkyState , slot . epoch + 1 )
2021-10-18 09:11:44 +00:00
2021-12-21 14:24:23 +00:00
if node . gossipState . card > 0 and targetGossipState . card = = 0 :
debug " Disabling topic subscriptions " ,
wallSlot = slot ,
headSlot = head . slot ,
headDistance
2022-12-09 16:05:55 +00:00
node . processor [ ] . clearDoppelgangerProtection ( )
2022-01-24 20:40:59 +00:00
let forkDigests = node . forkDigests ( )
2021-12-21 14:24:23 +00:00
2023-01-28 19:53:41 +00:00
const removeMessageHandlers : array [ ConsensusFork , auto ] = [
2021-12-21 14:24:23 +00:00
removePhase0MessageHandlers ,
removeAltairMessageHandlers ,
2023-01-04 12:34:15 +00:00
removeAltairMessageHandlers , # bellatrix (altair handlers, different forkDigest)
2022-12-07 16:47:23 +00:00
removeCapellaMessageHandlers ,
2024-04-03 14:43:43 +00:00
removeDenebMessageHandlers ,
2024-05-14 04:12:35 +00:00
removeElectraMessageHandlers
2021-12-21 14:24:23 +00:00
]
for gossipFork in oldGossipForks :
removeMessageHandlers [ gossipFork ] ( node , forkDigests [ gossipFork ] )
2023-01-28 19:53:41 +00:00
const addMessageHandlers : array [ ConsensusFork , auto ] = [
2021-12-21 14:24:23 +00:00
addPhase0MessageHandlers ,
addAltairMessageHandlers ,
2023-01-05 21:35:07 +00:00
addAltairMessageHandlers , # bellatrix (altair handlers, different forkDigest)
2022-12-07 16:47:23 +00:00
addCapellaMessageHandlers ,
2024-04-03 14:43:43 +00:00
addDenebMessageHandlers ,
2024-05-14 04:12:35 +00:00
addElectraMessageHandlers
2021-12-21 14:24:23 +00:00
]
for gossipFork in newGossipForks :
addMessageHandlers [ gossipFork ] ( node , forkDigests [ gossipFork ] , slot )
2021-08-09 12:54:45 +00:00
2021-10-18 09:11:44 +00:00
node . gossipState = targetGossipState
2023-02-20 11:28:56 +00:00
node . doppelgangerChecked ( slot . epoch )
2021-10-18 09:11:44 +00:00
node . updateAttestationSubnetHandlers ( slot )
2022-08-25 03:53:59 +00:00
node . updateBlocksGossipStatus ( slot , isBehind )
2022-06-07 17:01:11 +00:00
node . updateLightClientGossipStatus ( slot , isBehind )
2020-12-01 10:43:02 +00:00
2023-09-07 06:19:33 +00:00
proc pruneBlobs ( node : BeaconNode , slot : Slot ) =
  let blobPruneEpoch = ( slot . epoch -
    node . dag . cfg . MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS - 1 )
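  # Illustrative arithmetic (assuming the mainnet value of 4096 for
  # MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS): at an epoch-aligned slot in epoch
  # 10000, blobPruneEpoch = 10000 - 4096 - 1 = 5903, i.e. the epoch that has
  # just dropped out of the minimum blob retention window.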
if slot . is_epoch ( ) and blobPruneEpoch > = node . dag . cfg . DENEB_FORK_EPOCH :
var blocks : array [ SLOTS_PER_EPOCH . int , BlockId ]
var count = 0
let startIndex = node . dag . getBlockRange (
blobPruneEpoch . start_slot , 1 , blocks . toOpenArray ( 0 , SLOTS_PER_EPOCH - 1 ) )
for i in startIndex .. < SLOTS_PER_EPOCH :
let blck = node . dag . getForkedBlock ( blocks [ int ( i ) ] ) . valueOr : continue
withBlck ( blck ) :
2023-09-27 15:10:28 +00:00
when typeof ( forkyBlck ) . kind < ConsensusFork . Deneb : continue
2023-09-07 06:19:33 +00:00
else :
2023-09-21 10:49:14 +00:00
for j in 0 .. len ( forkyBlck . message . body . blob_kzg_commitments ) - 1 :
2023-09-07 06:19:33 +00:00
if node . db . delBlobSidecar ( blocks [ int ( i ) ] . root , BlobIndex ( j ) ) :
count = count + 1
debug " pruned blobs " , count , blobPruneEpoch
2024-06-19 15:12:31 +00:00
proc pruneDataColumns ( node : BeaconNode , slot : Slot ) =
let dataColumnPruneEpoch = ( slot . epoch -
node . dag . cfg . MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS - 1 )
if slot . is_epoch ( ) and dataColumnPruneEpoch > = node . dag . cfg . DENEB_FORK_EPOCH :
var blocks : array [ SLOTS_PER_EPOCH . int , BlockId ]
var count = 0
let startIndex = node . dag . getBlockRange (
      dataColumnPruneEpoch . start_slot , 1 , blocks . toOpenArray ( 0 , SLOTS_PER_EPOCH - 1 ) )
for i in startIndex .. < SLOTS_PER_EPOCH :
let blck = node . dag . getForkedBlock ( blocks [ int ( i ) ] ) . valueOr : continue
withBlck ( blck ) :
when typeof ( forkyBlck ) . kind < ConsensusFork . Deneb : continue
else :
for j in 0 .. len ( forkyBlck . message . body . blob_kzg_commitments ) - 1 :
if node . db . delDataColumnSidecar ( blocks [ int ( i ) ] . root , ColumnIndex ( j ) ) :
count = count + 1
debug " pruned data columns " , count , dataColumnPruneEpoch
2024-08-08 12:14:55 +00:00
proc tryReconstructingDataColumns * ( self : BeaconNode ,
signed_block : deneb . TrustedSignedBeaconBlock |
electra . TrustedSignedBeaconBlock ) :
Future [ Result [ seq [ DataColumnSidecar ] , string ] ] {. async . } =
# Checks whether the data columns can be reconstructed
# or not from the recovery matrix
let localCustodySubnetCount =
if self . config . subscribeAllSubnets :
DATA_COLUMN_SIDECAR_SUBNET_COUNT . uint64
else :
CUSTODY_REQUIREMENT
let
db = self . db
root = signed_block . root
custodiedColumnIndices = get_custody_columns (
self . network . nodeId ,
2024-09-19 08:06:56 +00:00
max ( SAMPLES_PER_SLOT . uint64 , localCustodySubnetCount ) )
2024-08-08 12:14:55 +00:00
var
2024-08-19 06:34:05 +00:00
finalisedDataColumns : seq [ DataColumnSidecar ]
2024-08-08 12:14:55 +00:00
data_column_sidecars : seq [ DataColumnSidecar ]
columnsOk = true
storedColumns : seq [ ColumnIndex ]
# Loading the data columns from the database
2024-09-08 20:39:24 +00:00
for custody_column in custodiedColumnIndices :
2024-08-08 12:14:55 +00:00
let data_column = DataColumnSidecar . new ( )
if not db . getDataColumnSidecar ( root , custody_column , data_column [ ] ) :
columnsOk = false
break
data_column_sidecars . add data_column [ ]
storedColumns . add data_column . index
  debug " Data columns loaded from database " , storedColumns

  # If fewer than half of the columns (NUMBER_OF_COLUMNS div 2) are stored,
  # reconstruction is not possible; if all the data columns are already
  # stored, there is nothing to reconstruct at all.
  if storedColumns . len > = NUMBER_OF_COLUMNS div 2 and
      storedColumns . len ! = NUMBER_OF_COLUMNS :
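    # For instance, assuming NUMBER_OF_COLUMNS = 128 as in the current PeerDAS
    # draft, reconstruction is only attempted when between 64 and 127 columns
    # are available locally.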
# Recover blobs from saved data column sidecars
2024-10-07 20:58:42 +00:00
let recovered_cps = recover_cells_and_proofs ( data_column_sidecars , signed_block )
2024-08-15 19:33:53 +00:00
if not recovered_cps . isOk :
return err ( " Error recovering cells and proofs from data columns " )
2024-08-08 12:14:55 +00:00
2024-08-15 19:33:53 +00:00
# Reconstruct data column sidecars from recovered blobs
let reconstructedDataColumns = get_data_column_sidecars ( signed_block , recovered_cps . get )
    debug " Reconstructed data columns " ,
      count = reconstructedDataColumns . len
    for data_column in reconstructedDataColumns :
      if data_column . index notin custodiedColumnIndices :
        continue
      finalisedDataColumns . add ( data_column )

    for fc in finalisedDataColumns :
      db . putDataColumnSidecar ( fc )
      debug " Reconstructed data column written to database " ,
        data_column = shortLog ( fc )

  ok ( finalisedDataColumns )
proc reconstructAndSendDataColumns * ( node : BeaconNode ) {. async . } =
let
db = node . db
root = node . dag . head . root
2024-08-18 19:53:37 +00:00
2024-08-08 12:14:55 +00:00
let blck = getForkedBlock ( db , root ) . valueOr : return
withBlck ( blck ) :
2024-09-02 11:53:31 +00:00
when typeof ( forkyBlck ) . kind > = ConsensusFork . Deneb :
2024-09-12 06:29:59 +00:00
if node . config . subscribeAllSubnets :
2024-09-21 11:57:34 +00:00
let data_column_sidecars = await node . tryReconstructingDataColumns ( forkyBlck )
if not data_column_sidecars . isOk ( ) :
return
notice " Data Column Reconstructed and Saved Successfully "
2024-09-12 06:29:59 +00:00
notice " Attempting to publish reconstructed columns "
let dc = data_column_sidecars . get
var
2024-09-21 10:17:58 +00:00
das_workers = newSeq [ Future [ SendResult ] ] ( dc . len )
2024-09-12 06:29:59 +00:00
for i in 0 .. < dc . lenu64 :
2024-09-21 10:17:58 +00:00
let subnet_id = compute_subnet_for_data_column_sidecar ( dc [ i ] . index )
2024-09-12 06:29:59 +00:00
das_workers [ i ] =
node . network . broadcastDataColumnSidecar ( subnet_id , dc [ i ] )
let allres = await allFinished ( das_workers )
for i in 0 .. < allres . len :
let res = allres [ i ]
doAssert res . finished ( )
if res . failed ( ) :
notice " Reconstructed data columns not sent " ,
data_column = shortLog ( dc [ i ] ) , error = res . error [ ]
else :
notice " Reconstructed data columns sent " ,
data_column = shortLog ( dc [ i ] )
2024-09-02 11:53:31 +00:00
else :
return
2024-08-08 12:14:55 +00:00
2021-02-14 15:37:32 +00:00
proc onSlotEnd ( node : BeaconNode , slot : Slot ) {. async . } =
2020-12-18 21:01:24 +00:00
# Things we do when slot processing has ended and we're about to wait for the
# next slot
2024-08-08 12:14:55 +00:00
2023-07-18 18:55:36 +00:00
# By waiting until close before slot end, ensure that preparation for next
# slot does not interfere with propagation of messages and with VC duties.
const endOffset = aggregateSlotOffset + nanos (
( NANOSECONDS_PER_SLOT - aggregateSlotOffset . nanoseconds . uint64 ) . int64 div 2 )
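  # Rough timing sketch (assuming mainnet SECONDS_PER_SLOT = 12 and an
  # aggregateSlotOffset of two thirds of a slot, i.e. 8 s): endOffset lands at
  # 8 s + (12 s - 8 s) div 2 = 10 s into the slot, so slot-end processing
  # starts roughly 2 s before the next slot.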
let endCutoff = node . beaconClock . fromNow ( slot . start_beacon_time + endOffset )
if endCutoff . inFuture :
debug " Waiting for slot end " , slot , endCutoff = shortLog ( endCutoff . offset )
await sleepAsync ( endCutoff . offset )
2021-06-01 11:13:40 +00:00
if node . dag . needStateCachesAndForkChoicePruning ( ) :
2022-08-19 10:30:07 +00:00
if node . attachedValidators [ ] . validators . len > 0 :
node . attachedValidators [ ]
2021-05-10 14:32:28 +00:00
. slashingProtection
# pruning is only done if the DB is set to pruning mode.
. pruneAfterFinalization (
2022-01-11 10:01:54 +00:00
node . dag . finalizedHead . slot . epoch ( )
2021-05-10 14:32:28 +00:00
)
2021-03-09 14:36:17 +00:00
# Delay part of pruning until latency critical duties are done.
# The other part of pruning, `pruneBlocksDAG`, is done eagerly.
2021-05-10 14:32:28 +00:00
# ----
# This is the last pruning to do as it clears the "needPruning" condition.
2021-03-11 10:10:57 +00:00
node . consensusManager [ ] . pruneStateCachesAndForkChoice ( )
if node . config . historyMode = = HistoryMode . Prune :
if not ( slot + 1 ) . is_epoch ( ) :
# The epoch slot already is "heavy" due to the epoch processing, leave
# the pruning for later
node . dag . pruneHistory ( )
node . pruneBlobs ( slot )
node . pruneDataColumns ( slot )
when declared ( GC_fullCollect ) :
# The slots in the beacon node work as frames in a game: we want to make
# sure that we're ready for the next one and don't get stuck in lengthy
# garbage collection tasks when time is of essence in the middle of a slot -
# while this does not guarantee that we'll never collect during a slot, it
# makes sure that all the scratch space we used during slot tasks (logging,
# temporary buffers etc) gets recycled for the next slot that is likely to
# need similar amounts of memory.
2022-03-22 08:42:28 +00:00
try :
GC_fullCollect ( )
except Defect as exc :
raise exc # Reraise to maintain call stack
2023-12-15 01:00:14 +00:00
except Exception :
2022-03-22 08:42:28 +00:00
# TODO upstream
raiseAssert " Unexpected exception during GC collection "
2024-02-19 10:00:11 +00:00
let gcCollectionTick = Moment . now ( )
2020-12-18 21:01:24 +00:00
# Checkpoint the database to clear the WAL file and make sure changes in
# the database are synced with the filesystem.
2021-01-18 10:02:56 +00:00
node . db . checkpoint ( )
2024-02-19 10:00:11 +00:00
let
dbCheckpointTick = Moment . now ( )
dbCheckpointDur = dbCheckpointTick - gcCollectionTick
2024-02-20 19:34:41 +00:00
db_checkpoint_seconds . inc ( dbCheckpointDur . toFloatSeconds )
2024-02-19 10:00:11 +00:00
if dbCheckpointDur > = MinSignificantProcessingDuration :
info " Database checkpointed " , dur = dbCheckpointDur
else :
debug " Database checkpointed " , dur = dbCheckpointDur
2020-12-18 21:01:24 +00:00
2021-08-30 01:00:37 +00:00
node . syncCommitteeMsgPool [ ] . pruneData ( slot )
2022-01-24 20:40:59 +00:00
if slot . is_epoch :
2022-08-23 16:19:52 +00:00
node . dynamicFeeRecipientsStore [ ] . pruneOldMappings ( slot . epoch )
2021-08-28 22:27:51 +00:00
2021-10-18 09:11:44 +00:00
# Update upcoming actions - we do this every slot in case a reorg happens
2022-02-04 11:25:32 +00:00
let head = node . dag . head
2023-05-25 13:57:24 +00:00
if node . isSynced ( head ) and head . executionValid :
2022-03-16 07:20:40 +00:00
withState ( node . dag . headState ) :
2023-10-10 00:02:07 +00:00
# maybeUpdateActionTrackerNextEpoch might not account for balance changes
# from the process_rewards_and_penalties() epoch transition but only from
# process_block() and other per-slot sources. This mainly matters insofar
# as it might trigger process_effective_balance_updates() changes in that
# same epoch transition, which function is therefore potentially blind to
# but which might then affect beacon proposers.
#
# Because this runs every slot, it can account naturally for slashings,
# which affect balances via slash_validator() when they happen, and any
# missed sync committee participation via process_sync_aggregate(), but
      # attestation penalties, for example, need specific handling, which is
      # checked by maybeUpdateActionTrackerNextEpoch.
node . maybeUpdateActionTrackerNextEpoch ( forkyState , slot . epoch + 1 )
2021-10-18 09:11:44 +00:00
let
2022-09-07 18:34:52 +00:00
nextAttestationSlot =
node . consensusManager [ ] . actionTracker . getNextAttestationSlot ( slot )
nextProposalSlot =
node . consensusManager [ ] . actionTracker . getNextProposalSlot ( slot )
2024-03-06 11:20:53 +00:00
nextActionSlot = min ( nextAttestationSlot , nextProposalSlot )
nextActionWaitTime = saturate ( fromNow ( node . beaconClock , nextActionSlot ) )
2021-10-18 09:11:44 +00:00
2021-03-24 10:05:04 +00:00
# -1 is a more useful output than 18446744073709551615 as an indicator of
# no future attestation/proposal known.
2022-07-12 09:00:39 +00:00
template formatInt64 ( x : Slot ) : int64 =
2021-03-24 10:05:04 +00:00
if x = = high ( uint64 ) . Slot :
- 1 'i64
else :
toGaugeValue ( x )
2024-03-07 14:08:48 +00:00
let
syncCommitteeSlot = slot + 1
syncCommitteeEpoch = syncCommitteeSlot . epoch
inCurrentSyncCommittee =
not node . getCurrentSyncCommiteeSubnets ( syncCommitteeEpoch ) . isZeros ( )
2022-07-12 09:00:39 +00:00
template formatSyncCommitteeStatus ( ) : string =
2024-03-07 14:08:48 +00:00
if inCurrentSyncCommittee :
" current "
elif not node . getNextSyncCommitteeSubnets ( syncCommitteeEpoch ) . isZeros ( ) :
let slotsToNextSyncCommitteePeriod =
2023-09-11 10:51:55 +00:00
SLOTS_PER_SYNC_COMMITTEE_PERIOD -
since_sync_committee_period_start ( syncCommitteeSlot )
2024-03-07 14:08:48 +00:00
# int64 conversion is safe
doAssert slotsToNextSyncCommitteePeriod < = SLOTS_PER_SYNC_COMMITTEE_PERIOD
2022-07-12 09:00:39 +00:00
" in " & toTimeLeftString (
SECONDS_PER_SLOT . int64 . seconds * slotsToNextSyncCommitteePeriod . int64 )
else :
" none "
2020-12-18 21:01:24 +00:00
info " Slot end " ,
slot = shortLog ( slot ) ,
2021-02-14 15:37:32 +00:00
nextActionWait =
2024-03-06 11:20:53 +00:00
if nextActionSlot = = FAR_FUTURE_SLOT :
2021-02-14 15:37:32 +00:00
" n/a "
else :
2021-11-02 17:06:36 +00:00
shortLog ( nextActionWaitTime ) ,
2022-07-12 09:00:39 +00:00
nextAttestationSlot = formatInt64 ( nextAttestationSlot ) ,
nextProposalSlot = formatInt64 ( nextProposalSlot ) ,
syncCommitteeDuties = formatSyncCommitteeStatus ( ) ,
2022-02-04 11:25:32 +00:00
head = shortLog ( head )
2020-12-18 21:01:24 +00:00
2024-03-06 11:20:53 +00:00
if nextActionSlot ! = FAR_FUTURE_SLOT :
2021-03-12 09:46:26 +00:00
next_action_wait . set ( nextActionWaitTime . toFloatSeconds )
2024-03-07 14:08:48 +00:00
next_proposal_wait . set (
if nextProposalSlot ! = FAR_FUTURE_SLOT :
saturate ( fromNow ( node . beaconClock , nextProposalSlot ) ) . toFloatSeconds ( )
else :
Inf )
sync_committee_active . set ( if inCurrentSyncCommittee : 1 else : 0 )
2021-08-24 19:49:51 +00:00
let epoch = slot . epoch
2021-09-29 11:06:16 +00:00
if epoch + 1 > = node . network . forkId . next_fork_epoch :
# Update 1 epoch early to block non-fork-ready peers
2022-04-08 16:22:49 +00:00
node . network . updateForkId ( epoch , node . dag . genesis_validators_root )
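# Illustrative timing: if the next fork activates at epoch N, the ENR fork
# id is switched at epoch N-1 so that peers which do not advertise the
# upcoming fork can be identified and avoided before activation.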
2021-08-24 19:49:51 +00:00
2021-06-01 11:13:40 +00:00
# When we're not behind schedule, we'll speculatively update the clearance
2024-03-24 06:18:33 +00:00
# state in anticipation of receiving the next block - we do it after
# logging slot end since the nextActionWaitTime can be short
let advanceCutoff = node . beaconClock . fromNow (
slot . start_beacon_time ( ) + chronos . seconds ( int ( SECONDS_PER_SLOT - 1 ) ) )
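# Assuming mainnet's 12-second slots (SECONDS_PER_SLOT may differ per
# network), this cutoff lands 11 seconds into the slot, leaving roughly one
# second for the clearance-state advance before the next slot begins.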
2021-06-01 11:13:40 +00:00
if advanceCutoff . inFuture :
# We wait until there's only a second left before the next slot begins, then
# we advance the clearance state to the next slot - this gives us a high
# probability of being prepared for the block that will arrive and the
# epoch processing that follows
await sleepAsync ( advanceCutoff . offset )
2024-03-24 06:18:33 +00:00
node . dag . advanceClearanceState ( )
2021-06-01 11:13:40 +00:00
2021-10-18 09:11:44 +00:00
# Prepare action tracker for the next slot
2022-09-07 18:34:52 +00:00
node . consensusManager [ ] . actionTracker . updateSlot ( slot + 1 )
2021-10-18 09:11:44 +00:00
# The last thing we do is to perform the subscriptions and unsubscriptions for
# the next slot, just before that slot starts - because of the advance cutoff
# above, this will be done just before the next slot starts
2022-11-08 11:43:38 +00:00
node . updateSyncCommitteeTopics ( slot + 1 )
2021-10-18 09:11:44 +00:00
await node . updateGossipStatus ( slot + 1 )
2024-01-16 17:33:46 +00:00
func formatNextConsensusFork (
node : BeaconNode , withVanityArt = false ) : Opt [ string ] =
2024-01-12 20:40:34 +00:00
let consensusFork =
node . dag . cfg . consensusForkAtEpoch ( node . dag . head . slot . epoch )
2024-01-15 16:48:03 +00:00
if consensusFork = = ConsensusFork . high :
return Opt . none ( string )
let
nextConsensusFork = consensusFork . succ ( )
nextForkEpoch = node . dag . cfg . consensusForkEpoch ( nextConsensusFork )
if nextForkEpoch = = FAR_FUTURE_EPOCH :
return Opt . none ( string )
2024-01-16 17:33:46 +00:00
Opt . some (
( if withVanityArt : nextConsensusFork . getVanityMascot & " " else : " " ) &
$ nextConsensusFork & " : " & $ nextForkEpoch )
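# Illustrative result (hypothetical epoch): a string roughly of the form
# "Electra:123456" when a further fork is scheduled, or Opt.none(string)
# when none is configured.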
2024-01-12 20:40:34 +00:00
2022-08-29 12:16:35 +00:00
func syncStatus ( node : BeaconNode , wallSlot : Slot ) : string =
2024-03-24 06:18:33 +00:00
let optimisticHead = not node . dag . head . executionValid
2022-03-29 07:15:42 +00:00
if node . syncManager . inProgress :
2022-08-29 12:16:35 +00:00
let
optimisticSuffix =
2024-03-24 06:18:33 +00:00
if optimisticHead :
2022-08-29 12:16:35 +00:00
" /opt "
else :
" "
lightClientSuffix =
if node . consensusManager [ ] . shouldSyncOptimistically ( wallSlot ) :
" - lc: " & $ shortLog ( node . consensusManager [ ] . optimisticHead )
else :
" "
2024-03-25 18:09:31 +00:00
node . syncManager . syncStatus & optimisticSuffix & lightClientSuffix
2022-03-29 07:15:42 +00:00
elif node . backfiller . inProgress :
" backfill: " & node . backfiller . syncStatus
2022-08-18 13:22:22 +00:00
elif optimisticHead :
" synced/opt "
2022-03-29 07:15:42 +00:00
else :
" synced "
2022-01-20 07:25:45 +00:00
2024-01-15 15:53:34 +00:00
when defined ( windows ) :
from winservice import establishWindowsService , reportServiceStatusSuccess
2022-07-13 14:43:57 +00:00
proc onSlotStart ( node : BeaconNode , wallTime : BeaconTime ,
lastSlot : Slot ) : Future [ bool ] {. async . } =
2019-03-22 15:49:37 +00:00
## Called at the beginning of a slot - usually every slot, but sometimes might
## skip a few in case we're running late.
2021-03-01 16:36:06 +00:00
## wallTime: current system time - we will strive to perform all duties up
## to this point in time
2020-06-26 13:51:20 +00:00
## lastSlot: the last slot that we successfully processed, so we know where to
2021-03-01 16:36:06 +00:00
## start work from - there might be jumps if processing is delayed
2019-03-22 15:49:37 +00:00
let
# The slot we should be at, according to the clock
2021-03-01 16:36:06 +00:00
wallSlot = wallTime . slotOrZero
# If everything was working perfectly, the slot that we should be processing
expectedSlot = lastSlot + 1
2022-01-11 10:01:54 +00:00
finalizedEpoch = node . dag . finalizedHead . blck . slot . epoch ( )
delay = wallTime - expectedSlot . start_beacon_time ( )
2020-12-18 21:01:24 +00:00
2023-06-28 13:33:07 +00:00
node . processingDelay = Opt . some ( nanoseconds ( delay . nanoseconds ) )
2024-01-15 16:48:03 +00:00
block :
logScope :
slot = shortLog ( wallSlot )
epoch = shortLog ( wallSlot . epoch )
sync = node . syncStatus ( wallSlot )
peers = len ( node . network . peerPool )
head = shortLog ( node . dag . head )
finalized = shortLog ( getStateField (
node . dag . headState , finalized_checkpoint ) )
delay = shortLog ( delay )
let nextConsensusForkDescription = node . formatNextConsensusFork ( )
if nextConsensusForkDescription . isNone :
info " Slot start "
else :
info " Slot start " , nextFork = nextConsensusForkDescription . get
2019-03-22 15:49:37 +00:00
2020-02-17 18:24:14 +00:00
# Check before any re-scheduling of onSlotStart()
2022-07-13 14:43:57 +00:00
if checkIfShouldStopAtEpoch ( wallSlot , node . config . stopAtEpoch ) :
quit ( 0 )
2020-02-17 18:24:14 +00:00
2022-02-27 11:02:45 +00:00
when defined ( windows ) :
if node . config . runAsService :
2024-01-13 10:53:53 +00:00
reportServiceStatusSuccess ( )
2022-02-27 11:02:45 +00:00
2021-03-01 19:55:25 +00:00
beacon_slot . set wallSlot . toGaugeValue
beacon_current_epoch . set wallSlot . epoch . toGaugeValue
2019-12-23 15:34:09 +00:00
2021-03-01 19:55:25 +00:00
# both non-negative, so difference can't overflow or underflow int64
finalization_delay . set (
wallSlot . epoch . toGaugeValue - finalizedEpoch . toGaugeValue )
2019-03-22 15:49:37 +00:00
2022-07-13 13:48:09 +00:00
if node . config . strictVerification :
2022-08-01 06:41:47 +00:00
verifyFinalization ( node , wallSlot )
2019-03-22 15:49:37 +00:00
2021-03-11 10:10:57 +00:00
node . consensusManager [ ] . updateHead ( wallSlot )
2019-03-22 15:49:37 +00:00
2024-08-19 16:03:40 +00:00
await node . reconstructAndSendDataColumns ( )
2024-09-02 08:10:15 +00:00
await node . handleValidatorDuties ( lastSlot , wallSlot )
2021-03-01 16:36:06 +00:00
await onSlotEnd ( node , wallSlot )
2020-12-08 17:11:54 +00:00
2024-01-22 07:36:46 +00:00
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#registration-dissemination
2022-11-26 18:50:42 +00:00
# This specification suggests validators re-submit to builder software every
# `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs.
if wallSlot . is_epoch and
wallSlot . epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION = = 0 :
asyncSpawn node . registerValidators ( wallSlot . epoch )
2022-07-13 14:43:57 +00:00
return false
2022-06-15 02:38:27 +00:00
proc onSecond ( node : BeaconNode , time : Moment ) =
2022-02-08 19:19:21 +00:00
# Nim GC metrics (for the main thread)
updateThreadMetrics ( )
2022-03-29 07:15:42 +00:00
if node . config . stopAtSyncedEpoch ! = 0 and
node . dag . head . slot . epoch > = node . config . stopAtSyncedEpoch :
2022-03-04 17:38:01 +00:00
notice " Shutting down after having reached the target synced epoch "
bnStatus = BeaconNodeStatus . Stopping
2020-06-03 08:46:29 +00:00
proc runOnSecondLoop ( node : BeaconNode ) {. async . } =
2022-07-12 17:50:12 +00:00
const
sleepTime = chronos . seconds ( 1 )
nanosecondsIn1s = float ( sleepTime . nanoseconds )
2020-06-03 08:46:29 +00:00
while true :
let start = chronos . now ( chronos . Moment )
2020-08-10 18:49:45 +00:00
await chronos . sleepAsync ( sleepTime )
let afterSleep = chronos . now ( chronos . Moment )
let sleepTime = afterSleep - start
2022-06-15 02:38:27 +00:00
node . onSecond ( start )
2020-08-10 18:49:45 +00:00
let finished = chronos . now ( chronos . Moment )
let processingTime = finished - afterSleep
ticks_delay . set ( sleepTime . nanoseconds . float / nanosecondsIn1s )
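# Interpretation note (not from the original source): with the 1-second
# sleep, a ticks_delay near 1.0 means the event loop is keeping up, while
# noticeably larger values indicate that other work delayed this iteration.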
2020-10-01 18:56:42 +00:00
trace " onSecond task completed " , sleepTime , processingTime
2019-03-27 20:17:01 +00:00
2021-03-24 10:05:04 +00:00
func connectedPeersCount ( node : BeaconNode ) : int =
2020-09-14 14:50:03 +00:00
len ( node . network . peerPool )
2020-03-16 22:28:54 +00:00
2021-03-17 18:46:45 +00:00
proc installRestHandlers ( restServer : RestServerRef , node : BeaconNode ) =
restServer . router . installBeaconApiHandlers ( node )
2023-06-07 21:27:15 +00:00
restServer . router . installBuilderApiHandlers ( node )
2021-03-17 18:46:45 +00:00
restServer . router . installConfigApiHandlers ( node )
restServer . router . installDebugApiHandlers ( node )
2021-03-23 22:50:18 +00:00
restServer . router . installEventApiHandlers ( node )
restServer . router . installNimbusApiHandlers ( node )
2021-03-17 18:46:45 +00:00
restServer . router . installNodeApiHandlers ( node )
2021-03-23 22:50:18 +00:00
restServer . router . installValidatorApiHandlers ( node )
2022-06-24 14:57:50 +00:00
if node . dag . lcDataStore . serve :
2022-06-19 05:57:52 +00:00
restServer . router . installLightClientApiHandlers ( node )
2021-03-17 18:46:45 +00:00
2022-12-04 07:42:03 +00:00
from . / spec / datatypes / capella import SignedBeaconBlock
2020-08-17 12:07:29 +00:00
proc installMessageValidators ( node : BeaconNode ) =
2020-08-12 17:48:31 +00:00
# These validators stay around the whole time, regardless of which specific
# subnets are subscribed to during any given epoch.
2022-03-29 07:15:42 +00:00
let forkDigests = node . dag . forkDigests
2023-05-16 07:46:41 +00:00
for fork in ConsensusFork :
withConsensusFork ( fork ) :
let digest = forkDigests [ ] . atConsensusFork ( consensusFork )
# beacon_block
2023-09-01 09:31:52 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_block
2023-05-16 07:46:41 +00:00
node . network . addValidator (
getBeaconBlocksTopic ( digest ) , proc (
signedBlock : consensusFork . SignedBeaconBlock
) : ValidationResult =
if node . shouldSyncOptimistically ( node . currentSlot ) :
toValidationResult (
node . optimisticProcessor . processSignedBeaconBlock (
signedBlock ) )
else :
toValidationResult (
node . processor [ ] . processSignedBeaconBlock (
MsgSource . gossip , signedBlock ) ) )
# beacon_attestation_{subnet_id}
2023-12-16 02:27:06 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
2024-05-14 16:01:26 +00:00
when consensusFork > = ConsensusFork . Electra :
for it in SubnetId :
closureScope : # Needed for inner `proc`; don't lift it out of loop.
let subnet_id = it
node . network . addAsyncValidator (
getAttestationTopic ( digest , subnet_id ) , proc (
attestation : electra . Attestation
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
return toValidationResult (
await node . processor . processAttestation (
MsgSource . gossip , attestation , subnet_id ) ) )
else :
for it in SubnetId :
closureScope : # Needed for inner `proc`; don't lift it out of loop.
let subnet_id = it
node . network . addAsyncValidator (
getAttestationTopic ( digest , subnet_id ) , proc (
attestation : phase0 . Attestation
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
return toValidationResult (
await node . processor . processAttestation (
MsgSource . gossip , attestation , subnet_id ) ) )
2023-05-16 07:46:41 +00:00
# beacon_aggregate_and_proof
2023-11-11 05:27:53 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
2024-05-14 16:01:26 +00:00
when consensusFork > = ConsensusFork . Electra :
node . network . addAsyncValidator (
getAggregateAndProofsTopic ( digest ) , proc (
signedAggregateAndProof : electra . SignedAggregateAndProof
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
return toValidationResult (
await node . processor . processSignedAggregateAndProof (
MsgSource . gossip , signedAggregateAndProof ) ) )
else :
node . network . addAsyncValidator (
getAggregateAndProofsTopic ( digest ) , proc (
signedAggregateAndProof : phase0 . SignedAggregateAndProof
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
return toValidationResult (
await node . processor . processSignedAggregateAndProof (
MsgSource . gossip , signedAggregateAndProof ) ) )
2023-05-16 07:46:41 +00:00
# attester_slashing
2023-12-16 02:27:06 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#attester_slashing
2023-05-16 07:46:41 +00:00
node . network . addValidator (
getAttesterSlashingsTopic ( digest ) , proc (
2024-04-21 05:49:11 +00:00
attesterSlashing : phase0 . AttesterSlashing
2023-05-16 07:46:41 +00:00
) : ValidationResult =
toValidationResult (
node . processor [ ] . processAttesterSlashing (
MsgSource . gossip , attesterSlashing ) ) )
# proposer_slashing
2023-12-06 22:16:55 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#proposer_slashing
2023-05-16 07:46:41 +00:00
node . network . addValidator (
getProposerSlashingsTopic ( digest ) , proc (
proposerSlashing : ProposerSlashing
) : ValidationResult =
toValidationResult (
node . processor [ ] . processProposerSlashing (
MsgSource . gossip , proposerSlashing ) ) )
# voluntary_exit
2023-12-06 22:16:55 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit
2023-05-16 07:46:41 +00:00
node . network . addValidator (
getVoluntaryExitsTopic ( digest ) , proc (
signedVoluntaryExit : SignedVoluntaryExit
) : ValidationResult =
toValidationResult (
node . processor [ ] . processSignedVoluntaryExit (
MsgSource . gossip , signedVoluntaryExit ) ) )
when consensusFork > = ConsensusFork . Altair :
# sync_committee_{subnet_id}
2024-03-14 06:26:36 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
2023-05-16 07:46:41 +00:00
for subcommitteeIdx in SyncSubcommitteeIndex :
closureScope : # Needed for inner `proc`; don't lift it out of loop.
let idx = subcommitteeIdx
node . network . addAsyncValidator (
getSyncCommitteeTopic ( digest , idx ) , proc (
msg : SyncCommitteeMessage
2024-01-22 16:34:54 +00:00
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
2023-05-16 07:46:41 +00:00
return toValidationResult (
await node . processor . processSyncCommitteeMessage (
MsgSource . gossip , msg , idx ) ) )
# sync_committee_contribution_and_proof
2024-03-14 06:26:36 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
2021-11-05 15:39:47 +00:00
node . network . addAsyncValidator (
2023-05-16 07:46:41 +00:00
getSyncCommitteeContributionAndProofTopic ( digest ) , proc (
msg : SignedContributionAndProof
2024-01-22 16:34:54 +00:00
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
2021-11-05 15:39:47 +00:00
return toValidationResult (
2023-05-16 07:46:41 +00:00
await node . processor . processSignedContributionAndProof (
MsgSource . gossip , msg ) ) )
when consensusFork > = ConsensusFork . Capella :
2024-05-09 05:03:10 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change
2021-12-09 12:56:54 +00:00
node . network . addAsyncValidator (
2023-05-16 07:46:41 +00:00
getBlsToExecutionChangeTopic ( digest ) , proc (
msg : SignedBLSToExecutionChange
2024-01-22 16:34:54 +00:00
) : Future [ ValidationResult ] {. async : ( raises : [ CancelledError ] ) . } =
2021-12-09 12:56:54 +00:00
return toValidationResult (
2023-05-16 07:46:41 +00:00
await node . processor . processBlsToExecutionChange (
MsgSource . gossip , msg ) ) )
when consensusFork > = ConsensusFork . Deneb :
2023-11-04 14:20:34 +00:00
# blob_sidecar_{subnet_id}
2023-12-16 02:27:06 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
2024-05-27 12:45:41 +00:00
# for it in BlobId:
# closureScope: # Needed for inner `proc`; don't lift it out of loop.
# let subnet_id = it
# node.network.addValidator(
# getBlobSidecarTopic(digest, subnet_id), proc (
# blobSidecar: deneb.BlobSidecar
# ): ValidationResult =
# toValidationResult(
# node.processor[].processBlobSidecar(
# MsgSource.gossip, blobSidecar, subnet_id)))
2024-06-24 12:02:06 +00:00
# data_column_sidecar_{subnet_id}
2024-08-22 09:53:37 +00:00
#
2024-08-28 15:08:19 +00:00
# let subnetCount =
# if node.config.subscribeAllSubnets:
# DATA_COLUMN_SIDECAR_SUBNET_COUNT.uint64
# else:
# CUSTODY_REQUIREMENT.uint64
2024-08-23 10:26:36 +00:00
2024-08-28 15:08:19 +00:00
# let dc_subnets = get_custody_column_subnet(node.network.nodeId, subnetCount)
2024-08-24 11:11:38 +00:00
for it in 0 'u64 .. < DATA_COLUMN_SIDECAR_SUBNET_COUNT :
2023-05-16 07:46:41 +00:00
closureScope : # Needed for inner `proc`; don't lift it out of loop.
2023-11-04 14:20:34 +00:00
let subnet_id = it
2024-08-28 15:08:19 +00:00
node . network . addValidator (
getDataColumnSidecarTopic ( digest , subnet_id ) , proc (
dataColumnSidecar : DataColumnSidecar
) : ValidationResult =
toValidationResult (
node . processor [ ] . processDataColumnSidecar (
MsgSource . gossip , dataColumnSidecar , subnet_id ) ) )
2023-01-19 22:00:40 +00:00
2022-06-07 17:01:11 +00:00
node . installLightClientMessageValidators ( )
2022-03-14 13:05:38 +00:00
2021-12-20 11:21:17 +00:00
proc stop ( node : BeaconNode ) =
2020-09-28 15:19:57 +00:00
bnStatus = BeaconNodeStatus . Stopping
2020-10-01 18:56:42 +00:00
notice " Graceful shutdown "
2020-09-01 13:44:40 +00:00
if not node . config . inProcessValidators :
2021-03-26 06:52:01 +00:00
try :
node . vcProcess . close ( )
except Exception as exc :
warn " Couldn ' t close vc process " , msg = exc . msg
try :
waitFor node . network . stop ( )
except CatchableError as exc :
warn " Couldn ' t stop network " , msg = exc . msg
2022-08-19 10:30:07 +00:00
node . attachedValidators [ ] . slashingProtection . close ( )
2022-08-07 21:53:20 +00:00
node . attachedValidators [ ] . close ( )
2020-09-12 05:35:58 +00:00
node . db . close ( )
2020-11-20 13:23:55 +00:00
notice " Databases closed "
2020-05-19 18:57:35 +00:00
2022-01-20 07:25:45 +00:00
proc startBackfillTask ( node : BeaconNode ) {. async . } =
while node . dag . needsBackfill :
if not node . syncManager . inProgress :
# Only start the backfiller if it's needed _and_ head sync has completed -
# if we lose sync after having synced head, we could stop the backfiller,
# but this should be a fringe case - might as well keep the logic simple for
# now
node . backfiller . start ( )
return
await sleepAsync ( chronos . seconds ( 2 ) )
2023-08-25 09:29:07 +00:00
proc run ( node : BeaconNode ) {. raises : [ CatchableError ] . } =
2021-11-01 14:50:24 +00:00
bnStatus = BeaconNodeStatus . Running
2020-05-19 18:57:35 +00:00
2022-08-19 10:30:07 +00:00
if not isNil ( node . restServer ) :
2021-11-01 14:50:24 +00:00
node . restServer . installRestHandlers ( node )
node . restServer . start ( )
2021-03-17 18:46:45 +00:00
2022-08-19 10:30:07 +00:00
if not isNil ( node . keymanagerServer ) :
doAssert not isNil ( node . keymanagerHost )
node . keymanagerServer . router . installKeymanagerHandlers ( node . keymanagerHost [ ] )
2021-12-22 12:37:31 +00:00
if node . keymanagerServer ! = node . restServer :
node . keymanagerServer . start ( )
2021-11-01 14:50:24 +00:00
let
wallTime = node . beaconClock . now ( )
wallSlot = wallTime . slotOrZero ( )
2020-06-18 10:03:36 +00:00
2022-06-07 17:01:11 +00:00
node . startLightClient ( )
2021-11-01 14:50:24 +00:00
node . requestManager . start ( )
node . syncManager . start ( )
2021-10-18 09:11:44 +00:00
2022-01-20 07:25:45 +00:00
if node . dag . needsBackfill ( ) : asyncSpawn node . startBackfillTask ( )
2021-11-01 14:50:24 +00:00
waitFor node . updateGossipStatus ( wallSlot )
2021-10-18 09:11:44 +00:00
2023-10-13 12:42:00 +00:00
for web3signerUrl in node . config . web3SignerUrls :
2023-09-06 19:04:10 +00:00
# TODO
# The current strategy polls all remote signers independently
# from each other, which may lead to race conditions if
# validators are migrated from one signer to another
# (because the updates to our validator pool are not atomic).
# Consider using different strategies that would detect such
# race conditions.
asyncSpawn node . pollForDynamicValidators (
web3signerUrl , node . config . web3signerUpdateInterval )
2021-11-01 14:50:24 +00:00
asyncSpawn runSlotLoop ( node , wallTime , onSlotStart )
asyncSpawn runOnSecondLoop ( node )
asyncSpawn runQueueProcessingLoop ( node . blockProcessor )
2023-02-16 17:25:48 +00:00
asyncSpawn runKeystoreCachePruningLoop ( node . keystoreCache )
2020-12-01 10:43:02 +00:00
2020-05-19 18:57:35 +00:00
# main event loop
2020-09-28 15:19:57 +00:00
while bnStatus = = BeaconNodeStatus . Running :
2021-03-26 06:52:01 +00:00
poll ( ) # if poll fails, the network is broken
2020-04-20 14:59:18 +00:00
2020-05-19 18:57:35 +00:00
# time to say goodbye
node . stop ( )
2018-11-23 23:58:49 +00:00
2018-12-19 12:58:53 +00:00
var gPidFile : string
2023-08-25 09:29:07 +00:00
proc createPidFile ( filename : string ) {. raises : [ IOError ] . } =
2019-07-07 09:53:58 +00:00
writeFile filename , $ os . getCurrentProcessId ( )
2018-12-19 12:58:53 +00:00
gPidFile = filename
2020-08-19 13:12:10 +00:00
addQuitProc proc { . noconv . } = discard io2 . removeFile ( gPidFile )
2020-06-11 12:13:12 +00:00
proc initializeNetworking ( node : BeaconNode ) {. async . } =
2021-11-01 14:50:24 +00:00
node . installMessageValidators ( )
2020-11-16 19:15:43 +00:00
info " Listening to incoming network requests "
2020-08-03 17:35:27 +00:00
await node . network . startListening ( )
2020-06-11 12:13:12 +00:00
2020-06-19 17:42:28 +00:00
let addressFile = node . config . dataDir / " beacon_node.enr "
2020-06-11 12:13:12 +00:00
writeFile ( addressFile , node . network . announcedENR . toURI )
2020-09-21 16:02:27 +00:00
await node . network . start ( )
2020-06-11 12:13:12 +00:00
2023-08-25 09:29:07 +00:00
proc start * ( node : BeaconNode ) {. raises : [ CatchableError ] . } =
2019-11-25 12:47:29 +00:00
let
2021-06-01 11:13:40 +00:00
head = node . dag . head
finalizedHead = node . dag . finalizedHead
2022-01-11 10:01:54 +00:00
genesisTime = node . beaconClock . fromNow ( start_beacon_time ( Slot 0 ) )
2020-06-11 12:13:12 +00:00
2020-10-01 18:56:42 +00:00
notice " Starting beacon node " ,
2019-11-12 00:05:35 +00:00
version = fullVersionStr ,
2022-08-12 14:53:15 +00:00
nimVersion = NimVersion ,
2020-11-16 19:15:43 +00:00
enr = node . network . announcedENR . toURI ,
peerId = $ node . network . switch . peerInfo . peerId ,
2019-08-16 11:16:56 +00:00
timeSinceFinalization =
2022-01-11 10:01:54 +00:00
node . beaconClock . now ( ) - finalizedHead . slot . start_beacon_time ( ) ,
2020-07-28 13:54:32 +00:00
head = shortLog ( head ) ,
2021-11-02 17:06:36 +00:00
justified = shortLog ( getStateField (
2022-03-16 07:20:40 +00:00
node . dag . headState , current_justified_checkpoint ) ) ,
2021-11-02 17:06:36 +00:00
finalized = shortLog ( getStateField (
2022-03-16 07:20:40 +00:00
node . dag . headState , finalized_checkpoint ) ) ,
2020-07-16 13:16:51 +00:00
finalizedHead = shortLog ( finalizedHead ) ,
2019-03-20 11:52:30 +00:00
SLOTS_PER_EPOCH ,
SECONDS_PER_SLOT ,
2019-09-12 01:45:04 +00:00
SPEC_VERSION ,
2020-11-16 19:15:43 +00:00
dataDir = node . config . dataDir . string ,
2021-02-22 16:17:48 +00:00
validators = node . attachedValidators [ ] . count
2019-03-20 11:52:30 +00:00
2020-06-29 05:34:48 +00:00
if genesisTime . inFuture :
notice " Waiting for genesis " , genesisIn = genesisTime . offset
2020-06-11 12:13:12 +00:00
waitFor node . initializeNetworking ( )
2020-11-12 16:21:04 +00:00
2023-03-05 01:40:21 +00:00
node . elManager . start ( )
2019-03-20 11:52:30 +00:00
node . run ( )
2024-03-19 13:22:07 +00:00
func formatGwei ( amount : Gwei ) : string =
2019-10-03 01:51:44 +00:00
# TODO This is implemented in quite a silly way.
# Better routines for formatting decimal numbers
# should exist somewhere else.
let
2024-03-19 13:22:07 +00:00
eth = distinctBase ( amount ) div 1000000000
remainder = distinctBase ( amount ) mod 1000000000
2019-10-03 01:51:44 +00:00
result = $ eth
if remainder ! = 0 :
result . add ' . '
2020-12-01 18:08:55 +00:00
let remainderStr = $ remainder
for i in remainderStr . len .. < 9 :
result . add ' 0 '
result . add remainderStr
2019-10-03 01:51:44 +00:00
while result [ ^ 1 ] = = ' 0 ' :
result . setLen ( result . len - 1 )
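# Worked example for formatGwei above (illustrative): 1_500_000_000 Gwei
# renders as "1.5" - the remainder is zero-padded to nine digits and
# trailing zeros are stripped.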
2022-03-14 09:19:50 +00:00
when not defined ( windows ) :
2023-08-25 09:29:07 +00:00
proc initStatusBar ( node : BeaconNode ) {. raises : [ ValueError ] . } =
2022-03-14 09:19:50 +00:00
if not isatty ( stdout ) : return
if not node . config . statusBarEnabled : return
2021-02-22 16:17:48 +00:00
2022-03-14 09:19:50 +00:00
try :
enableTrueColors ( )
except Exception as exc : # TODO Exception
error " Couldn ' t enable colors " , err = exc . msg
2023-08-25 09:29:07 +00:00
proc dataResolver ( expr : string ) : string {. raises : [ ] . } =
2022-03-14 09:19:50 +00:00
template justified : untyped = node . dag . head . atEpochStart (
getStateField (
2022-03-16 07:20:40 +00:00
node . dag . headState , current_justified_checkpoint ) . epoch )
2022-03-14 09:19:50 +00:00
# TODO:
# We should introduce a general API for resolving dot expressions
# such as `db.latest_block.slot` or `metrics.connected_peers`.
# Such an API can be shared between the RPC back-end, CLI tools
# such as ncli, a potential GraphQL back-end and so on.
# The status bar feature would allow the user to specify an
# arbitrary expression that is resolvable through this API.
case expr . toLowerAscii
2022-06-14 11:53:58 +00:00
of " version " :
versionAsStr
of " full_version " :
fullVersionStr
2022-03-14 09:19:50 +00:00
of " connected_peers " :
$ ( node . connectedPeersCount )
of " head_root " :
shortLog ( node . dag . head . root )
of " head_epoch " :
$ ( node . dag . head . slot . epoch )
of " head_epoch_slot " :
$ ( node . dag . head . slot . since_epoch_start )
of " head_slot " :
$ ( node . dag . head . slot )
of " justifed_root " :
shortLog ( justified . blck . root )
of " justifed_epoch " :
$ ( justified . slot . epoch )
of " justifed_epoch_slot " :
$ ( justified . slot . since_epoch_start )
of " justifed_slot " :
$ ( justified . slot )
of " finalized_root " :
shortLog ( node . dag . finalizedHead . blck . root )
of " finalized_epoch " :
$ ( node . dag . finalizedHead . slot . epoch )
of " finalized_epoch_slot " :
$ ( node . dag . finalizedHead . slot . since_epoch_start )
of " finalized_slot " :
$ ( node . dag . finalizedHead . slot )
of " epoch " :
$ node . currentSlot . epoch
of " epoch_slot " :
$ ( node . currentSlot . since_epoch_start )
of " slot " :
$ node . currentSlot
of " slots_per_epoch " :
$ SLOTS_PER_EPOCH
of " slot_trailing_digits " :
var slotStr = $ node . currentSlot
if slotStr . len > 3 : slotStr = slotStr [ ^ 3 .. ^ 1 ]
slotStr
of " attached_validators_balance " :
formatGwei ( node . attachedValidatorBalanceTotal )
2024-01-15 16:48:03 +00:00
of " next_consensus_fork " :
2024-01-16 17:33:46 +00:00
let nextConsensusForkDescription =
node . formatNextConsensusFork ( withVanityArt = true )
2024-01-15 16:48:03 +00:00
if nextConsensusForkDescription . isNone :
" "
else :
" (scheduled " & nextConsensusForkDescription . get & " ) "
2024-01-12 20:40:34 +00:00
2022-03-14 09:19:50 +00:00
of " sync_status " :
2022-08-29 12:16:35 +00:00
node . syncStatus ( node . currentSlot )
2022-03-14 09:19:50 +00:00
else :
# We ignore typos for now and just render the expression
# as it was written. TODO: come up with a good way to show
# an error message to the user.
" $ " & expr
var statusBar = StatusBarView . init (
node . config . statusBarContents ,
dataResolver )
when compiles ( defaultChroniclesStream . outputs [ 0 ] . writer ) :
let tmp = defaultChroniclesStream . outputs [ 0 ] . writer
defaultChroniclesStream . outputs [ 0 ] . writer =
2023-08-25 09:29:07 +00:00
proc ( logLevel : LogLevel , msg : LogOutputStr ) {. raises : [ ] . } =
2022-03-14 09:19:50 +00:00
try :
# p.hidePrompt
erase statusBar
# p.writeLine msg
tmp ( logLevel , msg )
render statusBar
# p.showPrompt
except Exception as e : # render raises Exception
logLoggingFailure ( cstring ( msg ) , e )
proc statusBarUpdatesPollingLoop ( ) {. async . } =
try :
while true :
update statusBar
2021-02-22 16:17:48 +00:00
erase statusBar
render statusBar
2022-03-14 09:19:50 +00:00
await sleepAsync ( chronos . seconds ( 1 ) )
except CatchableError as exc :
warn " Failed to update status bar, no further updates " , err = exc . msg
2021-02-22 16:17:48 +00:00
2022-03-14 09:19:50 +00:00
asyncSpawn statusBarUpdatesPollingLoop ( )
2021-02-22 16:17:48 +00:00
2023-08-25 09:29:07 +00:00
proc doRunBeaconNode ( config : var BeaconNodeConf , rng : ref HmacDrbgContext ) {. raises : [ CatchableError ] . } =
2021-02-22 16:17:48 +00:00
info " Launching beacon node " ,
version = fullVersionStr ,
bls_backend = $ BLS_BACKEND ,
2024-01-17 13:38:56 +00:00
const_preset ,
2021-02-22 16:17:48 +00:00
cmdParams = commandLineParams ( ) ,
config
2022-09-19 21:47:46 +00:00
template ignoreDeprecatedOption ( option : untyped ) : untyped =
if config . option . isSome :
warn " Config option is deprecated " ,
option = config . option . get
ignoreDeprecatedOption requireEngineAPI
2022-09-29 06:29:49 +00:00
ignoreDeprecatedOption safeSlotsToImportOptimistically
2022-10-24 20:32:52 +00:00
ignoreDeprecatedOption terminalTotalDifficultyOverride
2023-01-04 15:51:14 +00:00
ignoreDeprecatedOption optimistic
2023-01-16 10:28:35 +00:00
ignoreDeprecatedOption validatorMonitorTotals
2023-03-14 16:54:15 +00:00
ignoreDeprecatedOption web3ForcePolling
2022-09-19 21:47:46 +00:00
2021-02-22 16:17:48 +00:00
createPidFile ( config . dataDir . string / " beacon_node.pid " )
config . createDumpDirs ( )
if config . metricsEnabled :
2021-04-01 12:44:11 +00:00
let metricsAddress = config . metricsAddress
notice " Starting metrics HTTP server " ,
url = " http:// " & $ metricsAddress & " : " & $ config . metricsPort & " /metrics "
try :
startMetricsHttpServer ( $ metricsAddress , config . metricsPort )
2022-02-08 19:19:21 +00:00
except CatchableError as exc :
raise exc
except Exception as exc :
raiseAssert exc . msg # TODO fix metrics
# Nim GC metrics (for the main thread) will be collected in onSecond(), but
# we disable piggy-backing on other metrics here.
setSystemMetricsAutomaticUpdate ( false )
2020-10-09 13:57:45 +00:00
2021-02-22 16:17:48 +00:00
# There are no managed event loops in here to do a graceful shutdown, but
# letting the default Ctrl+C handler exit is safe, since we only read from
# the db.
2022-12-06 16:43:11 +00:00
let metadata = config . loadEth2Network ( )
2021-11-25 10:53:31 +00:00
2021-11-01 14:50:24 +00:00
# Updating the config based on the metadata certainly is not beautiful but it
# works
for node in metadata . bootstrapNodes :
config . bootstrapNodes . add node
2023-09-12 07:52:51 +00:00
if config . forkChoiceVersion . isNone :
2024-03-20 18:12:33 +00:00
config . forkChoiceVersion = some ( ForkChoiceVersion . Pr3431 )
2022-06-07 17:01:11 +00:00
2023-08-28 08:40:23 +00:00
## Ctrl+C handling
proc controlCHandler ( ) {. noconv . } =
when defined ( windows ) :
# workaround for https://github.com/nim-lang/Nim/issues/4057
try :
setupForeignThreadGc ( )
except Exception as exc : raiseAssert exc . msg # shouldn't happen
notice " Shutting down after having received SIGINT "
bnStatus = BeaconNodeStatus . Stopping
try :
setControlCHook ( controlCHandler )
except Exception as exc : # TODO Exception
warn " Cannot set ctrl-c handler " , msg = exc . msg
# equivalent SIGTERM handler
when defined ( posix ) :
proc SIGTERMHandler ( signal : cint ) {. noconv . } =
notice " Shutting down after having received SIGTERM "
bnStatus = BeaconNodeStatus . Stopping
c_signal ( ansi_c . SIGTERM , SIGTERMHandler )
2023-10-31 12:43:46 +00:00
if metadata . cfg . DENEB_FORK_EPOCH ! = FAR_FUTURE_EPOCH :
2023-06-14 08:52:00 +00:00
let res =
if config . trustedSetupFile . isNone :
conf . loadKzgTrustedSetup ( )
else :
conf . loadKzgTrustedSetup ( config . trustedSetupFile . get )
2023-05-11 08:52:44 +00:00
if res . isErr ( ) :
raiseAssert res . error ( )
2023-10-31 12:43:46 +00:00
let node = waitFor BeaconNode . init ( rng , config , metadata )
2021-02-22 16:17:48 +00:00
if bnStatus = = BeaconNodeStatus . Stopping :
return
2020-07-02 15:14:11 +00:00
2022-03-14 09:19:50 +00:00
when not defined ( windows ) :
# This status bar can lock a Windows terminal emulator, blocking the whole
# event loop (seen on Windows 10, with a default MSYS2 terminal).
initStatusBar ( node )
2020-07-02 15:52:48 +00:00
2021-02-22 16:17:48 +00:00
if node . nickname ! = " " :
dynamicLogScope ( node = node . nickname ) : node . start ( )
2020-07-07 23:02:14 +00:00
else :
2021-02-22 16:17:48 +00:00
node . start ( )
2022-06-21 08:29:16 +00:00
proc doRecord ( config : BeaconNodeConf , rng : var HmacDrbgContext ) {.
2023-08-25 09:29:07 +00:00
raises : [ CatchableError ] . } =
2021-02-22 16:17:48 +00:00
case config . recordCmd :
of RecordCmd . create :
let netKeys = getPersistentNetKeys ( rng , config )
var fieldPairs : seq [ FieldPair ]
for field in config . fields :
let fieldPair = field . split ( " : " )
if fieldPair . len > 1 :
fieldPairs . add ( toFieldPair ( fieldPair [ 0 ] , hexToSeqByte ( fieldPair [ 1 ] ) ) )
else :
fatal " Invalid field pair "
quit QuitFailure
2020-06-24 13:57:09 +00:00
2021-02-22 16:17:48 +00:00
let record = enr . Record . init (
config . seqNumber ,
netKeys . seckey . asEthKey ,
2023-11-10 15:58:48 +00:00
some ( config . ipExt ) ,
2021-02-22 16:17:48 +00:00
some ( config . tcpPortExt ) ,
some ( config . udpPortExt ) ,
fieldPairs ) . expect ( " Record within size limits " )
2020-07-01 09:13:56 +00:00
2021-02-22 16:17:48 +00:00
echo record . toURI ( )
2020-08-21 19:36:42 +00:00
2021-02-22 16:17:48 +00:00
of RecordCmd . print :
echo $ config . recordPrint
2020-03-24 11:13:07 +00:00
2022-06-21 08:29:16 +00:00
proc doWeb3Cmd ( config : BeaconNodeConf , rng : var HmacDrbgContext )
2023-08-25 09:29:07 +00:00
{. raises : [ CatchableError ] . } =
2021-02-22 16:17:48 +00:00
case config . web3Cmd :
of Web3Cmd . test :
2022-04-12 22:28:01 +00:00
let metadata = config . loadEth2Network ( )
2022-03-31 14:43:05 +00:00
2021-02-22 16:17:48 +00:00
waitFor testWeb3Provider ( config . web3TestUrl ,
2022-03-31 14:43:05 +00:00
metadata . cfg . DEPOSIT_CONTRACT_ADDRESS ,
2022-07-12 10:08:52 +00:00
rng . loadJwtSecret ( config , allowCreate = true ) )
2020-11-27 19:48:33 +00:00
2023-08-25 09:29:07 +00:00
proc doSlashingExport ( conf : BeaconNodeConf ) {. raises : [ IOError ] . } =
2021-05-19 06:38:13 +00:00
let
dir = conf . validatorsDir ( )
filetrunc = SlashingDbName
# TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
let db = SlashingProtectionDB . loadUnchecked ( dir , filetrunc , readOnly = false )
let interchange = conf . exportedInterchangeFile . string
db . exportSlashingInterchange ( interchange , conf . exportedValidators )
echo " Export finished: ' " , dir / filetrunc & " .sqlite3 " , " ' into ' " , interchange , " ' "
2023-08-25 09:29:07 +00:00
proc doSlashingImport ( conf : BeaconNodeConf ) {. raises : [ SerializationError , IOError ] . } =
2021-05-19 06:38:13 +00:00
let
dir = conf . validatorsDir ( )
filetrunc = SlashingDbName
# TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
let interchange = conf . importedInterchangeFile . string
var spdir : SPDIR
try :
2022-09-19 22:09:56 +00:00
spdir = Json . loadFile ( interchange , SPDIR ,
requireAllFields = true )
2021-05-19 06:38:13 +00:00
except SerializationError as err :
writeStackTrace ( )
2022-04-08 16:22:49 +00:00
stderr . write $ Json & " load issue for file \" " , interchange , " \" \n "
2021-05-19 06:38:13 +00:00
stderr . write err . formatMsg ( interchange ) , " \n "
quit 1
# Open DB and handle migration from v1 to v2 if needed
let db = SlashingProtectionDB . init (
genesis_validators_root = Eth2Digest spdir . metadata . genesis_validators_root ,
basePath = dir ,
dbname = filetrunc ,
modes = { kCompleteArchive }
)
# Now import the slashing interchange file
# Failures mode:
# - siError can only happen with invalid genesis_validators_root which would be caught above
# - siPartial can happen for invalid public keys, slashable blocks, slashable votes
let status = db . inclSPDIR ( spdir )
doAssert status in { siSuccess , siPartial }
echo " Import finished: ' " , interchange , " ' into ' " , dir / filetrunc & " .sqlite3 " , " ' "
2023-08-25 09:29:07 +00:00
proc doSlashingInterchange ( conf : BeaconNodeConf ) {. raises : [ CatchableError ] . } =
2021-05-19 06:38:13 +00:00
case conf . slashingdbCmd
of SlashProtCmd . ` export ` :
conf . doSlashingExport ( )
of SlashProtCmd . ` import ` :
conf . doSlashingImport ( )
2023-08-25 09:29:07 +00:00
proc handleStartUpCmd ( config : var BeaconNodeConf ) {. raises : [ CatchableError ] . } =
2022-02-27 11:02:45 +00:00
# Single RNG instance for the application - will be seeded on construction
# and avoid using system resources (such as urandom) after that
2023-06-19 22:43:50 +00:00
let rng = HmacDrbgContext . new ( )
2022-02-27 11:02:45 +00:00
case config . cmd
of BNStartUpCmd . noCommand : doRunBeaconNode ( config , rng )
of BNStartUpCmd . deposits : doDeposits ( config , rng [ ] )
of BNStartUpCmd . wallets : doWallets ( config , rng [ ] )
of BNStartUpCmd . record : doRecord ( config , rng [ ] )
2022-03-31 14:43:05 +00:00
of BNStartUpCmd . web3 : doWeb3Cmd ( config , rng [ ] )
2022-02-27 11:02:45 +00:00
of BNStartUpCmd . slashingdb : doSlashingInterchange ( config )
2022-04-08 16:22:49 +00:00
of BNStartUpCmd . trustedNodeSync :
2022-11-02 10:02:38 +00:00
if config . blockId . isSome ( ) :
error " --blockId option has been removed - use --state-id instead! "
quit 1
2023-10-31 00:56:52 +00:00
let
metadata = loadEth2Network ( config )
db = BeaconChainDB . new ( config . databaseDir , metadata . cfg , inMemory = false )
2024-03-08 13:52:54 +00:00
genesisState = waitFor fetchGenesisState ( metadata )
2023-10-31 00:56:52 +00:00
waitFor db . doRunTrustedNodeSync (
metadata ,
2022-02-27 11:02:45 +00:00
config . databaseDir ,
2022-11-10 10:44:47 +00:00
config . eraDir ,
2022-02-27 11:02:45 +00:00
config . trustedNodeUrl ,
2023-10-31 00:56:52 +00:00
config . stateId ,
config . lcTrustedBlockRoot ,
2022-02-27 11:02:45 +00:00
config . backfillBlocks ,
2022-03-11 12:49:47 +00:00
config . reindex ,
2024-03-05 14:41:22 +00:00
config . downloadDepositSnapshot ,
genesisState )
2023-10-31 00:56:52 +00:00
db . close ( )
2022-02-27 11:02:45 +00:00
2021-03-26 06:52:01 +00:00
{. pop . } # TODO moduletests exceptions
2022-02-27 11:02:45 +00:00
2021-02-22 16:17:48 +00:00
programMain :
2024-01-13 10:53:53 +00:00
var config = makeBannerAndConfig ( clientId , copyrights , nimBanner ,
SPEC_VERSION , [ ] , BeaconNodeConf ) . valueOr :
stderr . write error
quit QuitFailure
2020-11-28 18:50:09 +00:00
2021-02-22 16:17:48 +00:00
if not ( checkAndCreateDataDir ( string ( config . dataDir ) ) ) :
# We are unable to access/create data folder or data folder's
# permissions are insecure.
quit QuitFailure
2023-09-21 14:08:13 +00:00
setupFileLimits ( )
2021-11-02 17:06:36 +00:00
setupLogging ( config . logLevel , config . logStdout , config . logFile )
2021-02-22 16:17:48 +00:00
## This Ctrl+C handler exits the program in non-graceful way.
## It's responsible for handling Ctrl+C in sub-commands such
## as `wallets *` and `deposits *`. In a regular beacon node
## run, it will be overwritten later with a different handler
## performing a graceful exit.
proc exitImmediatelyOnCtrlC ( ) {. noconv . } =
when defined ( windows ) :
# workaround for https://github.com/nim-lang/Nim/issues/4057
setupForeignThreadGc ( )
2021-03-16 08:06:45 +00:00
# in case a password prompt disabled echoing
resetStdin ( )
2021-02-22 16:17:48 +00:00
echo " " # If we interrupt during an interactive prompt, this
# will move the cursor to the next line
notice " Shutting down after having received SIGINT "
quit 0
setControlCHook ( exitImmediatelyOnCtrlC )
# equivalent SIGTERM handler
when defined ( posix ) :
proc exitImmediatelyOnSIGTERM ( signal : cint ) {. noconv . } =
notice " Shutting down after having received SIGTERM "
quit 0
2022-07-13 14:43:57 +00:00
c_signal ( ansi_c . SIGTERM , exitImmediatelyOnSIGTERM )
2021-02-22 16:17:48 +00:00
2022-02-27 11:02:45 +00:00
when defined ( windows ) :
if config . runAsService :
2024-01-13 10:53:53 +00:00
proc exitService ( ) =
bnStatus = BeaconNodeStatus . Stopping
establishWindowsService ( clientId , copyrights , nimBanner , SPEC_VERSION ,
" nimbus_beacon_node " , BeaconNodeConf ,
handleStartUpCmd , exitService )
2022-02-27 11:02:45 +00:00
else :
handleStartUpCmd ( config )
else :
handleStartUpCmd ( config )