2022-03-11 20:28:10 +00:00
# beacon_chain
2022-01-05 14:24:15 +00:00
# Copyright (c) 2018-2022 Status Research & Development GmbH
2020-04-24 07:16:11 +00:00
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
2022-07-29 10:53:42 +00:00
# Strict exception tracking for the whole module: no proc below may raise
# an untracked exception. Nim < 1.4 required `Defect` to be listed
# explicitly; from 1.4 on, defects are not tracked, so the empty list is
# the equivalent spelling.
when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}
2021-03-26 06:52:01 +00:00
2018-11-23 23:58:49 +00:00
import
2022-02-11 20:40:49 +00:00
std / [ os , random , sequtils , terminal , times ] ,
2022-06-21 08:29:16 +00:00
chronos , chronicles , chronicles / chronos_tools ,
2022-02-11 20:40:49 +00:00
metrics , metrics / chronos_httpserver ,
stew / [ byteutils , io2 ] ,
eth / p2p / discoveryv5 / [ enr , random2 ] ,
eth / keys ,
2022-12-21 12:30:24 +00:00
. / consensus_object_pools / vanity_logs / vanity_logs ,
2022-08-25 03:53:59 +00:00
. / networking / topic_params ,
2022-05-24 07:23:48 +00:00
. / rpc / [ rest_api , state_ttl_cache ] ,
2022-01-18 13:36:52 +00:00
. / spec / datatypes / [ altair , bellatrix , phase0 ] ,
2022-12-07 10:24:51 +00:00
. / spec / [ deposit_snapshots , engine_authentication , weak_subjectivity ] ,
2022-02-11 20:40:49 +00:00
. / validators / [ keystore_management , validator_duties ] ,
" . " / [
2022-06-07 17:01:11 +00:00
beacon_node , beacon_node_light_client , deposits , interop ,
nimbus_binary_common , statusbar , trusted_node_sync , wallets ]
2020-12-04 16:28:42 +00:00
2021-06-03 09:43:04 +00:00
when defined ( posix ) :
import system / ansi_c
2023-01-04 12:34:15 +00:00
from . / spec / datatypes / eip4844 import SignedBeaconBlock
2021-02-09 09:20:55 +00:00
from
libp2p / protocols / pubsub / gossipsub
import
TopicParams , validateParameters , init
2022-02-27 11:02:45 +00:00
when defined(windows):
  import winlean

  # Hand-rolled subset of the Win32 service API, used to run the beacon node
  # as a Windows service. Only the declarations needed by this module are
  # mirrored here; the `importc` names must match advapi32.dll exactly.
  type
    LPCSTR* = cstring
    LPSTR* = cstring

    SERVICE_STATUS* {.final, pure.} = object
      ## Mirror of the Win32 SERVICE_STATUS structure reported to the SCM.
      dwServiceType*: DWORD
      dwCurrentState*: DWORD
      dwControlsAccepted*: DWORD
      dwWin32ExitCode*: DWORD
      dwServiceSpecificExitCode*: DWORD
      dwCheckPoint*: DWORD
      dwWaitHint*: DWORD

    SERVICE_STATUS_HANDLE* = DWORD
    LPSERVICE_STATUS* = ptr SERVICE_STATUS
    LPSERVICE_MAIN_FUNCTION* = proc (para1: DWORD, para2: LPSTR) {.stdcall.}

    SERVICE_TABLE_ENTRY* {.final, pure.} = object
      lpServiceName*: LPSTR
      lpServiceProc*: LPSERVICE_MAIN_FUNCTION

    LPSERVICE_TABLE_ENTRY* = ptr SERVICE_TABLE_ENTRY
    LPHANDLER_FUNCTION* = proc (para1: DWORD): WINBOOL {.stdcall.}

  const
    # Win32 service type / state / control constants - values are fixed by
    # the Windows API and must not be changed.
    SERVICE_WIN32_OWN_PROCESS = 16
    SERVICE_RUNNING = 4
    SERVICE_STOPPED = 1
    SERVICE_START_PENDING = 2
    SERVICE_STOP_PENDING = 3
    SERVICE_CONTROL_STOP = 1
    SERVICE_CONTROL_PAUSE = 2
    SERVICE_CONTROL_CONTINUE = 3
    SERVICE_CONTROL_INTERROGATE = 4
    SERVICE_ACCEPT_STOP = 1
    NO_ERROR = 0
    SERVICE_NAME = LPCSTR "NIMBUS_BEACON_NODE"

  var
    gSvcStatusHandle: SERVICE_STATUS_HANDLE  # handle from RegisterServiceCtrlHandler
    gSvcStatus: SERVICE_STATUS               # last status reported to the SCM

  # Forward declaration - implemented elsewhere in this module.
  proc reportServiceStatus*(dwCurrentState, dwWin32ExitCode, dwWaitHint: DWORD) {.gcsafe.}

  proc StartServiceCtrlDispatcher*(lpServiceStartTable: LPSERVICE_TABLE_ENTRY): WINBOOL {.
    stdcall, dynlib: "advapi32", importc: "StartServiceCtrlDispatcherA".}

  proc SetServiceStatus*(hServiceStatus: SERVICE_STATUS_HANDLE,
                         lpServiceStatus: LPSERVICE_STATUS): WINBOOL {.stdcall,
    dynlib: "advapi32", importc: "SetServiceStatus".}

  proc RegisterServiceCtrlHandler*(lpServiceName: LPCSTR,
                                   lpHandlerProc: LPHANDLER_FUNCTION): SERVICE_STATUS_HANDLE {.
    stdcall, dynlib: "advapi32", importc: "RegisterServiceCtrlHandlerA".}
2020-02-19 08:58:10 +00:00
type
  # Local shorthand - the node's RPC server is the HTTP flavour.
  RpcServer = RpcHttpServer

template init(T: type RpcHttpServer, ip: ValidIpAddress, port: Port): T =
  ## Convenience constructor: bind an RPC HTTP server to a single address.
  newRpcHttpServer([initTAddress(ip, port)])
2020-02-19 08:58:10 +00:00
2022-08-19 10:30:07 +00:00
# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_slot, "Latest slot of the beacon chain state"
declareGauge beacon_current_epoch, "Current epoch"

# Finalization tracking
declareGauge finalization_delay,
  "Epoch delay between scheduled epoch and finalized epoch"

declareGauge ticks_delay,
  "How long it takes to run the onSecond loop"

declareGauge next_action_wait,
  "Seconds until the next attestation will be sent"

# NOTE: "beacnde" is the established (deliberately short) log topic tag for
# this module - do not "fix" the spelling, external tooling filters on it.
logScope: topics = "beacnde"
2022-12-21 12:30:24 +00:00
func getVanityLogs(stdoutKind: StdoutLogKind): VanityLogs =
  ## Pick the set of celebratory log callbacks that matches the stdout sink:
  ## animated/colored ASCII art for TTYs, monochrome art for plain text, and
  ## ordinary `notice` lines for JSON / disabled stdout logging.
  case stdoutKind
  of StdoutLogKind.Auto: raiseAssert "inadmissable here"
  of StdoutLogKind.Colors:
    VanityLogs(
      onMergeTransitionBlock:          color🐼,
      onFinalizedMergeTransitionBlock: blink🐼,
      onUpgradeToCapella:              color🦉)
  of StdoutLogKind.NoColors:
    VanityLogs(
      onMergeTransitionBlock:          mono🐼,
      onFinalizedMergeTransitionBlock: mono🐼,
      onUpgradeToCapella:              mono🦉)
  of StdoutLogKind.Json, StdoutLogKind.None:
    VanityLogs(
      onMergeTransitionBlock:
        (proc() = notice "🐼 Proof of Stake Activated 🐼"),
      onFinalizedMergeTransitionBlock:
        (proc() = notice "🐼 Proof of Stake Finalized 🐼"),
      onUpgradeToCapella:
        # "Withdrowls" is a deliberate owl pun, not a typo.
        (proc() = notice "🦉 Withdrowls now available 🦉"))
2022-06-29 16:53:59 +00:00
2022-03-19 16:48:24 +00:00
proc loadChainDag(
    config: BeaconNodeConf,
    cfg: RuntimeConfig,
    db: BeaconChainDB,
    eventBus: EventBus,
    validatorMonitor: ref ValidatorMonitor,
    networkGenesisValidatorsRoot: Opt[Eth2Digest],
    shouldEnableTestFeatures: bool): ChainDAGRef =
  ## Load the block DAG from `db`, wiring up light-client event callbacks
  ## and verifying that the database belongs to the expected network.
  ## Quits the process on a genesis-validators-root mismatch.
  info "Loading block DAG from database", path = config.databaseDir

  # Forward light client updates to the event bus so REST subscribers see them.
  proc onLightClientFinalityUpdate(data: altair.LightClientFinalityUpdate) =
    eventBus.finUpdateQueue.emit(data)
  proc onLightClientOptimisticUpdate(data: altair.LightClientOptimisticUpdate) =
    eventBus.optUpdateQueue.emit(data)

  let
    extraFlags =
      if shouldEnableTestFeatures: {enableTestFeatures, lowParticipation}
      else: {enableTestFeatures}
    chainDagFlags =
      if config.strictVerification: {strictVerification}
      else: {}
    # Callbacks are only installed when light client data serving is enabled.
    onLightClientFinalityUpdateCb =
      if config.lightClientDataServe: onLightClientFinalityUpdate
      else: nil
    onLightClientOptimisticUpdateCb =
      if config.lightClientDataServe: onLightClientOptimisticUpdate
      else: nil
    dag = ChainDAGRef.init(
      cfg, db, validatorMonitor, extraFlags + chainDagFlags, config.eraDir,
      vanityLogs = getVanityLogs(detectTTY(config.logStdout)),
      lcDataConfig = LightClientDataConfig(
        serve: config.lightClientDataServe,
        importMode: config.lightClientDataImportMode,
        maxPeriods: config.lightClientDataMaxPeriods,
        onLightClientFinalityUpdate: onLightClientFinalityUpdateCb,
        onLightClientOptimisticUpdate: onLightClientOptimisticUpdateCb))
    databaseGenesisValidatorsRoot =
      getStateField(dag.headState, genesis_validators_root)

  # Refuse to run against a database created for a different network.
  if networkGenesisValidatorsRoot.isSome:
    if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot:
      fatal "The specified --data-dir contains data for a different network",
        networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get,
        databaseGenesisValidatorsRoot,
        dataDir = config.dataDir
      quit 1

  dag
proc checkWeakSubjectivityCheckpoint(
    dag: ChainDAGRef,
    wsCheckpoint: Checkpoint,
    beaconClock: BeaconClock) =
  ## Abort startup when the configured weak subjectivity checkpoint is no
  ## longer within the weak subjectivity period for the current wall slot.
  let wallSlot = beaconClock.now.slotOrZero
  if not is_within_weak_subjectivity_period(
      dag.cfg, wallSlot, dag.headState, wsCheckpoint):
    error "Weak subjectivity checkpoint is stale",
      currentSlot = wallSlot, checkpoint = wsCheckpoint,
      headStateSlot = getStateField(dag.headState, slot)
    quit 1
2022-03-21 16:52:15 +00:00
proc initFullNode (
node : BeaconNode ,
2022-06-21 08:29:16 +00:00
rng : ref HmacDrbgContext ,
2022-03-21 16:52:15 +00:00
dag : ChainDAGRef ,
2022-04-08 16:22:49 +00:00
taskpool : TaskPoolPtr ,
2022-03-21 16:52:15 +00:00
getBeaconTime : GetBeaconTimeFn ) =
template config ( ) : auto = node . config
proc onAttestationReceived ( data : Attestation ) =
2022-06-17 15:27:28 +00:00
node . eventBus . attestQueue . emit ( data )
2022-03-21 16:52:15 +00:00
proc onSyncContribution ( data : SignedContributionAndProof ) =
2022-06-17 15:27:28 +00:00
node . eventBus . contribQueue . emit ( data )
2022-03-21 16:52:15 +00:00
proc onVoluntaryExitAdded ( data : SignedVoluntaryExit ) =
2022-06-17 15:27:28 +00:00
node . eventBus . exitQueue . emit ( data )
2022-06-28 10:21:16 +00:00
proc onBlockAdded ( data : ForkedTrustedSignedBeaconBlock ) =
let optimistic =
if node . currentSlot ( ) . epoch ( ) > = dag . cfg . BELLATRIX_FORK_EPOCH :
2022-07-04 20:35:33 +00:00
some node . dag . is_optimistic ( data . root )
2022-06-28 10:21:16 +00:00
else :
none [ bool ] ( )
node . eventBus . blocksQueue . emit (
EventBeaconBlockObject . init ( data , optimistic ) )
proc onHeadChanged ( data : HeadChangeInfoObject ) =
let eventData =
if node . currentSlot ( ) . epoch ( ) > = dag . cfg . BELLATRIX_FORK_EPOCH :
var res = data
2022-07-04 20:35:33 +00:00
res . optimistic = some node . dag . is_optimistic ( data . block_root )
2022-06-28 10:21:16 +00:00
res
else :
data
node . eventBus . headQueue . emit ( eventData )
proc onChainReorg ( data : ReorgInfoObject ) =
let eventData =
if node . currentSlot ( ) . epoch ( ) > = dag . cfg . BELLATRIX_FORK_EPOCH :
var res = data
2022-07-04 20:35:33 +00:00
res . optimistic = some node . dag . is_optimistic ( data . new_head_block )
2022-06-28 10:21:16 +00:00
res
else :
data
node . eventBus . reorgQueue . emit ( eventData )
2022-03-21 16:52:15 +00:00
proc makeOnFinalizationCb (
# This `nimcall` functions helps for keeping track of what
# needs to be captured by the onFinalization closure.
2022-06-17 15:27:28 +00:00
eventBus : EventBus ,
2022-03-21 16:52:15 +00:00
eth1Monitor : Eth1Monitor ) : OnFinalizedCallback {. nimcall . } =
2022-06-17 15:27:28 +00:00
static : doAssert ( eth1Monitor is ref )
2022-03-21 16:52:15 +00:00
return proc ( dag : ChainDAGRef , data : FinalizationInfoObject ) =
if eth1Monitor ! = nil :
let finalizedEpochRef = dag . getFinalizedEpochRef ( )
discard trackFinalizedState ( eth1Monitor ,
finalizedEpochRef . eth1_data ,
finalizedEpochRef . eth1_deposit_index )
2022-06-07 17:01:11 +00:00
node . updateLightClientFromDag ( )
2022-06-28 10:21:16 +00:00
let eventData =
if node . currentSlot ( ) . epoch ( ) > = dag . cfg . BELLATRIX_FORK_EPOCH :
var res = data
2022-07-04 20:35:33 +00:00
res . optimistic = some node . dag . is_optimistic ( data . block_root )
2022-06-28 10:21:16 +00:00
res
else :
data
eventBus . finalQueue . emit ( eventData )
2022-03-21 16:52:15 +00:00
func getLocalHeadSlot ( ) : Slot =
dag . head . slot
proc getLocalWallSlot ( ) : Slot =
node . beaconClock . now . slotOrZero
func getFirstSlotAtFinalizedEpoch ( ) : Slot =
dag . finalizedHead . slot
func getBackfillSlot ( ) : Slot =
dag . backfill . slot
era: load blocks and states (#3394)
* era: load blocks and states
Era files contain finalized history and can be thought of as an
alternative source for block and state data that allows clients to avoid
syncing this information from the P2P network - the P2P network is then
used to "top up" the client with the most recent data. They can be
freely shared in the community via whatever means (http, torrent, etc)
and serve as a permanent cold store of consensus data (and, after the
merge, execution data) for history buffs and bean counters alike.
This PR gently introduces support for loading blocks and states in two
cases: block requests from rest/p2p and frontfilling when doing
checkpoint sync.
The era files are used as a secondary source if the information is not
found in the database - compared to the database, there are a few key
differences:
* the database stores the block indexed by block root while the era file
indexes by slot - the former is used only in rest, while the latter is
used both by p2p and rest.
* when loading blocks from era files, the root is no longer trivially
available - if it is needed, it must either be computed (slow) or cached
(messy) - the good news is that for p2p requests, it is not needed
* in era files, "framed" snappy encoding is used while in the database
we store unframed snappy - for p2p2 requests, the latter requires
recompression while the former could avoid it
* front-filling is the process of using era files to replace backfilling
- in theory this front-filling could happen from any block and
front-fills with gaps could also be entertained, but our backfilling
algorithm cannot take advantage of this because there's no (simple) way
to tell it to "skip" a range.
* front-filling, as implemented, is a bit slow (10s to load mainnet): we
load the full BeaconState for every era to grab the roots of the blocks
- it would be better to partially load the state - as such, it would
also be good to be able to partially decompress snappy blobs
* lookups from REST via root are served by first looking up a block
summary in the database, then using the slot to load the block data from
the era file - however, there needs to be an option to create the
summary table from era files to fully support historical queries
To test this, `ncli_db` has an era file exporter: the files it creates
should be placed in an `era` folder next to `db` in the data directory.
What's interesting in particular about this setup is that `db` remains
as the source of truth for security purposes - it stores the latest
synced head root which in turn determines where a node "starts" its
consensus participation - the era directory however can be freely shared
between nodes / people without any (significant) security implications,
assuming the era files are consistent / not broken.
There's lots of future improvements to be had:
* we can drop the in-memory `BlockRef` index almost entirely - at this
point, resident memory usage of Nimbus should drop to a cool 500-600 mb
* we could serve era files via REST trivially: this would drop backfill
times to whatever time it takes to download the files - unlike the
current implementation that downloads block by block, downloading an era
at a time almost entirely cuts out request overhead
* we can "reasonably" recreate detailed state history from almost any
point in time, turning an O(slot) process into O(1) effectively - we'll
still need caches and indices to do this with sufficient efficiency for
the rest api, but at least it cuts the whole process down to minutes
instead of hours, for arbitrary points in time
* CI: ignore failures with Nim-1.6 (temporary)
* test fixes
Co-authored-by: Ștefan Talpalaru <stefantalpalaru@yahoo.com>
2022-03-23 08:58:17 +00:00
func getFrontfillSlot ( ) : Slot =
2022-12-23 07:42:55 +00:00
max ( dag . frontfill . get ( BlockId ( ) ) . slot , dag . horizon )
era: load blocks and states (#3394)
* era: load blocks and states
Era files contain finalized history and can be thought of as an
alternative source for block and state data that allows clients to avoid
syncing this information from the P2P network - the P2P network is then
used to "top up" the client with the most recent data. They can be
freely shared in the community via whatever means (http, torrent, etc)
and serve as a permanent cold store of consensus data (and, after the
merge, execution data) for history buffs and bean counters alike.
This PR gently introduces support for loading blocks and states in two
cases: block requests from rest/p2p and frontfilling when doing
checkpoint sync.
The era files are used as a secondary source if the information is not
found in the database - compared to the database, there are a few key
differences:
* the database stores the block indexed by block root while the era file
indexes by slot - the former is used only in rest, while the latter is
used both by p2p and rest.
* when loading blocks from era files, the root is no longer trivially
available - if it is needed, it must either be computed (slow) or cached
(messy) - the good news is that for p2p requests, it is not needed
* in era files, "framed" snappy encoding is used while in the database
we store unframed snappy - for p2p2 requests, the latter requires
recompression while the former could avoid it
* front-filling is the process of using era files to replace backfilling
- in theory this front-filling could happen from any block and
front-fills with gaps could also be entertained, but our backfilling
algorithm cannot take advantage of this because there's no (simple) way
to tell it to "skip" a range.
* front-filling, as implemented, is a bit slow (10s to load mainnet): we
load the full BeaconState for every era to grab the roots of the blocks
- it would be better to partially load the state - as such, it would
also be good to be able to partially decompress snappy blobs
* lookups from REST via root are served by first looking up a block
summary in the database, then using the slot to load the block data from
the era file - however, there needs to be an option to create the
summary table from era files to fully support historical queries
To test this, `ncli_db` has an era file exporter: the files it creates
should be placed in an `era` folder next to `db` in the data directory.
What's interesting in particular about this setup is that `db` remains
as the source of truth for security purposes - it stores the latest
synced head root which in turn determines where a node "starts" its
consensus participation - the era directory however can be freely shared
between nodes / people without any (significant) security implications,
assuming the era files are consistent / not broken.
There's lots of future improvements to be had:
* we can drop the in-memory `BlockRef` index almost entirely - at this
point, resident memory usage of Nimbus should drop to a cool 500-600 mb
* we could serve era files via REST trivially: this would drop backfill
times to whatever time it takes to download the files - unlike the
current implementation that downloads block by block, downloading an era
at a time almost entirely cuts out request overhead
* we can "reasonably" recreate detailed state history from almost any
point in time, turning an O(slot) process into O(1) effectively - we'll
still need caches and indices to do this with sufficient efficiency for
the rest api, but at least it cuts the whole process down to minutes
instead of hours, for arbitrary points in time
* CI: ignore failures with Nim-1.6 (temporary)
* test fixes
Co-authored-by: Ștefan Talpalaru <stefantalpalaru@yahoo.com>
2022-03-23 08:58:17 +00:00
2022-03-21 16:52:15 +00:00
let
quarantine = newClone (
Quarantine . init ( ) )
attestationPool = newClone (
2022-04-12 10:06:30 +00:00
AttestationPool . init ( dag , quarantine , onAttestationReceived ) )
2022-03-21 16:52:15 +00:00
syncCommitteeMsgPool = newClone (
SyncCommitteeMsgPool . init ( rng , onSyncContribution ) )
2022-05-23 12:02:54 +00:00
lightClientPool = newClone (
LightClientPool ( ) )
2022-03-21 16:52:15 +00:00
exitPool = newClone (
2022-07-06 10:33:02 +00:00
ExitPool . init ( dag , attestationPool , onVoluntaryExitAdded ) )
2022-03-21 16:52:15 +00:00
consensusManager = ConsensusManager . new (
2022-08-23 16:19:52 +00:00
dag , attestationPool , quarantine , node . eth1Monitor ,
2022-09-07 18:34:52 +00:00
ActionTracker . init ( rng , config . subscribeAllSubnets ) ,
2022-09-17 05:30:07 +00:00
node . dynamicFeeRecipientsStore , config . validatorsDir ,
2022-08-23 16:19:52 +00:00
config . defaultFeeRecipient )
2022-03-21 16:52:15 +00:00
blockProcessor = BlockProcessor . new (
config . dumpEnabled , config . dumpDirInvalid , config . dumpDirIncoming ,
2023-01-04 15:51:14 +00:00
rng , taskpool , consensusManager , node . validatorMonitor , getBeaconTime )
2022-03-21 16:52:15 +00:00
blockVerifier = proc ( signedBlock : ForkedSignedBeaconBlock ) :
2022-11-10 17:40:27 +00:00
Future [ Result [ void , VerifierError ] ] =
2022-03-21 16:52:15 +00:00
# The design with a callback for block verification is unusual compared
# to the rest of the application, but fits with the general approach
# taken in the sync/request managers - this is an architectural compromise
# that should probably be reimagined more holistically in the future.
2022-11-10 17:40:27 +00:00
let resfut = newFuture [ Result [ void , VerifierError ] ] ( " blockVerifier " )
2022-03-21 16:52:15 +00:00
blockProcessor [ ] . addBlock ( MsgSource . gossip , signedBlock , resfut )
resfut
processor = Eth2Processor . new (
config . doppelgangerDetection ,
blockProcessor , node . validatorMonitor , dag , attestationPool , exitPool ,
2022-05-23 12:02:54 +00:00
node . attachedValidators , syncCommitteeMsgPool , lightClientPool ,
quarantine , rng , getBeaconTime , taskpool )
2022-04-08 16:22:49 +00:00
syncManager = newSyncManager [ Peer , PeerId ] (
2022-03-21 16:52:15 +00:00
node . network . peerPool , SyncQueueKind . Forward , getLocalHeadSlot ,
getLocalWallSlot , getFirstSlotAtFinalizedEpoch , getBackfillSlot ,
era: load blocks and states (#3394)
* era: load blocks and states
Era files contain finalized history and can be thought of as an
alternative source for block and state data that allows clients to avoid
syncing this information from the P2P network - the P2P network is then
used to "top up" the client with the most recent data. They can be
freely shared in the community via whatever means (http, torrent, etc)
and serve as a permanent cold store of consensus data (and, after the
merge, execution data) for history buffs and bean counters alike.
This PR gently introduces support for loading blocks and states in two
cases: block requests from rest/p2p and frontfilling when doing
checkpoint sync.
The era files are used as a secondary source if the information is not
found in the database - compared to the database, there are a few key
differences:
* the database stores the block indexed by block root while the era file
indexes by slot - the former is used only in rest, while the latter is
used both by p2p and rest.
* when loading blocks from era files, the root is no longer trivially
available - if it is needed, it must either be computed (slow) or cached
(messy) - the good news is that for p2p requests, it is not needed
* in era files, "framed" snappy encoding is used while in the database
we store unframed snappy - for p2p2 requests, the latter requires
recompression while the former could avoid it
* front-filling is the process of using era files to replace backfilling
- in theory this front-filling could happen from any block and
front-fills with gaps could also be entertained, but our backfilling
algorithm cannot take advantage of this because there's no (simple) way
to tell it to "skip" a range.
* front-filling, as implemented, is a bit slow (10s to load mainnet): we
load the full BeaconState for every era to grab the roots of the blocks
- it would be better to partially load the state - as such, it would
also be good to be able to partially decompress snappy blobs
* lookups from REST via root are served by first looking up a block
summary in the database, then using the slot to load the block data from
the era file - however, there needs to be an option to create the
summary table from era files to fully support historical queries
To test this, `ncli_db` has an era file exporter: the files it creates
should be placed in an `era` folder next to `db` in the data directory.
What's interesting in particular about this setup is that `db` remains
as the source of truth for security purposes - it stores the latest
synced head root which in turn determines where a node "starts" its
consensus participation - the era directory however can be freely shared
between nodes / people without any (significant) security implications,
assuming the era files are consistent / not broken.
There's lots of future improvements to be had:
* we can drop the in-memory `BlockRef` index almost entirely - at this
point, resident memory usage of Nimbus should drop to a cool 500-600 mb
* we could serve era files via REST trivially: this would drop backfill
times to whatever time it takes to download the files - unlike the
current implementation that downloads block by block, downloading an era
at a time almost entirely cuts out request overhead
* we can "reasonably" recreate detailed state history from almost any
point in time, turning an O(slot) process into O(1) effectively - we'll
still need caches and indices to do this with sufficient efficiency for
the rest api, but at least it cuts the whole process down to minutes
instead of hours, for arbitrary points in time
* CI: ignore failures with Nim-1.6 (temporary)
* test fixes
Co-authored-by: Ștefan Talpalaru <stefantalpalaru@yahoo.com>
2022-03-23 08:58:17 +00:00
getFrontfillSlot , dag . tail . slot , blockVerifier )
2022-04-08 16:22:49 +00:00
backfiller = newSyncManager [ Peer , PeerId ] (
2022-03-21 16:52:15 +00:00
node . network . peerPool , SyncQueueKind . Backward , getLocalHeadSlot ,
getLocalWallSlot , getFirstSlotAtFinalizedEpoch , getBackfillSlot ,
era: load blocks and states (#3394)
* era: load blocks and states
Era files contain finalized history and can be thought of as an
alternative source for block and state data that allows clients to avoid
syncing this information from the P2P network - the P2P network is then
used to "top up" the client with the most recent data. They can be
freely shared in the community via whatever means (http, torrent, etc)
and serve as a permanent cold store of consensus data (and, after the
merge, execution data) for history buffs and bean counters alike.
This PR gently introduces support for loading blocks and states in two
cases: block requests from rest/p2p and frontfilling when doing
checkpoint sync.
The era files are used as a secondary source if the information is not
found in the database - compared to the database, there are a few key
differences:
* the database stores the block indexed by block root while the era file
indexes by slot - the former is used only in rest, while the latter is
used both by p2p and rest.
* when loading blocks from era files, the root is no longer trivially
available - if it is needed, it must either be computed (slow) or cached
(messy) - the good news is that for p2p requests, it is not needed
* in era files, "framed" snappy encoding is used while in the database
we store unframed snappy - for p2p2 requests, the latter requires
recompression while the former could avoid it
* front-filling is the process of using era files to replace backfilling
- in theory this front-filling could happen from any block and
front-fills with gaps could also be entertained, but our backfilling
algorithm cannot take advantage of this because there's no (simple) way
to tell it to "skip" a range.
* front-filling, as implemented, is a bit slow (10s to load mainnet): we
load the full BeaconState for every era to grab the roots of the blocks
- it would be better to partially load the state - as such, it would
also be good to be able to partially decompress snappy blobs
* lookups from REST via root are served by first looking up a block
summary in the database, then using the slot to load the block data from
the era file - however, there needs to be an option to create the
summary table from era files to fully support historical queries
To test this, `ncli_db` has an era file exporter: the files it creates
should be placed in an `era` folder next to `db` in the data directory.
What's interesting in particular about this setup is that `db` remains
as the source of truth for security purposes - it stores the latest
synced head root which in turn determines where a node "starts" its
consensus participation - the era directory however can be freely shared
between nodes / people without any (significant) security implications,
assuming the era files are consistent / not broken.
There's lots of future improvements to be had:
* we can drop the in-memory `BlockRef` index almost entirely - at this
point, resident memory usage of Nimbus should drop to a cool 500-600 mb
* we could serve era files via REST trivially: this would drop backfill
times to whatever time it takes to download the files - unlike the
current implementation that downloads block by block, downloading an era
at a time almost entirely cuts out request overhead
* we can "reasonably" recreate detailed state history from almost any
point in time, turning an O(slot) process into O(1) effectively - we'll
still need caches and indices to do this with sufficient efficiency for
the rest api, but at least it cuts the whole process down to minutes
instead of hours, for arbitrary points in time
* CI: ignore failures with Nim-1.6 (temporary)
* test fixes
Co-authored-by: Ștefan Talpalaru <stefantalpalaru@yahoo.com>
2022-03-23 08:58:17 +00:00
getFrontfillSlot , dag . backfill . slot , blockVerifier , maxHeadAge = 0 )
2022-07-06 16:11:44 +00:00
router = ( ref MessageRouter ) (
processor : processor ,
2022-07-29 08:45:39 +00:00
network : node . network )
2022-07-06 16:11:44 +00:00
2022-07-29 08:45:39 +00:00
if node . config . lightClientDataServe :
2022-07-06 16:11:44 +00:00
# Schedules (at most) one light-client-update broadcast per slot. Bails out
# early if a broadcast future is already in flight or if `slot` does not
# advance past the latest slot already broadcast; otherwise records the slot
# and launches `handleLightClientUpdates`, clearing the in-flight future
# marker once that broadcast completes.
proc scheduleSendingLightClientUpdates ( slot : Slot ) =
# Only one gossip broadcast may be in flight at a time.
if node . lightClientPool [ ] . broadcastGossipFut ! = nil :
return
# Never re-broadcast for a slot at or before the latest one handled.
if slot < = node . lightClientPool [ ] . latestBroadcastedSlot :
return
node . lightClientPool [ ] . latestBroadcastedSlot = slot
# `fut` is an alias for the pool's in-flight broadcast future field.
template fut ( ) : auto = node . lightClientPool [ ] . broadcastGossipFut
fut = node . handleLightClientUpdates ( slot )
# Reset the in-flight marker when the broadcast finishes, allowing the
# next slot's update to be scheduled.
fut . addCallback do ( p : pointer ) {. gcsafe . } :
fut = nil
router . onSyncCommitteeMessage = scheduleSendingLightClientUpdates
2022-03-21 16:52:15 +00:00
dag . setFinalizationCb makeOnFinalizationCb ( node . eventBus , node . eth1Monitor )
2022-06-28 10:21:16 +00:00
dag . setBlockCb ( onBlockAdded )
dag . setHeadCb ( onHeadChanged )
dag . setReorgCb ( onChainReorg )
2022-03-21 16:52:15 +00:00
node . dag = dag
node . quarantine = quarantine
node . attestationPool = attestationPool
node . syncCommitteeMsgPool = syncCommitteeMsgPool
2022-05-23 12:02:54 +00:00
node . lightClientPool = lightClientPool
2022-03-21 16:52:15 +00:00
node . exitPool = exitPool
node . processor = processor
node . blockProcessor = blockProcessor
node . consensusManager = consensusManager
node . requestManager = RequestManager . init ( node . network , blockVerifier )
node . syncManager = syncManager
node . backfiller = backfiller
2022-07-06 16:11:44 +00:00
node . router = router
2022-03-21 16:52:15 +00:00
node . addValidators ( )
block :
# Add in-process validators to the list of "known" validators such that
# we start with a reasonable ENR
let wallSlot = node . beaconClock . now ( ) . slotOrZero ( )
for validator in node . attachedValidators [ ] . validators . values ( ) :
if config . validatorMonitorAuto :
node . validatorMonitor [ ] . addMonitor ( validator . pubkey , validator . index )
if validator . index . isSome ( ) :
2022-09-07 18:34:52 +00:00
node . consensusManager [ ] . actionTracker . knownValidators [
validator . index . get ( ) ] = wallSlot
let stabilitySubnets =
node . consensusManager [ ] . actionTracker . stabilitySubnets ( wallSlot )
2022-03-21 16:52:15 +00:00
# Here, we also set the correct ENR should we be in all subnets mode!
node . network . updateStabilitySubnetMetadata ( stabilitySubnets )
node . network . initBeaconSync ( dag , getBeaconTime )
node . updateValidatorMetrics ( )
2021-05-19 06:38:13 +00:00
# Base name of the on-disk slashing-protection database.
# NOTE: changing this constant requires a physical rename of the existing
# database file as well, or the accumulated slashing-protection history
# is lost.
const SlashingDbName = " slashing_protection "
2021-12-22 12:37:31 +00:00
proc init * ( T : type BeaconNode ,
2022-06-21 08:29:16 +00:00
rng : ref HmacDrbgContext ,
2021-12-22 12:37:31 +00:00
config : BeaconNodeConf ,
2022-12-07 10:24:51 +00:00
metadata : Eth2NetworkMetadata ) : BeaconNode
{. raises : [ Defect , CatchableError ] . } =
2022-04-08 16:22:49 +00:00
var taskpool : TaskPoolPtr
2021-09-17 00:13:52 +00:00
2022-12-07 10:24:51 +00:00
template cfg : auto = metadata . cfg
template eth1Network : auto = metadata . eth1Network
2022-01-21 10:59:09 +00:00
2021-09-17 00:13:52 +00:00
try :
if config . numThreads < 0 :
fatal " The number of threads --numThreads cannot be negative. "
quit 1
elif config . numThreads = = 0 :
2022-04-08 16:22:49 +00:00
taskpool = TaskPoolPtr . new ( numThreads = min ( countProcessors ( ) , 16 ) )
2021-09-17 00:13:52 +00:00
else :
2022-04-08 16:22:49 +00:00
taskpool = TaskPoolPtr . new ( numThreads = config . numThreads )
2021-09-17 00:13:52 +00:00
info " Threadpool started " , numThreads = taskpool . numThreads
except Exception as exc :
raise newException ( Defect , " Failure in taskpool initialization. " )
2020-01-17 13:44:01 +00:00
let
2022-06-17 15:27:28 +00:00
eventBus = EventBus (
2022-06-20 05:53:39 +00:00
blocksQueue : newAsyncEventQueue [ EventBeaconBlockObject ] ( ) ,
2022-06-17 15:27:28 +00:00
headQueue : newAsyncEventQueue [ HeadChangeInfoObject ] ( ) ,
reorgQueue : newAsyncEventQueue [ ReorgInfoObject ] ( ) ,
2022-06-19 05:57:52 +00:00
finUpdateQueue : newAsyncEventQueue [ altair . LightClientFinalityUpdate ] ( ) ,
optUpdateQueue : newAsyncEventQueue [ altair . LightClientOptimisticUpdate ] ( ) ,
2022-06-17 15:27:28 +00:00
attestQueue : newAsyncEventQueue [ Attestation ] ( ) ,
contribQueue : newAsyncEventQueue [ SignedContributionAndProof ] ( ) ,
exitQueue : newAsyncEventQueue [ SignedVoluntaryExit ] ( ) ,
finalQueue : newAsyncEventQueue [ FinalizationInfoObject ] ( )
)
2021-07-13 14:27:10 +00:00
db = BeaconChainDB . new ( config . databaseDir , inMemory = false )
2020-01-17 13:44:01 +00:00
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows simpler access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
if config . finalizedCheckpointBlock . isSome :
warn " --finalized-checkpoint-block has been deprecated, ignoring "
2020-09-22 20:42:42 +00:00
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows simpler access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
let checkpointState = if config . finalizedCheckpointState . isSome :
2021-02-22 16:17:48 +00:00
let checkpointStatePath = config . finalizedCheckpointState . get . string
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
let tmp = try :
2021-11-10 11:39:08 +00:00
newClone ( readSszForkedHashedBeaconState (
cfg , readAllBytes ( checkpointStatePath ) . tryGet ( ) ) )
except SszError as err :
2020-09-22 20:42:42 +00:00
fatal " Checkpoint state deserialization failed " ,
err = formatMsg ( err , checkpointStatePath )
quit 1
except CatchableError as err :
fatal " Failed to read checkpoint state file " , err = err . msg
quit 1
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
if not getStateField ( tmp [ ] , slot ) . is_epoch :
2022-07-12 15:09:56 +00:00
fatal " --finalized-checkpoint-state must point to a state for an epoch slot " ,
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
slot = getStateField ( tmp [ ] , slot )
2022-07-12 15:09:56 +00:00
quit 1
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
tmp
else :
nil
2020-01-17 13:44:01 +00:00
2022-07-12 10:08:52 +00:00
let optJwtSecret = rng [ ] . loadJwtSecret ( config , allowCreate = false )
2022-03-31 14:43:05 +00:00
2022-06-28 20:49:52 +00:00
if config . web3Urls . len ( ) = = 0 :
2022-12-23 07:42:55 +00:00
notice " Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html) "
2022-06-28 20:49:52 +00:00
2020-11-16 19:15:43 +00:00
var eth1Monitor : Eth1Monitor
2020-01-17 13:44:01 +00:00
2022-12-06 16:43:11 +00:00
let genesisState =
2022-12-07 10:24:51 +00:00
if metadata . genesisData . len > 0 :
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
try :
2022-12-07 10:24:51 +00:00
newClone readSszForkedHashedBeaconState (
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
cfg ,
2022-12-07 10:24:51 +00:00
metadata . genesisData . toOpenArrayByte ( 0 , metadata . genesisData . high ) )
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
except CatchableError as err :
raiseAssert " Invalid baked-in state: " & err . msg
else :
nil
if not ChainDAGRef . isInitialized ( db ) . isOk ( ) :
if genesisState = = nil and checkpointState = = nil :
2020-11-24 21:21:47 +00:00
when hasGenesisDetection :
# This is a fresh start without a known genesis state
# (most likely, it hasn't arrived yet). We'll try to
# obtain a genesis through the Eth1 deposits monitor:
2021-04-06 21:42:59 +00:00
if config . web3Urls . len = = 0 :
2020-11-24 21:21:47 +00:00
fatal " Web3 URL not specified "
quit 1
# TODO Could move this to a separate "GenesisMonitor" process or task
# that would do only this - see Paul's proposal for this.
2022-02-25 08:22:44 +00:00
let eth1Monitor = Eth1Monitor . init (
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work, this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There's a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
cfg ,
2022-12-19 17:19:48 +00:00
metadata . depositContractBlock ,
metadata . depositContractBlockHash ,
2020-12-03 04:30:35 +00:00
db ,
2022-03-31 14:43:05 +00:00
nil ,
2021-04-06 21:42:59 +00:00
config . web3Urls ,
2021-11-25 16:51:51 +00:00
eth1Network ,
2022-02-27 16:55:02 +00:00
config . web3ForcePolling ,
2022-09-20 06:17:25 +00:00
optJwtSecret ,
ttdReached = false )
2020-11-24 21:21:47 +00:00
2022-02-25 08:22:44 +00:00
eth1Monitor . loadPersistedDeposits ( )
2020-06-27 12:01:19 +00:00
2021-12-23 14:58:54 +00:00
let phase0Genesis = waitFor eth1Monitor . waitGenesis ( )
2022-04-08 20:11:37 +00:00
genesisState = ( ref ForkedHashedBeaconState ) (
kind : BeaconStateFork . Phase0 ,
phase0Data :
( ref phase0 . HashedBeaconState ) (
data : phase0Genesis [ ] ,
root : hash_tree_root ( phase0Genesis [ ] ) ) [ ] )
2021-12-23 14:58:54 +00:00
2020-11-24 21:21:47 +00:00
if bnStatus = = BeaconNodeStatus . Stopping :
return nil
2020-06-27 12:01:19 +00:00
2020-11-24 21:21:47 +00:00
notice " Eth2 genesis state detected " ,
2021-12-23 14:58:54 +00:00
genesisTime = phase0Genesis . genesisTime ,
eth1Block = phase0Genesis . eth1_data . block_hash ,
totalDeposits = phase0Genesis . eth1_data . deposit_count
2020-11-12 16:21:04 +00:00
else :
2021-11-10 11:39:08 +00:00
fatal " No database and no genesis snapshot found: supply a genesis.ssz " &
" with the network configuration, or compile the beacon node with " &
" the -d:has_genesis_detection option " &
2020-11-24 21:21:47 +00:00
" in order to support monitoring for genesis events "
quit 1
2019-10-25 14:53:31 +00:00
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
if not genesisState . isNil and not checkpointState . isNil :
if getStateField ( genesisState [ ] , genesis_validators_root ) ! =
getStateField ( checkpointState [ ] , genesis_validators_root ) :
fatal " Checkpoint state does not match genesis - check the --network parameter " ,
rootFromGenesis = getStateField (
genesisState [ ] , genesis_validators_root ) ,
rootFromCheckpoint = getStateField (
checkpointState [ ] , genesis_validators_root )
2020-04-22 23:35:55 +00:00
quit 1
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
try :
# Always store genesis state if we have it - this allows reindexing and
# answering genesis queries
if not genesisState . isNil :
ChainDAGRef . preInit ( db , genesisState [ ] )
2020-01-17 13:44:01 +00:00
2021-11-10 11:39:08 +00:00
if not checkpointState . isNil :
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
if genesisState . isNil or
getStateField ( checkpointState [ ] , slot ) ! = GENESIS_SLOT :
ChainDAGRef . preInit ( db , checkpointState [ ] )
2020-09-22 20:42:42 +00:00
2021-12-21 10:40:14 +00:00
doAssert ChainDAGRef . isInitialized ( db ) . isOk ( ) , " preInit should have initialized db "
2021-03-26 06:52:01 +00:00
except CatchableError as exc :
error " Failed to initialize database " , err = exc . msg
2020-09-22 20:42:42 +00:00
quit 1
2021-11-10 11:39:08 +00:00
else :
if not checkpointState . isNil :
fatal " A database already exists, cannot start from given checkpoint " ,
dataDir = config . dataDir
quit 1
2020-07-02 15:52:48 +00:00
2021-03-23 06:57:10 +00:00
# Doesn't use std/random directly, but dependencies might
randomize ( rng [ ] . rand ( high ( int ) ) )
2021-12-20 19:20:31 +00:00
let
validatorMonitor = newClone ( ValidatorMonitor . init (
2022-01-14 14:57:46 +00:00
config . validatorMonitorAuto , config . validatorMonitorTotals ) )
2021-12-20 19:20:31 +00:00
for key in config . validatorMonitorPubkeys :
2022-08-19 21:51:30 +00:00
validatorMonitor [ ] . addMonitor ( key , Opt . none ( ValidatorIndex ) )
2021-12-20 19:20:31 +00:00
2020-07-07 23:02:14 +00:00
let
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
networkGenesisValidatorsRoot =
if not genesisState . isNil :
Opt . some ( getStateField ( genesisState [ ] , genesis_validators_root ) )
2022-03-19 16:48:24 +00:00
else :
State-only checkpoint state startup (#4251)
Currently, we require genesis and a checkpoint block and state to start
from an arbitrary slot - this PR relaxes this requirement so that we can
start with a state alone.
The current trusted-node-sync algorithm works by first downloading
blocks until we find an epoch aligned non-empty slot, then downloads the
state via slot.
However, current
[proposals](https://github.com/ethereum/beacon-APIs/pull/226) for
checkpointing prefer finalized state as
the main reference - this allows more simple access control and caching
on the server side - in particular, this should help checkpoint-syncing
from sources that have a fast `finalized` state download (like infura
and teku) but are slow when accessing state via slot.
Earlier versions of Nimbus will not be able to read databases created
without a checkpoint block and genesis. In most cases, backfilling makes
the database compatible except where genesis is also missing (custom
networks).
* backfill checkpoint block from libp2p instead of checkpoint source,
when doing trusted node sync
* allow starting the client without genesis / checkpoint block
* perform epoch start slot lookahead when loading tail state, so as to
deal with the case where the epoch start slot does not have a block
* replace `--blockId` with `--state-id` in TNS command line
* when replaying, also look at the parent of the last-known-block (even
if we don't have the parent block data, we can still replay from a
"parent" state) - in particular, this clears the way for implementing
state pruning
* deprecate `--finalized-checkpoint-block` option (no longer needed)
2022-11-02 10:02:38 +00:00
Opt . none ( Eth2Digest )
2022-03-19 16:48:24 +00:00
dag = loadChainDag (
config , cfg , db , eventBus ,
2022-06-24 14:57:50 +00:00
validatorMonitor , networkGenesisValidatorsRoot ,
2022-10-12 22:58:36 +00:00
config . deploymentPhase < = DeploymentPhase . Testnet )
2022-06-15 02:38:27 +00:00
genesisTime = getStateField ( dag . headState , genesis_time )
beaconClock = BeaconClock . init ( genesisTime )
2022-03-31 14:43:05 +00:00
getBeaconTime = beaconClock . getBeaconTimeFn ( )
2021-08-20 08:58:15 +00:00
2021-02-22 16:17:48 +00:00
if config . weakSubjectivityCheckpoint . isSome :
2022-03-19 16:48:24 +00:00
dag . checkWeakSubjectivityCheckpoint (
config . weakSubjectivityCheckpoint . get , beaconClock )
2020-09-22 20:42:42 +00:00
2022-01-21 10:59:09 +00:00
if eth1Monitor . isNil and config . web3Urls . len > 0 :
2020-12-15 21:59:29 +00:00
eth1Monitor = Eth1Monitor . init (
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work, this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There's a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
cfg ,
2022-12-19 17:19:48 +00:00
metadata . depositContractBlock ,
metadata . depositContractBlockHash ,
2020-12-03 04:30:35 +00:00
db ,
2022-03-31 14:43:05 +00:00
getBeaconTime ,
2021-04-06 21:42:59 +00:00
config . web3Urls ,
2021-11-25 16:51:51 +00:00
eth1Network ,
2022-02-27 16:55:02 +00:00
config . web3ForcePolling ,
2022-09-20 06:17:25 +00:00
optJwtSecret ,
ttdReached = not dag . loadExecutionBlockRoot ( dag . finalizedHead . blck ) . isZero )
2020-11-12 16:21:04 +00:00
2022-09-29 06:29:49 +00:00
if config . rpcEnabled . isSome :
2022-05-24 07:23:48 +00:00
warn " Nimbus ' s JSON-RPC server has been removed. This includes the --rpc, --rpc-port, and --rpc-address configuration options. https://nimbus.guide/rest-api.html shows how to enable and configure the REST Beacon API server which replaces it. "
2020-03-16 22:28:54 +00:00
2021-03-17 18:46:45 +00:00
let restServer = if config . restEnabled :
2022-08-19 10:30:07 +00:00
RestServerRef . init ( config . restAddress , config . restPort ,
config . keymanagerAllowedOrigin ,
validateBeaconApiQueries ,
config )
2021-12-22 12:37:31 +00:00
else :
nil
2020-04-15 02:41:22 +00:00
let
2021-02-22 16:17:48 +00:00
netKeys = getPersistentNetKeys ( rng [ ] , config )
nickname = if config . nodeName = = " auto " : shortForm ( netKeys )
else : config . nodeName
2021-07-07 09:09:47 +00:00
network = createEth2Node (
2021-08-19 10:45:31 +00:00
rng , config , netKeys , cfg , dag . forkDigests , getBeaconTime ,
2022-03-16 07:20:40 +00:00
getStateField ( dag . headState , genesis_validators_root ) )
2021-02-22 16:17:48 +00:00
2021-05-04 13:17:28 +00:00
case config . slashingDbKind
of SlashingDbKind . v2 :
discard
of SlashingDbKind . v1 :
error " Slashing DB v1 is no longer supported for writing "
quit 1
of SlashingDbKind . both :
warn " Slashing DB v1 deprecated, writing only v2 "
info " Loading slashing protection database (v2) " ,
path = config . validatorsDir ( )
2022-11-20 13:55:43 +00:00
proc getValidatorAndIdx ( pubkey : ValidatorPubKey ) : Opt [ ValidatorAndIndex ] =
2022-08-19 10:30:07 +00:00
withState ( dag . headState ) :
2022-11-20 13:55:43 +00:00
getValidator ( forkyState ( ) . data . validators . asSeq ( ) , pubkey )
2022-08-19 10:30:07 +00:00
2021-05-04 13:17:28 +00:00
let
2021-02-22 16:17:48 +00:00
slashingProtectionDB =
2021-05-04 13:17:28 +00:00
SlashingProtectionDB . init (
2022-03-16 07:20:40 +00:00
getStateField ( dag . headState , genesis_validators_root ) ,
2021-05-19 06:38:13 +00:00
config . validatorsDir ( ) , SlashingDbName )
2022-12-09 16:05:55 +00:00
validatorPool = newClone ( ValidatorPool . init (
slashingProtectionDB , config . doppelgangerDetection ) )
2021-03-11 10:10:57 +00:00
2022-08-19 10:30:07 +00:00
keymanagerInitResult = initKeymanagerServer ( config , restServer )
keymanagerHost = if keymanagerInitResult . server ! = nil :
newClone KeymanagerHost . init (
validatorPool ,
rng ,
keymanagerInitResult . token ,
config . validatorsDir ,
config . secretsDir ,
config . defaultFeeRecipient ,
2022-11-20 13:55:43 +00:00
getValidatorAndIdx ,
2022-08-19 10:30:07 +00:00
getBeaconTime )
else : nil
2022-03-21 16:52:15 +00:00
stateTtlCache =
if config . restCacheSize > 0 :
StateTtlCache . init (
cacheSize = config . restCacheSize ,
cacheTtl = chronos . seconds ( config . restCacheTtl ) )
else :
nil
2022-01-31 17:28:26 +00:00
2022-08-01 06:41:47 +00:00
let payloadBuilderRestClient =
if config . payloadBuilderEnable :
2022-10-13 11:38:33 +00:00
RestClientRef . new ( config . payloadBuilderUrl ) . valueOr :
2022-08-01 06:41:47 +00:00
warn " Payload builder REST client setup failed " ,
payloadBuilderUrl = config . payloadBuilderUrl
nil
else :
nil
2022-06-07 17:01:11 +00:00
let node = BeaconNode (
2020-01-17 13:44:01 +00:00
nickname : nickname ,
2021-12-20 11:21:17 +00:00
graffitiBytes : if config . graffiti . isSome : config . graffiti . get
2020-06-29 17:30:19 +00:00
else : defaultGraffitiBytes ( ) ,
2020-01-17 13:44:01 +00:00
network : network ,
2020-02-05 20:40:14 +00:00
netKeys : netKeys ,
2020-01-17 13:44:01 +00:00
db : db ,
2021-02-22 16:17:48 +00:00
config : config ,
2021-11-01 14:50:24 +00:00
attachedValidators : validatorPool ,
2020-11-03 01:21:07 +00:00
eth1Monitor : eth1Monitor ,
2022-08-01 06:41:47 +00:00
payloadBuilderRestClient : payloadBuilderRestClient ,
2021-03-17 18:46:45 +00:00
restServer : restServer ,
2022-08-19 10:30:07 +00:00
keymanagerHost : keymanagerHost ,
keymanagerServer : keymanagerInitResult . server ,
2021-09-22 12:17:15 +00:00
eventBus : eventBus ,
2021-12-21 14:24:23 +00:00
gossipState : { } ,
2022-08-25 03:53:59 +00:00
blocksGossipState : { } ,
2021-09-17 00:13:52 +00:00
beaconClock : beaconClock ,
2022-01-05 14:49:10 +00:00
validatorMonitor : validatorMonitor ,
2022-06-15 02:38:27 +00:00
stateTtlCache : stateTtlCache ,
2022-09-15 13:00:23 +00:00
nextExchangeTransitionConfTime :
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#specification-3
# Consensus Layer client software **SHOULD** poll this endpoint every
# 60 seconds.
# Delay first call by that time to allow for EL syncing to begin; it can
# otherwise generate an EL warning by claiming a zero merge block.
Moment . now + chronos . seconds ( 60 ) ,
2022-08-23 16:19:52 +00:00
dynamicFeeRecipientsStore : newClone ( DynamicFeeRecipientsStore . init ( ) ) )
2020-08-20 16:30:47 +00:00
2022-06-07 17:01:11 +00:00
node . initLightClient (
rng , cfg , dag . forkDigests , getBeaconTime , dag . genesis_validators_root )
2022-03-21 16:52:15 +00:00
node . initFullNode (
rng , dag , taskpool , getBeaconTime )
2020-12-16 13:03:04 +00:00
2022-06-07 17:01:11 +00:00
node . updateLightClientFromDag ( )
2022-02-21 11:55:56 +00:00
node
2019-09-07 17:48:05 +00:00
2022-08-01 06:41:47 +00:00
func verifyFinalization(node: BeaconNode, slot: Slot) =
  ## Loudly assert that finalization is keeping up with the wall clock.
  ## Intended for testing: fail visibly and unignorably when it is not.
  # Epoch must be >= 4 to check finalization
  const SETTLING_TIME_OFFSET = 1'u64

  # Don't static-assert this -- if this isn't called, don't require it
  doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET

  let currentEpoch = slot.epoch()
  if currentEpoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET:
    let finalizedEpoch = node.dag.finalizedHead.slot.epoch()
    # Finalization rule 234, that has the most lag slots among the cases, sets
    # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
    # and then state.slot gets incremented, to increase the maximum offset, if
    # finalization occurs every slot, to 4 slots vs scheduledSlot.
    doAssert finalizedEpoch + 4 >= currentEpoch
2021-05-11 20:03:40 +00:00
func subnetLog(v: BitArray): string =
  ## Render a subnet bitmap as the list of indices of its set bits,
  ## for compact structured logging.
  let activeIndices = toSeq(v.oneIndices())
  $activeIndices
2022-01-24 20:40:59 +00:00
func forkDigests(node: BeaconNode): auto =
  ## Return the per-fork digests as an array indexed by `BeaconStateFork`,
  ## so handler tables can be looked up by fork.
  let digestsByFork: array[BeaconStateFork, auto] = [
    node.dag.forkDigests.phase0,
    node.dag.forkDigests.altair,
    node.dag.forkDigests.bellatrix,
    node.dag.forkDigests.capella,
    node.dag.forkDigests.eip4844]
  digestsByFork
2022-12-15 12:15:12 +00:00
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/phase0/validator.md#phase-0-attestation-subnet-stability
proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
  ## Reconcile attestation subnet subscriptions with the duties tracked by
  ## the action tracker: subscribe to newly-needed subnets, unsubscribe from
  ## ones no longer needed, and update the node's stability subnet metadata.
  if node.gossipState.card == 0:
    # When disconnected, updateGossipState is responsible for all things
    # subnets - in particular, it will remove subscriptions on the edge where
    # we enter the disconnected state.
    return

  let
    aggregateSubnets =
      node.consensusManager[].actionTracker.aggregateSubnets(slot)
    stabilitySubnets =
      node.consensusManager[].actionTracker.stabilitySubnets(slot)
    subnets = aggregateSubnets + stabilitySubnets

  node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  # Now we know what we should be subscribed to - make it so
  let
    prevSubnets = node.consensusManager[].actionTracker.subscribedSubnets
    unsubscribeSubnets = prevSubnets - subnets
    subscribeSubnets = subnets - prevSubnets

  # Remember what we subscribed to, so we can unsubscribe later
  node.consensusManager[].actionTracker.subscribedSubnets = subnets

  let forkDigests = node.forkDigests()
  for gossipFork in node.gossipState:
    let forkDigest = forkDigests[gossipFork]
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest)
    node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest)

  # Fix: `gossipState` was previously logged twice in this statement; the
  # duplicate property has been removed.
  debug "Attestation subnets",
    slot, epoch = slot.epoch, gossipState = node.gossipState,
    stabilitySubnets = subnetLog(stabilitySubnets),
    aggregateSubnets = subnetLog(aggregateSubnets),
    prevSubnets = subnetLog(prevSubnets),
    subscribeSubnets = subnetLog(subscribeSubnets),
    unsubscribeSubnets = subnetLog(unsubscribeSubnets)
2022-08-25 03:53:59 +00:00
proc updateBlocksGossipStatus*(
    node: BeaconNode, slot: Slot, dagIsBehind: bool) =
  ## Bring beacon-block topic subscriptions in line with the target gossip
  ## state for `slot`, subscribing/unsubscribing per-fork as needed.
  template cfg(): auto = node.dag.cfg

  let
    isBehind =
      if node.shouldSyncOptimistically(slot):
        # If optimistic sync is active, always subscribe to blocks gossip
        false
      else:
        # Use DAG status to determine whether to subscribe for blocks gossip
        dagIsBehind

    targetGossipState = getTargetGossipState(
      slot.epoch, cfg.ALTAIR_FORK_EPOCH, cfg.BELLATRIX_FORK_EPOCH,
      cfg.CAPELLA_FORK_EPOCH, cfg.EIP4844_FORK_EPOCH, isBehind)

  template currentGossipState(): auto = node.blocksGossipState
  if currentGossipState == targetGossipState:
    return

  if currentGossipState.card == 0 and targetGossipState.card > 0:
    debug "Enabling blocks topic subscriptions",
      wallSlot = slot, targetGossipState
  elif currentGossipState.card > 0 and targetGossipState.card == 0:
    debug "Disabling blocks topic subscriptions",
      wallSlot = slot
  else:
    # Individual forks added / removed
    discard

  let
    newGossipForks = targetGossipState - currentGossipState
    oldGossipForks = currentGossipState - targetGossipState

  discard $eip4844ImplementationMissing & ": for EIP4844, https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.1/specs/eip4844/p2p-interface.md#beacon_block notes use beacon_block_and_blobs_sidecar rather than beacon_block"

  for gossipFork in oldGossipForks:
    let forkDigest = node.dag.forkDigests[].atStateFork(gossipFork)
    node.network.unsubscribe(getBeaconBlocksTopic(forkDigest))

  for gossipFork in newGossipForks:
    let forkDigest = node.dag.forkDigests[].atStateFork(gossipFork)
    node.network.subscribe(
      getBeaconBlocksTopic(forkDigest), blocksTopicParams,
      enableTopicMetrics = true)

  node.blocksGossipState = targetGossipState
2021-08-09 12:54:45 +00:00
2021-12-21 14:24:23 +00:00
proc addPhase0MessageHandlers(
    node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
  ## Subscribe to the gossip topics shared by all forks since phase0.
  node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams)
  node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams)
  node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams)
  node.network.subscribe(
    getAggregateAndProofsTopic(forkDigest), aggregateTopicParams,
    enableTopicMetrics = true)

  # updateAttestationSubnetHandlers subscribes attestation subnets
2021-08-18 12:30:05 +00:00
proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
  ## Drop all phase0-era topic subscriptions, including every attestation
  ## subnet, and reset the tracked subscribed-subnet set.
  node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest))
  node.network.unsubscribe(getProposerSlashingsTopic(forkDigest))
  node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest))
  node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest))

  for subnet_id in SubnetId:
    node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id))

  node.consensusManager[].actionTracker.subscribedSubnets = default(AttnetBits)
2022-01-24 20:40:59 +00:00
func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto =
  ## Return a predicate telling whether a validator pubkey is of interest
  ## for sync-committee gossip in `epoch`.
  # Only used to determine which gossip topics to which to subscribe
  if node.config.subscribeAllSubnets:
    # Subscribe-everything mode: every pubkey counts
    (func(pubkey: ValidatorPubKey): bool {.closure.} = true)
  else:
    (func(pubkey: ValidatorPubKey): bool =
      node.consensusManager[].actionTracker.hasSyncDuty(pubkey, epoch) or
        pubkey in node.attachedValidators[].validators)
2022-11-08 11:43:38 +00:00
func getCurrentSyncCommiteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits =
  ## Syncnets needed for the current sync committee, based on the head state.
  ## Returns an empty bitset pre-Altair.
  # NOTE(review): "Commitee" is a long-standing typo; kept because callers
  # in this module use this spelling.
  let syncCommittee = withState(node.dag.headState):
    when stateFork >= BeaconStateFork.Altair:
      forkyState.data.current_sync_committee
    else:
      return static(default(SyncnetBits))

  getSyncSubnets(node.hasSyncPubKey(epoch), syncCommittee)
func getNextSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits =
  ## Syncnets needed for the next sync committee, based on the head state.
  ## Returns an empty bitset pre-Altair.
  let syncCommittee = withState(node.dag.headState):
    when stateFork >= BeaconStateFork.Altair:
      forkyState.data.next_sync_committee
    else:
      return static(default(SyncnetBits))

  # Duty lookups are keyed to the first epoch of the next sync period
  getSyncSubnets(
    node.hasSyncPubKey((epoch.sync_committee_period + 1).start_slot().epoch),
    syncCommittee)
func getSyncCommitteeSubnets(node: BeaconNode, epoch: Epoch): SyncnetBits =
  ## Union of current-period syncnets and, when the next sync committee
  ## period is close (and Altair is active by then), next-period syncnets.
  let
    currentSubnets = node.getCurrentSyncCommiteeSubnets(epoch)
    epochsToSyncPeriod = nearSyncCommitteePeriod(epoch)

  # The end-slot tracker might call this when it's theoretically applicable,
  # but more than SYNC_COMMITTEE_SUBNET_COUNT epochs from when the next sync
  # committee period begins, in which case `epochsToNextSyncPeriod` is none.
  if epochsToSyncPeriod.isNone or
      node.dag.cfg.stateForkAtEpoch(epoch + epochsToSyncPeriod.get) <
        BeaconStateFork.Altair:
    return currentSubnets

  currentSubnets + node.getNextSyncCommitteeSubnets(epoch)
2022-07-12 09:00:39 +00:00
2022-11-24 14:38:07 +00:00
proc addAltairMessageHandlers(
    node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
  ## Phase0 handlers plus Altair sync-committee topics for the syncnets
  ## currently of interest; also refreshes syncnets metadata.
  node.addPhase0MessageHandlers(forkDigest, slot)

  # If this comes online near sync committee period, it'll immediately get
  # replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
  let syncnets = node.getSyncCommitteeSubnets(slot.epoch)

  for subcommitteeIdx in SyncSubcommitteeIndex:
    if syncnets[subcommitteeIdx]:
      node.network.subscribe(
        getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams)

  node.network.subscribe(
    getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams)

  node.network.updateSyncnetsMetadata(syncnets)
2022-11-24 14:38:07 +00:00
proc addCapellaMessageHandlers(
    node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
  ## Altair handlers plus the Capella BLS-to-execution-change topic.
  node.addAltairMessageHandlers(forkDigest, slot)
  node.network.subscribe(getBlsToExecutionChangeTopic(forkDigest), basicParams)
2021-11-14 08:00:25 +00:00
proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
  ## Undo addAltairMessageHandlers: phase0 handlers, every sync-committee
  ## subnet topic, and the contribution-and-proof topic.
  node.removePhase0MessageHandlers(forkDigest)

  # No closure is created here, so the loop variable can be used directly
  # (the original wrapped this in closureScope with a local alias).
  for subcommitteeIdx in SyncSubcommitteeIndex:
    node.network.unsubscribe(
      getSyncCommitteeTopic(forkDigest, subcommitteeIdx))

  node.network.unsubscribe(
    getSyncCommitteeContributionAndProofTopic(forkDigest))
2022-11-24 14:38:07 +00:00
proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
  ## Undo addCapellaMessageHandlers.
  node.removeAltairMessageHandlers(forkDigest)
  node.network.unsubscribe(getBlsToExecutionChangeTopic(forkDigest))
2022-11-08 11:43:38 +00:00
proc updateSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
  ## Reconcile sync-committee subnet subscriptions with current duties,
  ## skipping work when nothing can have changed since the last update.
  template lastSyncUpdate: untyped =
    node.consensusManager[].actionTracker.lastSyncUpdate

  if lastSyncUpdate == Opt.some(slot.sync_committee_period()) and
      nearSyncCommitteePeriod(slot.epoch).isNone():
    # No need to update unless we're close to the next sync committee period or
    # new validators were registered with the action tracker
    # TODO we _could_ skip running this in some of the "near" slots, but..
    return

  lastSyncUpdate = Opt.some(slot.sync_committee_period())

  let syncnets = node.getSyncCommitteeSubnets(slot.epoch)

  debug "Updating sync committee subnets",
    syncnets,
    metadata_syncnets = node.network.metadata.syncnets,
    gossipState = node.gossipState

  # Assume that different gossip fork sync committee setups are in sync; this
  # only remains relevant, currently, for one gossip transition epoch, so the
  # consequences of this not being true aren't exceptionally dire, while this
  # allows for bookkeeping simplification.
  if syncnets == node.network.metadata.syncnets:
    return

  let
    newSyncnets = syncnets - node.network.metadata.syncnets
    oldSyncnets = node.network.metadata.syncnets - syncnets
    forkDigests = node.forkDigests()

  for subcommitteeIdx in SyncSubcommitteeIndex:
    # A subnet cannot be both newly added and newly removed
    doAssert not (newSyncnets[subcommitteeIdx] and
                  oldSyncnets[subcommitteeIdx])
    for gossipFork in node.gossipState:
      template topic(): auto =
        getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx)
      if oldSyncnets[subcommitteeIdx]:
        node.network.unsubscribe(topic)
      elif newSyncnets[subcommitteeIdx]:
        node.network.subscribe(topic, basicParams)

  node.network.updateSyncnetsMetadata(syncnets)
2021-08-29 05:58:27 +00:00
2022-12-09 16:05:55 +00:00
proc updateDoppelganger(node: BeaconNode, epoch: Epoch) =
  ## Record liveness for doppelganger detection on all attached validators,
  ## but only once gossip monitoring has actually started.
  if not node.processor[].doppelgangerDetectionEnabled:
    return

  # broadcastStartEpoch is set to FAR_FUTURE_EPOCH when we're not monitoring
  # gossip - it is only viable to assert liveness in epochs where gossip is
  # active
  if epoch > node.processor[].doppelgangerDetection.broadcastStartEpoch:
    for validator in node.attachedValidators[]:
      validator.updateDoppelganger(epoch - 1)
2021-10-18 09:11:44 +00:00
proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
  ## Subscribe to subnets that we are providing stability for or aggregating
  ## and unsubscribe from the ones that are no longer relevant.

  # Let the tracker know what duties are approaching - this will tell us how
  # many stability subnets we need to be subscribed to and what subnets we'll
  # soon be aggregating - in addition to the in-beacon-node duties, there may
  # also be duties coming from the validator client, but we don't control when
  # these arrive
  await node.registerDuties(slot)

  # We start subscribing to gossip before we're fully synced - this allows time
  # to subscribe before the sync end game
  const
    TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64
    HYSTERESIS_BUFFER = 16

  let
    head = node.dag.head
    headDistance =
      if slot > head.slot: (slot - head.slot).uint64
      else: 0'u64
    isBehind =
      headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER
    targetGossipState =
      getTargetGossipState(
        slot.epoch,
        node.dag.cfg.ALTAIR_FORK_EPOCH,
        node.dag.cfg.BELLATRIX_FORK_EPOCH,
        node.dag.cfg.CAPELLA_FORK_EPOCH,
        node.dag.cfg.EIP4844_FORK_EPOCH,
        isBehind)

  doAssert targetGossipState.card <= 2

  let
    newGossipForks = targetGossipState - node.gossipState
    oldGossipForks = node.gossipState - targetGossipState

  doAssert newGossipForks.card <= 2
  doAssert oldGossipForks.card <= 2

  # Highest fork present in a gossip state, -1 when empty
  func maxGossipFork(gossipState: GossipState): int =
    var highest = -1
    for gossipFork in gossipState:
      highest = max(highest, gossipFork.int)
    highest

  if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and
      targetGossipState != {}:
    warn "Unexpected clock regression during transition",
      targetGossipState,
      gossipState = node.gossipState

  if node.gossipState.card == 0 and targetGossipState.card > 0:
    # We are synced, so we will connect
    debug "Enabling topic subscriptions",
      wallSlot = slot,
      headSlot = head.slot,
      headDistance, targetGossipState

    node.processor[].setupDoppelgangerDetection(slot)

    # Specially when waiting for genesis, we'll already be synced on startup -
    # it might also happen on a sufficiently fast restart

    # We "know" the actions for the current and the next epoch
    withState(node.dag.headState):
      if node.consensusManager[].actionTracker.needsUpdate(
          forkyState, slot.epoch):
        let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect(
          "Getting head EpochRef should never fail")
        node.consensusManager[].actionTracker.updateActions(epochRef)

      if node.consensusManager[].actionTracker.needsUpdate(
          forkyState, slot.epoch + 1):
        let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
          "Getting head EpochRef should never fail")
        node.consensusManager[].actionTracker.updateActions(epochRef)

  if node.gossipState.card > 0 and targetGossipState.card == 0:
    debug "Disabling topic subscriptions",
      wallSlot = slot,
      headSlot = head.slot,
      headDistance

    node.processor[].clearDoppelgangerProtection()

  let forkDigests = node.forkDigests()

  const removeMessageHandlers: array[BeaconStateFork, auto] = [
    removePhase0MessageHandlers,
    removeAltairMessageHandlers,
    removeAltairMessageHandlers,  # bellatrix (altair handlers, different forkDigest)
    removeCapellaMessageHandlers,
    removeCapellaMessageHandlers  # eip4844 (capella handlers, different forkDigest)
  ]

  for gossipFork in oldGossipForks:
    removeMessageHandlers[gossipFork](node, forkDigests[gossipFork])

  const addMessageHandlers: array[BeaconStateFork, auto] = [
    addPhase0MessageHandlers,
    addAltairMessageHandlers,
    addAltairMessageHandlers,  # bellatrix (altair handlers, with different forkDigest)
    addCapellaMessageHandlers,
    addCapellaMessageHandlers  # eip4844 (capella handlers, different forkDigest)
  ]

  for gossipFork in newGossipForks:
    addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot)

  node.gossipState = targetGossipState
  node.updateDoppelganger(slot.epoch)
  node.updateAttestationSubnetHandlers(slot)
  node.updateBlocksGossipStatus(slot, isBehind)
  node.updateLightClientGossipStatus(slot, isBehind)
2021-02-14 15:37:32 +00:00
proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
  ## Things we do when slot processing has ended and we're about to wait for
  ## the next slot: pruning, GC, database checkpointing, action-tracker
  ## refresh and gossip topic maintenance - in that order, since the early
  ## steps free resources the later ones may need.

  if node.dag.needStateCachesAndForkChoicePruning():
    # Slashing-protection pruning only runs when validators are attached -
    # otherwise there is nothing to prune.
    if node.attachedValidators[].validators.len > 0:
      node.attachedValidators[]
        .slashingProtection
        # pruning is only done if the DB is set to pruning mode.
        .pruneAfterFinalization(
          node.dag.finalizedHead.slot.epoch()
        )

  # Delay part of pruning until latency critical duties are done.
  # The other part of pruning, `pruneBlocksDAG`, is done eagerly.
  # ----
  # This is the last pruning to do as it clears the "needPruning" condition.
  node.consensusManager[].pruneStateCachesAndForkChoice()

  when declared(GC_fullCollect):
    # The slots in the beacon node work as frames in a game: we want to make
    # sure that we're ready for the next one and don't get stuck in lengthy
    # garbage collection tasks when time is of essence in the middle of a slot -
    # while this does not guarantee that we'll never collect during a slot, it
    # makes sure that all the scratch space we used during slot tasks (logging,
    # temporary buffers etc) gets recycled for the next slot that is likely to
    # need similar amounts of memory.
    try:
      GC_fullCollect()
    except Defect as exc:
      raise exc # Reraise to maintain call stack
    except Exception as exc:
      # TODO upstream
      raiseAssert "Unexpected exception during GC collection"

  # Checkpoint the database to clear the WAL file and make sure changes in
  # the database are synced with the filesystem.
  node.db.checkpoint()

  # Drop sync-committee messages that can no longer be aggregated.
  node.syncCommitteeMsgPool[].pruneData(slot)
  if slot.is_epoch:
    # Fee-recipient mappings from the validator client expire per-epoch.
    node.dynamicFeeRecipientsStore[].pruneOldMappings(slot.epoch)

  # Update upcoming actions - we do this every slot in case a reorg happens
  let head = node.dag.head
  if node.isSynced(head) == SyncStatus.synced:
    withState(node.dag.headState):
      # Only the next epoch is refreshed here; the current epoch was handled
      # when gossip status was updated.
      if node.consensusManager[].actionTracker.needsUpdate(
          forkyState, slot.epoch + 1):
        let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
          "Getting head EpochRef should never fail")
        node.consensusManager[].actionTracker.updateActions(epochRef)

  let
    nextAttestationSlot =
      node.consensusManager[].actionTracker.getNextAttestationSlot(slot)
    nextProposalSlot =
      node.consensusManager[].actionTracker.getNextProposalSlot(slot)
    nextActionWaitTime = saturate(fromNow(
      node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))

  # -1 is a more useful output than 18446744073709551615 as an indicator of
  # no future attestation/proposal known.
  template formatInt64(x: Slot): int64 =
    if x == high(uint64).Slot:
      -1'i64
    else:
      toGaugeValue(x)

  # Renders whether we have sync-committee duties now, soon, or not at all.
  template formatSyncCommitteeStatus(): string =
    let slotsToNextSyncCommitteePeriod =
      SLOTS_PER_SYNC_COMMITTEE_PERIOD - since_sync_committee_period_start(slot)
    # int64 conversion is safe
    doAssert slotsToNextSyncCommitteePeriod <= SLOTS_PER_SYNC_COMMITTEE_PERIOD

    if not node.getCurrentSyncCommiteeSubnets(slot.epoch).isZeros:
      "current"
    elif not node.getNextSyncCommitteeSubnets(slot.epoch).isZeros:
      "in " & toTimeLeftString(
        SECONDS_PER_SLOT.int64.seconds * slotsToNextSyncCommitteePeriod.int64)
    else:
      "none"

  info "Slot end",
    slot = shortLog(slot),
    nextActionWait =
      if nextAttestationSlot == FAR_FUTURE_SLOT:
        "n/a"
      else:
        shortLog(nextActionWaitTime),
    nextAttestationSlot = formatInt64(nextAttestationSlot),
    nextProposalSlot = formatInt64(nextProposalSlot),
    syncCommitteeDuties = formatSyncCommitteeStatus(),
    head = shortLog(head)

  if nextAttestationSlot != FAR_FUTURE_SLOT:
    next_action_wait.set(nextActionWaitTime.toFloatSeconds)

  let epoch = slot.epoch
  if epoch + 1 >= node.network.forkId.next_fork_epoch:
    # Update 1 epoch early to block non-fork-ready peers
    node.network.updateForkId(epoch, node.dag.genesis_validators_root)

  # When we're not behind schedule, we'll speculatively update the clearance
  # state in anticipation of receiving the next block - we do it after logging
  # slot end since the nextActionWaitTime can be short
  let
    advanceCutoff = node.beaconClock.fromNow(
      slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)))
  if advanceCutoff.inFuture:
    # We wait until there's only a second left before the next slot begins, then
    # we advance the clearance state to the next slot - this gives us a high
    # probability of being prepared for the block that will arrive and the
    # epoch processing that follows
    await sleepAsync(advanceCutoff.offset)
    node.dag.advanceClearanceState()

  # Prepare action tracker for the next slot
  node.consensusManager[].actionTracker.updateSlot(slot + 1)

  # The last thing we do is to perform the subscriptions and unsubscriptions for
  # the next slot, just before that slot starts - because of the advance cuttoff
  # above, this will be done just before the next slot starts
  node.updateSyncCommitteeTopics(slot + 1)

  await node.updateGossipStatus(slot + 1)
2022-08-29 12:16:35 +00:00
func syncStatus(node: BeaconNode, wallSlot: Slot): string =
  ## Build a one-line human-readable summary of the node's sync state,
  ## marking optimistic heads with "/opt" and appending light-client
  ## sync progress when optimistic sync is driving the head.
  let headIsOptimistic = node.dag.is_optimistic(node.dag.head.root)

  if node.syncManager.inProgress:
    var status = node.syncManager.syncStatus
    if headIsOptimistic:
      status.add "/opt"
    if node.consensusManager[].shouldSyncOptimistically(wallSlot):
      status.add " - lc: " & $shortLog(node.consensusManager[].optimisticHead)
    status
  elif node.backfiller.inProgress:
    "backfill: " & node.backfiller.syncStatus
  elif headIsOptimistic:
    "synced/opt"
  else:
    "synced"
2022-01-20 07:25:45 +00:00
2022-07-13 14:43:57 +00:00
proc onSlotStart(node: BeaconNode, wallTime: BeaconTime,
                 lastSlot: Slot): Future[bool] {.async.} =
  ## Called at the beginning of a slot - usually every slot, but sometimes might
  ## skip a few in case we're running late.
  ## wallTime: current system time - we will strive to perform all duties up
  ## to this point in time
  ## lastSlot: the last slot that we successfully processed, so we know where to
  ## start work from - there might be jumps if processing is delayed
  ## Returns false - presumably a signal to the slot loop to keep running;
  ## confirm against `runSlotLoop`.
  let
    # The slot we should be at, according to the clock
    wallSlot = wallTime.slotOrZero
    # If everything was working perfectly, the slot that we should be processing
    expectedSlot = lastSlot + 1
    finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch()
    delay = wallTime - expectedSlot.start_beacon_time()

  info "Slot start",
    slot = shortLog(wallSlot),
    epoch = shortLog(wallSlot.epoch),
    sync = node.syncStatus(wallSlot),
    peers = len(node.network.peerPool),
    head = shortLog(node.dag.head),
    finalized = shortLog(getStateField(
      node.dag.headState, finalized_checkpoint)),
    delay = shortLog(delay)

  # Check before any re-scheduling of onSlotStart()
  if checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch):
    quit(0)

  when defined(windows):
    # Keep the Windows service manager informed that we're alive.
    if node.config.runAsService:
      reportServiceStatus(SERVICE_RUNNING, NO_ERROR, 0)

  beacon_slot.set wallSlot.toGaugeValue
  beacon_current_epoch.set wallSlot.epoch.toGaugeValue

  # both non-negative, so difference can't overflow or underflow int64
  finalization_delay.set(
    wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue)

  if node.config.strictVerification:
    verifyFinalization(node, wallSlot)

  # Head must be updated before validator duties are computed for this slot.
  node.consensusManager[].updateHead(wallSlot)

  await node.handleValidatorDuties(lastSlot, wallSlot)

  await onSlotEnd(node, wallSlot)

  # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#registration-dissemination
  # This specification suggests validators re-submit to builder software every
  # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs.
  if wallSlot.is_epoch and
      wallSlot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0:
    # Fire-and-forget: registration failures must not stall the slot loop.
    asyncSpawn node.registerValidators(wallSlot.epoch)

  return false
2019-12-02 14:42:57 +00:00
proc handleMissingBlocks(node: BeaconNode) =
  ## Ask the request manager to fetch any ancestors the quarantine has
  ## detected as missing.
  let missing = node.quarantine[].checkMissing()
  if missing.len == 0:
    return

  debug "Requesting detected missing blocks", blocks = shortLog(missing)
  node.requestManager.fetchAncestorBlocks(missing)
2019-12-02 14:42:57 +00:00
2022-06-15 02:38:27 +00:00
proc onSecond(node: BeaconNode, time: Moment) =
  ## Once-per-second housekeeping: missing-block requests, thread metrics,
  ## periodic engine transition-configuration exchange and the optional
  ## stop-at-synced-epoch shutdown check.
  if not node.syncManager.inProgress:
    node.handleMissingBlocks()

  # Nim GC metrics (for the main thread)
  updateThreadMetrics()

  if not node.eth1Monitor.isNil and time >= node.nextExchangeTransitionConfTime:
    # The EL client SHOULD log a warning when not receiving an exchange message
    # at least once every 120 seconds. If we only attempt to exchange every 60
    # seconds, the warning would be triggered if a single message is missed.
    # To accommodate for that, exchange slightly more frequently.
    # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#engine_exchangetransitionconfigurationv1
    node.nextExchangeTransitionConfTime = time + chronos.seconds(45)
    if node.currentSlot.epoch >= node.dag.cfg.BELLATRIX_FORK_EPOCH:
      traceAsyncErrors node.eth1Monitor.exchangeTransitionConfiguration()

  let stopEpoch = node.config.stopAtSyncedEpoch
  if stopEpoch != 0 and node.dag.head.slot.epoch >= stopEpoch:
    notice "Shutting down after having reached the target synced epoch"
    bnStatus = BeaconNodeStatus.Stopping
2020-06-03 08:46:29 +00:00
proc runOnSecondLoop(node: BeaconNode) {.async.} =
  ## Drives `onSecond` roughly once per second and records how far the
  ## event loop drifted from the requested 1s tick in the `ticks_delay`
  ## metric.
  const
    sleepTime = chronos.seconds(1)
    nanosecondsIn1s = float(sleepTime.nanoseconds)

  while true:
    let start = chronos.now(chronos.Moment)
    await chronos.sleepAsync(sleepTime)
    let afterSleep = chronos.now(chronos.Moment)
    # Fix: previously this `let` was also named `sleepTime`, shadowing the
    # const above - renamed for clarity; it measures the *actual* time slept,
    # which can exceed 1s when the loop is overloaded.
    let actualSleep = afterSleep - start

    node.onSecond(start)

    let finished = chronos.now(chronos.Moment)
    let processingTime = finished - afterSleep
    ticks_delay.set(actualSleep.nanoseconds.float / nanosecondsIn1s)

    # Field named `sleepTime` explicitly to keep the log schema identical to
    # the pre-rename output.
    trace "onSecond task completed", sleepTime = actualSleep, processingTime
2019-03-27 20:17:01 +00:00
2021-03-24 10:05:04 +00:00
func connectedPeersCount(node: BeaconNode): int =
  ## Number of peers currently held in the network peer pool.
  node.network.peerPool.len
2020-03-16 22:28:54 +00:00
2021-03-17 18:46:45 +00:00
proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) =
  ## Register every REST (Beacon API) handler group on the server's router;
  ## light-client endpoints are added only when the node serves LC data.
  let router = restServer.router
  router.installBeaconApiHandlers(node)
  router.installConfigApiHandlers(node)
  router.installDebugApiHandlers(node)
  router.installEventApiHandlers(node)
  router.installNimbusApiHandlers(node)
  router.installNodeApiHandlers(node)
  router.installValidatorApiHandlers(node)
  if node.dag.lcDataStore.serve:
    router.installLightClientApiHandlers(node)
2021-03-17 18:46:45 +00:00
2022-12-04 07:42:03 +00:00
from . / spec / datatypes / capella import SignedBeaconBlock
2020-08-17 12:07:29 +00:00
proc installMessageValidators(node: BeaconNode) =
  ## Registers gossip message validators for every fork digest. These run
  ## on every incoming gossip message and decide whether it is accepted,
  ## ignored or rejected before further processing/relaying.
  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-alpha.2/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # These validators stay around the whole time, regardless of which specific
  # subnets are subscribed to during any given epoch.
  let forkDigests = node.dag.forkDigests

  # Phase0 block topic: route to the optimistic processor while syncing
  # optimistically, otherwise to the regular block processor.
  node.network.addValidator(
    getBeaconBlocksTopic(forkDigests.phase0),
    proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
      if node.shouldSyncOptimistically(node.currentSlot):
        toValidationResult(
          node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
      else:
        toValidationResult(node.processor[].processSignedBeaconBlock(
          MsgSource.gossip, signedBlock)))

  # Installs the validators shared by all forks since phase0 - attestations,
  # aggregates, slashings and voluntary exits - for a given fork digest.
  template installPhase0Validators(digest: auto) =
    for it in SubnetId:
      closureScope:
        let subnet_id = it
        node.network.addAsyncValidator(
          getAttestationTopic(digest, subnet_id),
          # This proc needs to be within closureScope; don't lift out of loop.
          proc (attestation: Attestation): Future[ValidationResult] {.async.} =
            return toValidationResult(
              await node.processor.processAttestation(
                MsgSource.gossip, attestation, subnet_id)))

    node.network.addAsyncValidator(
      getAggregateAndProofsTopic(digest),
      proc (signedAggregateAndProof: SignedAggregateAndProof):
          Future[ValidationResult] {.async.} =
        return toValidationResult(
          await node.processor.processSignedAggregateAndProof(
            MsgSource.gossip, signedAggregateAndProof, false)))

    node.network.addValidator(
      getAttesterSlashingsTopic(digest),
      proc (attesterSlashing: AttesterSlashing): ValidationResult =
        toValidationResult(
          node.processor[].processAttesterSlashing(
            MsgSource.gossip, attesterSlashing)))

    node.network.addValidator(
      getProposerSlashingsTopic(digest),
      proc (proposerSlashing: ProposerSlashing): ValidationResult =
        toValidationResult(
          node.processor[].processProposerSlashing(
            MsgSource.gossip, proposerSlashing)))

    node.network.addValidator(
      getVoluntaryExitsTopic(digest),
      proc (signedVoluntaryExit: SignedVoluntaryExit): ValidationResult =
        toValidationResult(
          node.processor[].processSignedVoluntaryExit(
            MsgSource.gossip, signedVoluntaryExit)))

  installPhase0Validators(forkDigests.phase0)

  # Validators introduced in phase0 are also used in Altair and Bellatrix, but
  # with different fork digests
  installPhase0Validators(forkDigests.altair)
  installPhase0Validators(forkDigests.bellatrix)
  installPhase0Validators(forkDigests.capella)
  installPhase0Validators(forkDigests.eip4844)

  # Fork-specific block topics - each fork digest carries its own block type.
  node.network.addValidator(
    getBeaconBlocksTopic(forkDigests.altair),
    proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
      if node.shouldSyncOptimistically(node.currentSlot):
        toValidationResult(
          node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
      else:
        toValidationResult(node.processor[].processSignedBeaconBlock(
          MsgSource.gossip, signedBlock)))

  node.network.addValidator(
    getBeaconBlocksTopic(forkDigests.bellatrix),
    proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
      if node.shouldSyncOptimistically(node.currentSlot):
        toValidationResult(
          node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
      else:
        toValidationResult(node.processor[].processSignedBeaconBlock(
          MsgSource.gossip, signedBlock)))

  node.network.addValidator(
    getBeaconBlocksTopic(forkDigests.capella),
    proc (signedBlock: capella.SignedBeaconBlock): ValidationResult =
      if node.shouldSyncOptimistically(node.currentSlot):
        toValidationResult(
          node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
      else:
        toValidationResult(node.processor[].processSignedBeaconBlock(
          MsgSource.gossip, signedBlock)))

  # EIP-4844 couples the block with its blobs sidecar on a single topic.
  node.network.addValidator(
    getBeaconBlockAndBlobsSidecarTopic(forkDigests.eip4844),
    proc (signedBlock: eip4844.SignedBeaconBlockAndBlobsSidecar): ValidationResult =
      # TODO: take into account node.shouldSyncOptimistically(node.currentSlot)
      toValidationResult(node.processor[].processSignedBeaconBlockAndBlobsSidecar(
        MsgSource.gossip, signedBlock)))

  # Sync-committee message and contribution validators, shared by all
  # post-Altair forks via their respective fork digests.
  template installSyncCommitteeeValidators(digest: auto) =
    for subcommitteeIdx in SyncSubcommitteeIndex:
      closureScope:
        let idx = subcommitteeIdx
        node.network.addAsyncValidator(
          getSyncCommitteeTopic(digest, idx),
          # This proc needs to be within closureScope; don't lift out of loop.
          proc (msg: SyncCommitteeMessage): Future[ValidationResult] {.async.} =
            return toValidationResult(
              await node.processor.processSyncCommitteeMessage(
                MsgSource.gossip, msg, idx)))

    node.network.addAsyncValidator(
      getSyncCommitteeContributionAndProofTopic(digest),
      proc (msg: SignedContributionAndProof): Future[ValidationResult] {.async.} =
        return toValidationResult(
          await node.processor.processSignedContributionAndProof(
            MsgSource.gossip, msg)))

  installSyncCommitteeeValidators(forkDigests.altair)
  installSyncCommitteeeValidators(forkDigests.bellatrix)
  installSyncCommitteeeValidators(forkDigests.capella)
  installSyncCommitteeeValidators(forkDigests.eip4844)

  node.installLightClientMessageValidators()
2022-03-14 13:05:38 +00:00
2021-12-20 11:21:17 +00:00
proc stop(node: BeaconNode) =
  ## Graceful shutdown: flip the status flag, stop networking, then close
  ## the slashing-protection and main databases last so in-flight writes
  ## can still land.
  bnStatus = BeaconNodeStatus.Stopping
  notice "Graceful shutdown"

  if not node.config.inProcessValidators:
    # Out-of-process validator client: terminate the child process.
    try:
      node.vcProcess.close()
    except Exception as exc:
      warn "Couldn't close vc process", msg = exc.msg
  try:
    waitFor node.network.stop()
  except CatchableError as exc:
    warn "Couldn't stop network", msg = exc.msg

  node.attachedValidators[].slashingProtection.close()
  node.attachedValidators[].close()
  node.db.close()
  notice "Databases closed"
2020-05-19 18:57:35 +00:00
2022-01-20 07:25:45 +00:00
proc startBackfillTask(node: BeaconNode) {.async.} =
  ## Waits for head sync to finish, then starts the backfiller once.
  while node.dag.needsBackfill:
    if node.syncManager.inProgress:
      # Head sync still running - poll again shortly.
      await sleepAsync(chronos.seconds(2))
    else:
      # Only start the backfiller if it's needed _and_ head sync has completed -
      # if we lose sync after having synced head, we could stop the backfilller,
      # but this should be a fringe case - might as well keep the logic simple for
      # now
      node.backfiller.start()
      return
2021-12-20 11:21:17 +00:00
proc run(node: BeaconNode) {.raises: [Defect, CatchableError].} =
  ## Main entry point after initialization: starts REST/keymanager servers,
  ## sync components and the slot/second loops, installs signal handlers,
  ## then blocks polling the event loop until `bnStatus` leaves `Running`.
  bnStatus = BeaconNodeStatus.Running

  if not isNil(node.restServer):
    node.restServer.installRestHandlers(node)
    node.restServer.start()

  if not isNil(node.keymanagerServer):
    doAssert not isNil(node.keymanagerHost)
    node.keymanagerServer.router.installKeymanagerHandlers(node.keymanagerHost[])
    # The keymanager may share the REST server instance - avoid double-start.
    if node.keymanagerServer != node.restServer:
      node.keymanagerServer.start()

  let
    wallTime = node.beaconClock.now()
    wallSlot = wallTime.slotOrZero()

  node.startLightClient()
  node.requestManager.start()
  node.syncManager.start()

  if node.dag.needsBackfill(): asyncSpawn node.startBackfillTask()

  # Gossip status must be in place before the slot loop takes over.
  waitFor node.updateGossipStatus(wallSlot)

  asyncSpawn runSlotLoop(node, wallTime, onSlotStart)
  asyncSpawn runOnSecondLoop(node)
  asyncSpawn runQueueProcessingLoop(node.blockProcessor)

  ## Ctrl+C handling
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      try:
        setupForeignThreadGc()
      except Exception as exc: raiseAssert exc.msg # shouldn't happen
    notice "Shutting down after having received SIGINT"
    bnStatus = BeaconNodeStatus.Stopping

  try:
    setControlCHook(controlCHandler)
  except Exception as exc: # TODO Exception
    warn "Cannot set ctrl-c handler", msg = exc.msg

  # equivalent SIGTERM handler
  when defined(posix):
    proc SIGTERMHandler(signal: cint) {.noconv.} =
      notice "Shutting down after having received SIGTERM"
      bnStatus = BeaconNodeStatus.Stopping

    c_signal(ansi_c.SIGTERM, SIGTERMHandler)

  # main event loop
  while bnStatus == BeaconNodeStatus.Running:
    poll() # if poll fails, the network is broken

  # time to say goodbye
  node.stop()
2018-11-23 23:58:49 +00:00
2018-12-19 12:58:53 +00:00
# Path of the PID file, captured so the quit proc below can remove it.
var gPidFile: string
proc createPidFile(filename: string) {.raises: [Defect, IOError].} =
  ## Write the current process id to `filename` and schedule the file's
  ## removal at process exit (removal failure is deliberately ignored).
  writeFile filename, $os.getCurrentProcessId()
  gPidFile = filename
  # NOTE(review): `addQuitProc` is deprecated in newer Nim in favour of
  # `std/exitprocs.addExitProc` - kept here, presumably for compatibility
  # with the pre-1.4 toolchains this file still supports; confirm.
  addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
2020-06-11 12:13:12 +00:00
proc initializeNetworking(node: BeaconNode) {.async.} =
  ## Install gossip validators, start listening, publish our ENR to disk
  ## and finally bring the network stack fully online.
  node.installMessageValidators()

  info "Listening to incoming network requests"
  await node.network.startListening()

  writeFile(
    node.config.dataDir / "beacon_node.enr",
    node.network.announcedENR.toURI)

  await node.network.start()
2020-06-11 12:13:12 +00:00
2021-12-22 12:37:31 +00:00
proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
  ## Public entry point: log startup state, bring up networking and the
  ## eth1 monitor, then hand control to `run` (which blocks until shutdown).
  let
    head = node.dag.head
    finalizedHead = node.dag.finalizedHead
    genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0))

  notice "Starting beacon node",
    version = fullVersionStr,
    nimVersion = NimVersion,
    enr = node.network.announcedENR.toURI,
    peerId = $node.network.switch.peerInfo.peerId,
    timeSinceFinalization =
      node.beaconClock.now() - finalizedHead.slot.start_beacon_time(),
    head = shortLog(head),
    justified = shortLog(getStateField(
      node.dag.headState, current_justified_checkpoint)),
    finalized = shortLog(getStateField(
      node.dag.headState, finalized_checkpoint)),
    finalizedHead = shortLog(finalizedHead),
    SLOTS_PER_EPOCH,
    SECONDS_PER_SLOT,
    SPEC_VERSION,
    dataDir = node.config.dataDir.string,
    validators = node.attachedValidators[].count

  if genesisTime.inFuture:
    notice "Waiting for genesis", genesisIn = genesisTime.offset

  waitFor node.initializeNetworking()

  if node.eth1Monitor != nil:
    node.eth1Monitor.start()

  node.run()
2019-10-03 01:51:44 +00:00
func formatGwei(amount: uint64): string =
  ## Render a gwei amount as a decimal ETH string with trailing zeros
  ## stripped, e.g. 1_500_000_000 -> "1.5", 0 -> "0".
  const gweiPerEth = 1_000_000_000'u64
  result = $(amount div gweiPerEth)
  var frac = amount mod gweiPerEth
  if frac != 0:
    # Extract exactly nine fractional digits, least significant first.
    var digits: array[9, char]
    for i in countdown(8, 0):
      digits[i] = char(ord('0') + int(frac mod 10))
      frac = frac div 10
    # Find the last significant (non-zero) digit; at least one exists.
    var last = 8
    while digits[last] == '0':
      dec last
    result.add '.'
    for i in 0 .. last:
      result.add digits[i]
2022-03-14 09:19:50 +00:00
when not defined ( windows ) :
proc initStatusBar ( node : BeaconNode ) {. raises : [ Defect , ValueError ] . } =
if not isatty ( stdout ) : return
if not node . config . statusBarEnabled : return
2021-02-22 16:17:48 +00:00
2022-03-14 09:19:50 +00:00
try :
enableTrueColors ( )
except Exception as exc : # TODO Exception
error " Couldn ' t enable colors " , err = exc . msg
proc dataResolver(expr: string): string {.raises: [Defect].} =
  ## Resolves a status-bar placeholder name (e.g. "head_slot") to its
  ## current textual value; unknown names render back as "$<name>".
  # `justified` re-evaluates on each use so the status bar stays current
  template justified: untyped = node.dag.head.atEpochStart(
    getStateField(
      node.dag.headState, current_justified_checkpoint).epoch)
  # TODO:
  # We should introduce a general API for resolving dot expressions
  # such as `db.latest_block.slot` or `metrics.connected_peers`.
  # Such an API can be shared between the RPC back-end, CLI tools
  # such as ncli, a potential GraphQL back-end and so on.
  # The status bar feature would allow the user to specify an
  # arbitrary expression that is resolvable through this API.
  case expr.toLowerAscii
  of "version":
    versionAsStr
  of "full_version":
    fullVersionStr
  of "connected_peers":
    $(node.connectedPeersCount)
  of "head_root":
    shortLog(node.dag.head.root)
  of "head_epoch":
    $(node.dag.head.slot.epoch)
  of "head_epoch_slot":
    $(node.dag.head.slot.since_epoch_start)
  of "head_slot":
    $(node.dag.head.slot)
  # The "justifed_*" spellings are typos, kept for backwards compatibility
  # with existing status-bar configurations; the corrected "justified_*"
  # spellings are also accepted.
  of "justified_root", "justifed_root":
    shortLog(justified.blck.root)
  of "justified_epoch", "justifed_epoch":
    $(justified.slot.epoch)
  of "justified_epoch_slot", "justifed_epoch_slot":
    $(justified.slot.since_epoch_start)
  of "justified_slot", "justifed_slot":
    $(justified.slot)
  of "finalized_root":
    shortLog(node.dag.finalizedHead.blck.root)
  of "finalized_epoch":
    $(node.dag.finalizedHead.slot.epoch)
  of "finalized_epoch_slot":
    $(node.dag.finalizedHead.slot.since_epoch_start)
  of "finalized_slot":
    $(node.dag.finalizedHead.slot)
  of "epoch":
    $node.currentSlot.epoch
  of "epoch_slot":
    $(node.currentSlot.since_epoch_start)
  of "slot":
    $node.currentSlot
  of "slots_per_epoch":
    $SLOTS_PER_EPOCH
  of "slot_trailing_digits":
    # Last three digits of the slot number, for a compact display
    var slotStr = $node.currentSlot
    if slotStr.len > 3: slotStr = slotStr[^3..^1]
    slotStr
  of "attached_validators_balance":
    formatGwei(node.attachedValidatorBalanceTotal)
  of "sync_status":
    node.syncStatus(node.currentSlot)
  else:
    # We ignore typos for now and just render the expression
    # as it was written. TODO: come up with a good way to show
    # an error message to the user.
    "$" & expr
# The status bar is rendered from the user-configurable contents template,
# with placeholders resolved through `dataResolver`.
var statusBar = StatusBarView.init(
  node.config.statusBarContents, dataResolver)

when compiles(defaultChroniclesStream.outputs[0].writer):
  # Wrap the existing chronicles writer so every log line erases the
  # status bar first and re-renders it afterwards, keeping the bar pinned
  # at the bottom of the terminal.
  let oldWriter = defaultChroniclesStream.outputs[0].writer

  defaultChroniclesStream.outputs[0].writer =
    proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [Defect].} =
      try:
        # p.hidePrompt
        erase statusBar
        # p.writeLine msg
        oldWriter(logLevel, msg)
        render statusBar
        # p.showPrompt
      except Exception as exc: # render raises Exception
        logLoggingFailure(cstring(msg), exc)
proc statusBarUpdatesPollingLoop() {.async.} =
  ## Refreshes the status bar once per second; stops permanently on the
  ## first error.
  while true:
    try:
      update statusBar
      erase statusBar
      render statusBar
      await sleepAsync(chronos.seconds(1))
    except CatchableError as exc:
      warn "Failed to update status bar, no further updates", err = exc.msg
      return

asyncSpawn statusBarUpdatesPollingLoop()
proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref HmacDrbgContext) {.raises: [Defect, CatchableError].} =
  ## Runs the default (`noCommand`) path: warns about deprecated options,
  ## starts the metrics server (if enabled), loads the network metadata,
  ## initializes the beacon node and runs it until shutdown.
  info "Launching beacon node",
    version = fullVersionStr,
    bls_backend = $BLS_BACKEND,
    cmdParams = commandLineParams(),
    config

  # Deprecated options are accepted (so old command lines keep working)
  # but have no effect beyond a warning.
  template ignoreDeprecatedOption(option: untyped): untyped =
    if config.option.isSome:
      warn "Config option is deprecated",
        option = config.option.get
  ignoreDeprecatedOption requireEngineAPI
  ignoreDeprecatedOption safeSlotsToImportOptimistically
  ignoreDeprecatedOption terminalTotalDifficultyOverride
  ignoreDeprecatedOption optimistic

  createPidFile(config.dataDir.string / "beacon_node.pid")

  config.createDumpDirs()

  if config.metricsEnabled:
    let metricsAddress = config.metricsAddress
    notice "Starting metrics HTTP server",
      url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
    try:
      startMetricsHttpServer($metricsAddress, config.metricsPort)
    except CatchableError:
      # Bare re-raise preserves the original exception and its stack trace
      # (previously `raise exc`, which discards the activation context)
      raise
    except Exception as exc:
      raiseAssert exc.msg # TODO fix metrics

  # Nim GC metrics (for the main thread) will be collected in onSecond(), but
  # we disable piggy-backing on other metrics here.
  setSystemMetricsAutomaticUpdate(false)

  # There are no managed event loops in here, to do a graceful shutdown, but
  # letting the default Ctrl+C handler exit is safe, since we only read from
  # the db.
  let metadata = config.loadEth2Network()

  # Updating the config based on the metadata certainly is not beautiful but it
  # works
  for node in metadata.bootstrapNodes:
    config.bootstrapNodes.add node

  let node = BeaconNode.init(rng, config, metadata)

  if bnStatus == BeaconNodeStatus.Stopping:
    return

  when not defined(windows):
    # This status bar can lock a Windows terminal emulator, blocking the whole
    # event loop (seen on Windows 10, with a default MSYS2 terminal).
    initStatusBar(node)

  if node.nickname != "":
    dynamicLogScope(node = node.nickname): node.start()
  else:
    node.start()
proc doCreateTestnet*(config: BeaconNodeConf, rng: var HmacDrbgContext) {.raises: [Defect, CatchableError].} =
  ## Creates a testnet genesis state from a LaunchPad deposits file,
  ## writing it as JSON (optional) and SSZ, plus an optional bootstrap
  ## ENR file.
  let launchPadDeposits = try:
    Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
  except SerializationError as err:
    error "Invalid LaunchPad deposits file",
      err = formatMsg(err, config.testnetDepositsFile.string)
    quit 1

  # Convert each LaunchPad deposit to the consensus DepositData type
  # (iterate directly instead of indexing)
  var deposits: seq[DepositData]
  for launchPadDeposit in launchPadDeposits:
    deposits.add(launchPadDeposit as DepositData)

  let
    startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset)
    outGenesis = config.outputGenesis.string
    # With no web3 URL we fall back to the mocked-start eth1 block hash
    eth1Hash = if config.web3Urls.len == 0: eth1BlockHash
               else: (waitFor getEth1BlockHash(
                 config.web3Urls[0], blockId("latest"),
                 rng.loadJwtSecret(config, allowCreate = true))).asEth2Digest
    cfg = getRuntimeConfig(config.eth2Network)

  # `let` suffices: the state is mutated through the ref, not reassigned
  let initialState = newClone(initialize_beacon_state_from_eth1(
    cfg, eth1Hash, startTime, deposits, {skipBlsValidation}))

  # https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
  initialState.genesis_time = startTime

  doAssert initialState.validators.len > 0

  let outGenesisExt = splitFile(outGenesis).ext
  if cmpIgnoreCase(outGenesisExt, ".json") == 0:
    Json.saveFile(outGenesis, initialState, pretty = true)
    echo "Wrote ", outGenesis

  let outSszGenesis = outGenesis.changeFileExt "ssz"
  SSZ.saveFile(outSszGenesis, initialState[])
  echo "Wrote ", outSszGenesis

  let bootstrapFile = config.outputBootstrapFile.string
  if bootstrapFile.len > 0:
    type MetaData = altair.MetaData
    let
      networkKeys = getPersistentNetKeys(rng, config)
      netMetadata = MetaData()
      forkId = getENRForkID(
        cfg,
        initialState[].slot.epoch,
        initialState[].genesis_validators_root)
      bootstrapEnr = enr.Record.init(
        1, # sequence number
        networkKeys.seckey.asEthKey,
        some(config.bootstrapAddress),
        some(config.bootstrapPort),
        some(config.bootstrapPort),
        [
          toFieldPair(enrForkIdField, SSZ.encode(forkId)),
          toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))
        ])

    writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
    echo "Wrote ", bootstrapFile
proc doRecord(config: BeaconNodeConf, rng: var HmacDrbgContext) {.
    raises: [Defect, CatchableError].} =
  ## Handles the `record` sub-commands: create an ENR record from
  ## user-supplied `key:hexvalue` fields, or print an existing record.
  case config.recordCmd:
  of RecordCmd.create:
    let netKeys = getPersistentNetKeys(rng, config)

    var fieldPairs: seq[FieldPair]
    for field in config.fields:
      # Split on the first ':' only, so the value part is never silently
      # truncated if it contains additional separators (previously
      # `split(":")` discarded everything after a second ':')
      let fieldPair = field.split(':', maxsplit = 1)
      if fieldPair.len > 1:
        fieldPairs.add(toFieldPair(fieldPair[0], hexToSeqByte(fieldPair[1])))
      else:
        fatal "Invalid field pair", field
        quit QuitFailure

    let record = enr.Record.init(
      config.seqNumber,
      netKeys.seckey.asEthKey,
      some(config.ipExt),
      some(config.tcpPortExt),
      some(config.udpPortExt),
      fieldPairs).expect("Record within size limits")

    echo record.toURI()

  of RecordCmd.print:
    echo $config.recordPrint
proc doWeb3Cmd(config: BeaconNodeConf, rng: var HmacDrbgContext)
    {.raises: [Defect, CatchableError].} =
  ## Dispatches the `web3` sub-commands.
  case config.web3Cmd:
  of Web3Cmd.test:
    # Probe the configured provider using the deposit contract address
    # from the selected network's metadata.
    let metadata = config.loadEth2Network()
    waitFor testWeb3Provider(
      config.web3TestUrl,
      metadata.cfg.DEPOSIT_CONTRACT_ADDRESS,
      rng.loadJwtSecret(config, allowCreate = true))
proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError, Defect].} =
  ## Exports the local slashing-protection database to an interchange file.
  let
    dir = conf.validatorsDir()
    filetrunc = SlashingDbName
    interchange = conf.exportedInterchangeFile.string

  # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
  let db = SlashingProtectionDB.loadUnchecked(dir, filetrunc, readOnly = false)

  db.exportSlashingInterchange(interchange, conf.exportedValidators)
  echo "Export finished: '", dir/filetrunc & ".sqlite3", "' into '", interchange, "'"
proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError, Defect].} =
  ## Imports a slashing-protection interchange file into the local database.
  let
    dir = conf.validatorsDir()
    filetrunc = SlashingDbName
  # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312

  let interchange = conf.importedInterchangeFile.string

  # `try` as an expression: `quit` is noreturn, so the failure branch
  # never produces a value
  let spdir = try:
    Json.loadFile(interchange, SPDIR, requireAllFields = true)
  except SerializationError as err:
    writeStackTrace()
    stderr.write $Json & " load issue for file \"", interchange, "\"\n"
    stderr.write err.formatMsg(interchange), "\n"
    quit 1

  # Open DB and handle migration from v1 to v2 if needed
  let db = SlashingProtectionDB.init(
    genesis_validators_root = Eth2Digest spdir.metadata.genesis_validators_root,
    basePath = dir,
    dbname = filetrunc,
    modes = {kCompleteArchive}
  )

  # Now import the slashing interchange file
  # Failures mode:
  # - siError can only happen with invalid genesis_validators_root which would be caught above
  # - siPartial can happen for invalid public keys, slashable blocks, slashable votes
  let status = db.inclSPDIR(spdir)
  doAssert status in {siSuccess, siPartial}

  echo "Import finished: '", interchange, "' into '", dir/filetrunc & ".sqlite3", "'"
proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  ## Dispatches the `slashingdb` sub-commands (interchange export/import).
  case conf.slashingdbCmd
  of SlashProtCmd.`export`: conf.doSlashingExport()
  of SlashProtCmd.`import`: conf.doSlashingImport()
proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  ## Routes the parsed command line to the matching sub-command handler.
  # Single RNG instance for the application - will be seeded on construction
  # and avoid using system resources (such as urandom) after that
  let rng = keys.newRng()

  case config.cmd
  of BNStartUpCmd.createTestnet: doCreateTestnet(config, rng[])
  of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng)
  of BNStartUpCmd.deposits: doDeposits(config, rng[])
  of BNStartUpCmd.wallets: doWallets(config, rng[])
  of BNStartUpCmd.record: doRecord(config, rng[])
  of BNStartUpCmd.web3: doWeb3Cmd(config, rng[])
  of BNStartUpCmd.slashingdb: doSlashingInterchange(config)
  of BNStartUpCmd.trustedNodeSync:
    let
      network = loadEth2Network(config)
      cfg = network.cfg
      # Genesis state is optional: custom networks may ship without it,
      # in which case backfill/replay starts from the checkpoint state
      genesis =
        if network.genesisData.len > 0:
          newClone(readSszForkedHashedBeaconState(
            cfg,
            network.genesisData.toOpenArrayByte(0, network.genesisData.high())))
        else: nil

    # `--blockId` was replaced by `--state-id`; fail loudly instead of
    # silently ignoring the removed option
    if config.blockId.isSome():
      error "--blockId option has been removed - use --state-id instead!"
      quit 1

    waitFor doTrustedNodeSync(
      cfg,
      config.databaseDir,
      config.eraDir,
      config.trustedNodeUrl,
      config.stateId,
      config.backfillBlocks,
      config.reindex,
      config.downloadDepositSnapshot,
      genesis)

{.pop.} # TODO moduletests exceptions
when defined(windows):
  proc reportServiceStatus*(dwCurrentState, dwWin32ExitCode, dwWaitHint: DWORD) {.gcsafe.} =
    ## Reports the service's current state to the Windows Service Control
    ## Manager (SCM).
    gSvcStatus.dwCurrentState = dwCurrentState
    gSvcStatus.dwWin32ExitCode = dwWin32ExitCode
    gSvcStatus.dwWaitHint = dwWaitHint
    if dwCurrentState == SERVICE_START_PENDING:
      gSvcStatus.dwControlsAccepted = 0
    else:
      gSvcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP

    # TODO
    # We can use non-zero values for the `dwCheckPoint` parameter to report
    # progress during lengthy operations such as start-up and shut down.
    gSvcStatus.dwCheckPoint = 0

    # Report the status of the service to the SCM.
    let status = SetServiceStatus(gSvcStatusHandle, addr gSvcStatus)
    debug "Service status updated", status

  proc serviceControlHandler(dwCtrl: DWORD): WINBOOL {.stdcall.} =
    ## Callback invoked by the SCM to deliver control messages (stop,
    ## pause, interrogate, ...).
    case dwCtrl
    of SERVICE_CONTROL_STOP:
      # We're reporting that we plan to stop the service in 10 seconds
      reportServiceStatus(SERVICE_STOP_PENDING, NO_ERROR, 10_000)
      bnStatus = BeaconNodeStatus.Stopping
    of SERVICE_CONTROL_PAUSE, SERVICE_CONTROL_CONTINUE:
      # typo fix: previously read "resimed"
      warn "The Nimbus service cannot be paused and resumed"
    of SERVICE_CONTROL_INTERROGATE:
      # The default behavior is correct.
      # The service control manager will report our last status.
      discard
    else:
      debug "Service received an unexpected user-defined control message",
        msg = dwCtrl

  proc serviceMainFunction(dwArgc: DWORD, lpszArgv: LPSTR) {.stdcall.} =
    ## Entry point executed by the SCM in its own thread; runs the regular
    ## beacon-node start-up path and reports state transitions to the SCM.
    # The service is launched in a fresh thread created by Windows, so
    # we must initialize the Nim GC here
    setupForeignThreadGc()

    gSvcStatusHandle = RegisterServiceCtrlHandler(
      SERVICE_NAME,
      serviceControlHandler)

    gSvcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
    gSvcStatus.dwServiceSpecificExitCode = 0
    reportServiceStatus(SERVICE_RUNNING, NO_ERROR, 0)

    info "Service thread started"

    var config = makeBannerAndConfig(clientId, BeaconNodeConf)
    handleStartUpCmd(config)

    info "Service thread stopped"
    reportServiceStatus(SERVICE_STOPPED, NO_ERROR, 0) # we have to report back when we stopped!
programMain:
  var config = makeBannerAndConfig(clientId, BeaconNodeConf)

  if not checkAndCreateDataDir(string(config.dataDir)):
    # We are unable to access/create data folder or data folder's
    # permissions are insecure.
    quit QuitFailure

  setupLogging(config.logLevel, config.logStdout, config.logFile)

  ## This Ctrl+C handler exits the program in non-graceful way.
  ## It's responsible for handling Ctrl+C in sub-commands such
  ## as `wallets *` and `deposits *`. In a regular beacon node
  ## run, it will be overwritten later with a different handler
  ## performing a graceful exit.
  proc exitImmediatelyOnCtrlC() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    # in case a password prompt disabled echoing
    resetStdin()
    echo "" # If we interrupt during an interactive prompt, this
            # will move the cursor to the next line
    notice "Shutting down after having received SIGINT"
    quit 0
  setControlCHook(exitImmediatelyOnCtrlC)

  # equivalent SIGTERM handler
  when defined(posix):
    proc exitImmediatelyOnSIGTERM(signal: cint) {.noconv.} =
      notice "Shutting down after having received SIGTERM"
      quit 0
    c_signal(ansi_c.SIGTERM, exitImmediatelyOnSIGTERM)

  when defined(windows):
    if config.runAsService:
      # Hand control to the Windows SCM; the dispatch table must be
      # terminated by a nil entry.
      var dispatchTable = [
        SERVICE_TABLE_ENTRY(lpServiceName: SERVICE_NAME, lpServiceProc: serviceMainFunction),
        SERVICE_TABLE_ENTRY(lpServiceName: nil, lpServiceProc: nil) # last entry must be nil
      ]

      let status = StartServiceCtrlDispatcher(LPSERVICE_TABLE_ENTRY(addr dispatchTable[0]))
      if status == 0:
        fatal "Failed to start Windows service", errorCode = getLastError()
        quit 1
    else:
      handleStartUpCmd(config)
  else:
    handleStartUpCmd(config)