# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  std/[os, random, sequtils, terminal, times],
  bearssl, chronicles, chronos,
  metrics, metrics/chronos_httpserver,
  stew/[byteutils, io2],
  eth/p2p/discoveryv5/[enr, random2],
  eth/keys,
  ./rpc/[rest_api, rpc_api, state_ttl_cache],
  ./spec/datatypes/[altair, bellatrix, phase0],
  ./spec/[engine_authentication, weak_subjectivity],
  ./validators/[keystore_management, validator_duties],
  "."/[
    beacon_node, deposits, interop, nimbus_binary_common, statusbar,
    trusted_node_sync, wallets]

when defined(posix):
  import system/ansi_c

from
  libp2p/protocols/pubsub/gossipsub
import
  TopicParams, validateParameters, init

when defined(windows):
  import winlean

  type
    LPCSTR* = cstring
    LPSTR* = cstring

    SERVICE_STATUS* {.final, pure.} = object
      dwServiceType*: DWORD
      dwCurrentState*: DWORD
      dwControlsAccepted*: DWORD
      dwWin32ExitCode*: DWORD
      dwServiceSpecificExitCode*: DWORD
      dwCheckPoint*: DWORD
      dwWaitHint*: DWORD

    SERVICE_STATUS_HANDLE* = DWORD
    LPSERVICE_STATUS* = ptr SERVICE_STATUS
    LPSERVICE_MAIN_FUNCTION* = proc (para1: DWORD, para2: LPSTR) {.stdcall.}

    SERVICE_TABLE_ENTRY* {.final, pure.} = object
      lpServiceName*: LPSTR
      lpServiceProc*: LPSERVICE_MAIN_FUNCTION

    LPSERVICE_TABLE_ENTRY* = ptr SERVICE_TABLE_ENTRY
    LPHANDLER_FUNCTION* = proc (para1: DWORD): WINBOOL{.stdcall.}

  const
    SERVICE_WIN32_OWN_PROCESS = 16
    SERVICE_RUNNING = 4
    SERVICE_STOPPED = 1
    SERVICE_START_PENDING = 2
    SERVICE_STOP_PENDING = 3
    SERVICE_CONTROL_STOP = 1
    SERVICE_CONTROL_PAUSE = 2
    SERVICE_CONTROL_CONTINUE = 3
    SERVICE_CONTROL_INTERROGATE = 4
    SERVICE_ACCEPT_STOP = 1
    NO_ERROR = 0
    SERVICE_NAME = LPCSTR "NIMBUS_BEACON_NODE"

  var
    gSvcStatusHandle: SERVICE_STATUS_HANDLE
    gSvcStatus: SERVICE_STATUS

  proc reportServiceStatus*(dwCurrentState, dwWin32ExitCode, dwWaitHint: DWORD) {.gcsafe.}

  proc StartServiceCtrlDispatcher*(lpServiceStartTable: LPSERVICE_TABLE_ENTRY): WINBOOL{.
    stdcall, dynlib: "advapi32", importc: "StartServiceCtrlDispatcherA".}

  proc SetServiceStatus*(hServiceStatus: SERVICE_STATUS_HANDLE,
                         lpServiceStatus: LPSERVICE_STATUS): WINBOOL{.stdcall,
    dynlib: "advapi32", importc: "SetServiceStatus".}

  proc RegisterServiceCtrlHandler*(lpServiceName: LPCSTR,
                                   lpHandlerProc: LPHANDLER_FUNCTION): SERVICE_STATUS_HANDLE{.
    stdcall, dynlib: "advapi32", importc: "RegisterServiceCtrlHandlerA".}

type
  RpcServer = RpcHttpServer

template init(T: type RpcHttpServer, ip: ValidIpAddress, port: Port): T =
  newRpcHttpServer([initTAddress(ip, port)])

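# Constructs the REST server for the given address, applying the request
# header/body size limits and timeout from the node configuration; evaluates
# to nil (after logging a notice) when the server cannot be started.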
template init(T: type RestServerRef,
              ip: ValidIpAddress, port: Port,
              allowedOrigin: Option[string],
              config: BeaconNodeConf): T =
  let address = initTAddress(ip, port)
  let serverFlags = {HttpServerFlags.QueryCommaSeparatedArray,
                     HttpServerFlags.NotifyDisconnect}
  let
    headersTimeout =
      if config.restRequestTimeout == 0:
        chronos.InfiniteDuration
      else:
        seconds(int64(config.restRequestTimeout))
    maxHeadersSize = config.restMaxRequestHeadersSize * 1024
    maxRequestBodySize = config.restMaxRequestBodySize * 1024
  let res = RestServerRef.new(getRouter(allowedOrigin),
                              address, serverFlags = serverFlags,
                              httpHeadersTimeout = headersTimeout,
                              maxHeadersSize = maxHeadersSize,
                              maxRequestBodySize = maxRequestBodySize)
  if res.isErr():
    notice "Rest server could not be started", address = $address,
           reason = res.error()
    nil
  else:
    notice "Starting REST HTTP server",
      url = "http://" & $ip & ":" & $port & "/"

    res.get()

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_slot, "Latest slot of the beacon chain state"
declareGauge beacon_current_epoch, "Current epoch"

# Finalization tracking
declareGauge finalization_delay,
  "Epoch delay between scheduled epoch and finalized epoch"

declareGauge ticks_delay,
  "How long it takes to run the onSecond loop"

declareGauge next_action_wait,
  "Seconds until the next attestation will be sent"

declareGauge versionGauge, "Nimbus version info (as metric labels)", ["version", "commit"], name = "version"
versionGauge.set(1, labelValues=[fullVersionStr, gitRevision])

logScope: topics = "beacnde"

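# Loads the block DAG from the database, wiring the event bus callbacks for
# blocks, head changes and reorgs, and aborts if the database belongs to a
# different network than the one being joined.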
proc loadChainDag(
    config: BeaconNodeConf,
    cfg: RuntimeConfig,
    db: BeaconChainDB,
    eventBus: AsyncEventBus,
    validatorMonitor: ref ValidatorMonitor,
    networkGenesisValidatorsRoot: Option[Eth2Digest]): ChainDAGRef =
  info "Loading block DAG from database", path = config.databaseDir

  proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
    eventBus.emit("signed-beacon-block", data)
  proc onHeadChanged(data: HeadChangeInfoObject) =
    eventBus.emit("head-change", data)
  proc onChainReorg(data: ReorgInfoObject) =
    eventBus.emit("chain-reorg", data)
  proc onOptimisticLightClientUpdate(data: OptimisticLightClientUpdate) =
    discard

  let
    chainDagFlags =
      if config.verifyFinalization: {verifyFinalization}
      else: {}
    onOptimisticLightClientUpdateCb =
      if config.serveLightClientData.get: onOptimisticLightClientUpdate
      else: nil
    dag = ChainDAGRef.init(
      cfg, db, validatorMonitor, chainDagFlags, config.eraDir,
      onBlockAdded, onHeadChanged, onChainReorg,
      onOptimisticLCUpdateCb = onOptimisticLightClientUpdateCb,
      serveLightClientData = config.serveLightClientData.get,
      importLightClientData = config.importLightClientData.get)
    databaseGenesisValidatorsRoot =
      getStateField(dag.headState, genesis_validators_root)

  if networkGenesisValidatorsRoot.isSome:
    if networkGenesisValidatorsRoot.get != databaseGenesisValidatorsRoot:
      fatal "The specified --data-dir contains data for a different network",
        networkGenesisValidatorsRoot = networkGenesisValidatorsRoot.get,
        databaseGenesisValidatorsRoot,
        dataDir = config.dataDir
      quit 1

  dag

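# Compares the given weak subjectivity checkpoint against the wall clock and
# head state, exiting if the checkpoint is no longer within the weak
# subjectivity period.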
proc checkWeakSubjectivityCheckpoint(
    dag: ChainDAGRef,
    wsCheckpoint: Checkpoint,
    beaconClock: BeaconClock) =
  let
    currentSlot = beaconClock.now.slotOrZero
    isCheckpointStale = not is_within_weak_subjectivity_period(
      dag.cfg, currentSlot, dag.headState, wsCheckpoint)

  if isCheckpointStale:
    error "Weak subjectivity checkpoint is stale",
      currentSlot, checkpoint = wsCheckpoint,
      headStateSlot = getStateField(dag.headState, slot)
    quit 1

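# Wires up the pools, processors and sync managers around an initialized
# ChainDAG and attaches them to the node, then loads the validators and
# prepares networking state (ENR, beacon sync).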
proc initFullNode(
    node: BeaconNode,
    rng: ref BrHmacDrbgContext,
    dag: ChainDAGRef,
    taskpool: TaskpoolPtr,
    getBeaconTime: GetBeaconTimeFn) =
  template config(): auto = node.config

  proc onAttestationReceived(data: Attestation) =
    node.eventBus.emit("attestation-received", data)
  proc onSyncContribution(data: SignedContributionAndProof) =
    node.eventBus.emit("sync-contribution-and-proof", data)
  proc onVoluntaryExitAdded(data: SignedVoluntaryExit) =
    node.eventBus.emit("voluntary-exit", data)
  proc makeOnFinalizationCb(
      # This `nimcall` function helps keep track of what
      # needs to be captured by the onFinalization closure.
      eventBus: AsyncEventBus,
      eth1Monitor: Eth1Monitor): OnFinalizedCallback {.nimcall.} =
    static: doAssert (eventBus is ref) and (eth1Monitor is ref)
    return proc(dag: ChainDAGRef, data: FinalizationInfoObject) =
      if eth1Monitor != nil:
        let finalizedEpochRef = dag.getFinalizedEpochRef()
        discard trackFinalizedState(eth1Monitor,
                                    finalizedEpochRef.eth1_data,
                                    finalizedEpochRef.eth1_deposit_index)
      eventBus.emit("finalization", data)

  func getLocalHeadSlot(): Slot =
    dag.head.slot

  proc getLocalWallSlot(): Slot =
    node.beaconClock.now.slotOrZero

  func getFirstSlotAtFinalizedEpoch(): Slot =
    dag.finalizedHead.slot

  func getBackfillSlot(): Slot =
    dag.backfill.slot

  func getFrontfillSlot(): Slot =
    dag.frontfill.slot

  let
    quarantine = newClone(
      Quarantine.init())
    attestationPool = newClone(
      AttestationPool.init(
        dag, quarantine, onAttestationReceived, config.proposerBoosting))
    syncCommitteeMsgPool = newClone(
      SyncCommitteeMsgPool.init(rng, onSyncContribution))
    exitPool = newClone(
      ExitPool.init(dag, onVoluntaryExitAdded))
    consensusManager = ConsensusManager.new(
      dag, attestationPool, quarantine, node.eth1Monitor)
    blockProcessor = BlockProcessor.new(
      config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
      rng, taskpool, consensusManager, node.validatorMonitor, getBeaconTime)
    blockVerifier = proc(signedBlock: ForkedSignedBeaconBlock):
        Future[Result[void, BlockError]] =
      # The design with a callback for block verification is unusual compared
      # to the rest of the application, but fits with the general approach
      # taken in the sync/request managers - this is an architectural compromise
      # that should probably be reimagined more holistically in the future.
      let resfut = newFuture[Result[void, BlockError]]("blockVerifier")
      blockProcessor[].addBlock(MsgSource.gossip, signedBlock, resfut)
      resfut
    processor = Eth2Processor.new(
      config.doppelgangerDetection,
      blockProcessor, node.validatorMonitor, dag, attestationPool, exitPool,
      node.attachedValidators, syncCommitteeMsgPool, quarantine, rng,
      getBeaconTime, taskpool)
    syncManager = newSyncManager[Peer, PeerID](
      node.network.peerPool, SyncQueueKind.Forward, getLocalHeadSlot,
      getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
      getFrontfillSlot, dag.tail.slot, blockVerifier)
    backfiller = newSyncManager[Peer, PeerID](
      node.network.peerPool, SyncQueueKind.Backward, getLocalHeadSlot,
      getLocalWallSlot, getFirstSlotAtFinalizedEpoch, getBackfillSlot,
      getFrontfillSlot, dag.backfill.slot, blockVerifier, maxHeadAge = 0)

  dag.setFinalizationCb makeOnFinalizationCb(node.eventBus, node.eth1Monitor)

  node.dag = dag
  node.quarantine = quarantine
  node.attestationPool = attestationPool
  node.syncCommitteeMsgPool = syncCommitteeMsgPool
  node.exitPool = exitPool
  node.processor = processor
  node.blockProcessor = blockProcessor
  node.consensusManager = consensusManager
  node.requestManager = RequestManager.init(node.network, blockVerifier)
  node.syncManager = syncManager
  node.backfiller = backfiller

  debug "Loading validators", validatorsDir = config.validatorsDir()

  node.addValidators()

  block:
    # Add in-process validators to the list of "known" validators such that
    # we start with a reasonable ENR
    let wallSlot = node.beaconClock.now().slotOrZero()
    for validator in node.attachedValidators[].validators.values():
      if config.validatorMonitorAuto:
        node.validatorMonitor[].addMonitor(validator.pubkey, validator.index)

      if validator.index.isSome():
        node.actionTracker.knownValidators[validator.index.get()] = wallSlot
    let
      stabilitySubnets = node.actionTracker.stabilitySubnets(wallSlot)
    # Here, we also set the correct ENR should we be in all subnets mode!
    node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  node.network.initBeaconSync(dag, getBeaconTime)

  node.updateValidatorMetrics()

const SlashingDbName = "slashing_protection"
  # changing this requires physical file rename as well or history is lost.

func getBeaconTimeFn(clock: BeaconClock): GetBeaconTimeFn =
  return proc(): BeaconTime = clock.now()

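# Creates and initializes a BeaconNode: sets up the taskpool, database,
# genesis/checkpoint state, Eth1 monitor, RPC/REST/Keymanager servers and
# slashing protection before handing over to initFullNode.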
proc init*(T: type BeaconNode,
           cfg: RuntimeConfig,
           rng: ref BrHmacDrbgContext,
           config: BeaconNodeConf,
           depositContractDeployedAt: BlockHashOrNumber,
           eth1Network: Option[Eth1Network],
           genesisStateContents: string,
           depositContractSnapshotContents: string): BeaconNode {.
    raises: [Defect, CatchableError].} =

  var taskpool: TaskpoolPtr

  let depositContractSnapshot = if depositContractSnapshotContents.len > 0:
    try:
      some SSZ.decode(depositContractSnapshotContents, DepositContractSnapshot)
    except CatchableError as err:
      fatal "Invalid deposit contract snapshot", err = err.msg
      quit 1
  else:
    none DepositContractSnapshot

  try:
    if config.numThreads < 0:
      fatal "The number of threads --numThreads cannot be negative."
      quit 1
    elif config.numThreads == 0:
      taskpool = TaskpoolPtr.new(numThreads = min(countProcessors(), 16))
    else:
      taskpool = TaskpoolPtr.new(numThreads = config.numThreads)

    info "Threadpool started", numThreads = taskpool.numThreads
  except Exception as exc:
    raise newException(Defect, "Failure in taskpool initialization.")

  let
    eventBus = newAsyncEventBus()
    db = BeaconChainDB.new(config.databaseDir, inMemory = false)

  var
    genesisState, checkpointState: ref ForkedHashedBeaconState
    checkpointBlock: ForkedTrustedSignedBeaconBlock

  if config.finalizedCheckpointState.isSome:
    let checkpointStatePath = config.finalizedCheckpointState.get.string
    checkpointState = try:
      newClone(readSszForkedHashedBeaconState(
        cfg, readAllBytes(checkpointStatePath).tryGet()))
    except SszError as err:
      fatal "Checkpoint state deserialization failed",
        err = formatMsg(err, checkpointStatePath)
      quit 1
    except CatchableError as err:
      fatal "Failed to read checkpoint state file", err = err.msg
      quit 1

    if config.finalizedCheckpointBlock.isNone:
      if getStateField(checkpointState[], slot) > 0:
        fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
        quit 1
    else:
      let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
      try:
        # Checkpoint block might come from an earlier fork than the state with
        # the state having empty slots processed past the fork epoch.
        let tmp = readSszForkedSignedBeaconBlock(
          cfg, readAllBytes(checkpointBlockPath).tryGet())
        checkpointBlock = tmp.asTrusted()
      except SszError as err:
        fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
        quit 1
      except IOError as err:
        fatal "Failed to load the checkpoint block", err = err.msg
        quit 1
  elif config.finalizedCheckpointBlock.isSome:
    # TODO We can download the state from somewhere in the future relying
    #      on the trusted `state_root` appearing in the checkpoint block.
    fatal "--finalized-checkpoint-block cannot be specified without --finalized-checkpoint-state"
    quit 1

  let jwtSecret = rng[].checkJwtSecret(string(config.dataDir), config.jwtSecret)
  if jwtSecret.isErr:
    fatal "Specified a JWT secret file which couldn't be loaded",
      err = jwtSecret.error
    quit 1

  # The JWT secret created always exists, it just might not always be used
  let optJwtSecret = if config.useJwt: some jwtSecret.get else: none(seq[byte])

  template getDepositContractSnapshot: auto =
    if depositContractSnapshot.isSome:
      depositContractSnapshot
    elif not cfg.DEPOSIT_CONTRACT_ADDRESS.isZeroMemory:
      let snapshotRes = waitFor createInitialDepositSnapshot(
        cfg.DEPOSIT_CONTRACT_ADDRESS,
        depositContractDeployedAt,
        config.web3Urls[0],
        optJwtSecret)
      if snapshotRes.isErr:
        fatal "Failed to locate the deposit contract deployment block",
          depositContract = cfg.DEPOSIT_CONTRACT_ADDRESS,
          deploymentBlock = $depositContractDeployedAt
        quit 1
      else:
        some snapshotRes.get
    else:
      none(DepositContractSnapshot)

  var eth1Monitor: Eth1Monitor
  if not ChainDAGRef.isInitialized(db).isOk():
    var
      tailState: ref ForkedHashedBeaconState
      tailBlock: ForkedTrustedSignedBeaconBlock

    if genesisStateContents.len == 0 and checkpointState == nil:
      when hasGenesisDetection:
        if depositContractSnapshotContents.len > 0:
          fatal "A deposits snapshot cannot be provided without also providing a matching beacon state snapshot"
          quit 1

        # This is a fresh start without a known genesis state
        # (most likely, it hasn't arrived yet). We'll try to
        # obtain a genesis through the Eth1 deposits monitor:
        if config.web3Urls.len == 0:
          fatal "Web3 URL not specified"
          quit 1

        # TODO Could move this to a separate "GenesisMonitor" process or task
        #      that would do only this - see Paul's proposal for this.
        let eth1Monitor = Eth1Monitor.init(
          cfg,
          db,
          nil,
          config.web3Urls,
          getDepositContractSnapshot(),
          eth1Network,
          config.web3ForcePolling,
          optJwtSecret)

        eth1Monitor.loadPersistedDeposits()

        let phase0Genesis = waitFor eth1Monitor.waitGenesis()
        genesisState = newClone ForkedHashedBeaconState.init(
          phase0.HashedBeaconState(data: phase0Genesis[],
                                   root: hash_tree_root(phase0Genesis[])))

        if bnStatus == BeaconNodeStatus.Stopping:
          return nil

        tailState = genesisState
        tailBlock = get_initial_beacon_block(genesisState[])

        notice "Eth2 genesis state detected",
          genesisTime = phase0Genesis.genesisTime,
          eth1Block = phase0Genesis.eth1_data.block_hash,
          totalDeposits = phase0Genesis.eth1_data.deposit_count
      else:
        fatal "No database and no genesis snapshot found: supply a genesis.ssz " &
              "with the network configuration, or compile the beacon node with " &
              "the -d:has_genesis_detection option " &
              "in order to support monitoring for genesis events"
        quit 1

    elif genesisStateContents.len == 0:
      if getStateField(checkpointState[], slot) == GENESIS_SLOT:
        genesisState = checkpointState
        tailState = checkpointState
        tailBlock = get_initial_beacon_block(genesisState[])
      else:
        fatal "State checkpoints cannot be provided for a network without a known genesis state"
        quit 1
    else:
      try:
        genesisState = newClone(readSszForkedHashedBeaconState(
          cfg,
          genesisStateContents.toOpenArrayByte(0, genesisStateContents.high())))
      except CatchableError as err:
        raiseAssert "Invalid baked-in state: " & err.msg

      if not checkpointState.isNil:
        tailState = checkpointState
        tailBlock = checkpointBlock
      else:
        tailState = genesisState
        tailBlock = get_initial_beacon_block(genesisState[])

    try:
      ChainDAGRef.preInit(db, genesisState[], tailState[], tailBlock)
      doAssert ChainDAGRef.isInitialized(db).isOk(), "preInit should have initialized db"
    except CatchableError as exc:
      error "Failed to initialize database", err = exc.msg
      quit 1
  else:
    if not checkpointState.isNil:
      fatal "A database already exists, cannot start from given checkpoint",
        dataDir = config.dataDir
      quit 1

  # Doesn't use std/random directly, but dependencies might
  randomize(rng[].rand(high(int)))

  let
    validatorMonitor = newClone(ValidatorMonitor.init(
      config.validatorMonitorAuto, config.validatorMonitorTotals))

  for key in config.validatorMonitorPubkeys:
    validatorMonitor[].addMonitor(key, none(ValidatorIndex))

  let
    networkGenesisValidatorsRoot: Option[Eth2Digest] =
      if genesisStateContents.len != 0:
        some(extractGenesisValidatorRootFromSnapshot(genesisStateContents))
      else:
        none(Eth2Digest)
    dag = loadChainDag(
      config, cfg, db, eventBus,
      validatorMonitor, networkGenesisValidatorsRoot)
    beaconClock = BeaconClock.init(
      getStateField(dag.headState, genesis_time))
    getBeaconTime = beaconClock.getBeaconTimeFn()

  if config.weakSubjectivityCheckpoint.isSome:
    dag.checkWeakSubjectivityCheckpoint(
      config.weakSubjectivityCheckpoint.get, beaconClock)

  if eth1Monitor.isNil and config.web3Urls.len > 0:
    eth1Monitor = Eth1Monitor.init(
      cfg,
      db,
      getBeaconTime,
      config.web3Urls,
      getDepositContractSnapshot(),
      eth1Network,
      config.web3ForcePolling,
      optJwtSecret)

  let rpcServer = if config.rpcEnabled:
    RpcServer.init(config.rpcAddress, config.rpcPort)
  else:
    nil

  let restServer = if config.restEnabled:
    RestServerRef.init(
      config.restAddress,
      config.restPort,
      config.restAllowedOrigin,
      config)
  else:
    nil

  var keymanagerToken: Option[string]
  let keymanagerServer = if config.keymanagerEnabled:
    if config.keymanagerTokenFile.isNone:
      echo "To enable the Keymanager API, you must also specify " &
           "the --keymanager-token-file option."
      quit 1

    let
      tokenFilePath = config.keymanagerTokenFile.get.string
      tokenFileReadRes = readAllChars(tokenFilePath)

    if tokenFileReadRes.isErr:
      fatal "Failed to read the keymanager token file",
        error = $tokenFileReadRes.error
      quit 1

    keymanagerToken = some tokenFileReadRes.value.strip
    if keymanagerToken.get.len == 0:
      fatal "The keymanager token should not be empty", tokenFilePath
      quit 1

    if restServer != nil and
       config.restAddress == config.keymanagerAddress and
       config.restPort == config.keymanagerPort:
      if config.keymanagerAllowedOrigin.isSome and
         config.restAllowedOrigin != config.keymanagerAllowedOrigin:
        fatal "Please specify a separate port for the Keymanager API " &
              "if you want to restrict the origin in a different way " &
              "from the Beacon API"
        quit 1
      restServer
    else:
      RestServerRef.init(
        config.keymanagerAddress,
        config.keymanagerPort,
        config.keymanagerAllowedOrigin,
        config)
  else:
    nil

  let
    netKeys = getPersistentNetKeys(rng[], config)
    nickname = if config.nodeName == "auto": shortForm(netKeys)
               else: config.nodeName
    network = createEth2Node(
      rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime,
      getStateField(dag.headState, genesis_validators_root))

  case config.slashingDbKind
  of SlashingDbKind.v2:
    discard
  of SlashingDbKind.v1:
    error "Slashing DB v1 is no longer supported for writing"
    quit 1
  of SlashingDbKind.both:
    warn "Slashing DB v1 deprecated, writing only v2"

  info "Loading slashing protection database (v2)",
    path = config.validatorsDir()

  let
    slashingProtectionDB =
      SlashingProtectionDB.init(
        getStateField(dag.headState, genesis_validators_root),
        config.validatorsDir(), SlashingDbName)
    validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))

    stateTtlCache =
      if config.restCacheSize > 0:
        StateTtlCache.init(
          cacheSize = config.restCacheSize,
          cacheTtl = chronos.seconds(config.restCacheTtl))
      else:
        nil

  var node = BeaconNode(
    nickname: nickname,
    graffitiBytes: if config.graffiti.isSome: config.graffiti.get
                   else: defaultGraffitiBytes(),
    network: network,
    netKeys: netKeys,
    db: db,
    config: config,
    attachedValidators: validatorPool,
    eth1Monitor: eth1Monitor,
    rpcServer: rpcServer,
    restServer: restServer,
    keymanagerServer: keymanagerServer,
    keymanagerToken: keymanagerToken,
    eventBus: eventBus,
    actionTracker: ActionTracker.init(rng, config.subscribeAllSubnets),
    gossipState: {},
    beaconClock: beaconClock,
    validatorMonitor: validatorMonitor,
    stateTtlCache: stateTtlCache
  )

  node.initFullNode(
    rng, dag, taskpool, getBeaconTime)

  node

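# Consistency check: asserts that finalization is keeping up with the wall
# clock, failing loudly during testing when it falls behind.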
func verifyFinalization(node: BeaconNode, slot: Slot) =
  # Epoch must be >= 4 to check finalization
  const SETTLING_TIME_OFFSET = 1'u64
  let epoch = slot.epoch()

  # Don't static-assert this -- if this isn't called, don't require it
  doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET

  # Intentionally, loudly assert. Point is to fail visibly and unignorably
  # during testing.
  if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET:
    let finalizedEpoch =
      node.dag.finalizedHead.slot.epoch()
    # Finalization rule 234, that has the most lag slots among the cases, sets
    # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
    # and then state.slot gets incremented, to increase the maximum offset, if
    # finalization occurs every slot, to 4 slots vs scheduledSlot.
    doAssert finalizedEpoch + 4 >= epoch

func subnetLog(v: BitArray): string =
  $toSeq(v.oneIndices())

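# Returns the fork digests as an array indexed by BeaconStateFork, so that
# gossip handlers can look up the digest for each active fork.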
func forkDigests(node: BeaconNode): auto =
  let forkDigestsArray: array[BeaconStateFork, auto] = [
    node.dag.forkDigests.phase0,
    node.dag.forkDigests.altair,
    node.dag.forkDigests.bellatrix]
  forkDigestsArray

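# Computes the attestation subnets the node should currently be on and
# adjusts gossip subscriptions accordingly for every active fork.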
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/validator.md#phase-0-attestation-subnet-stability
proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
  if node.gossipState.card == 0:
    # When disconnected, updateGossipState is responsible for all things
    # subnets - in particular, it will remove subscriptions on the edge where
    # we enter the disconnected state.
    return

  let
    aggregateSubnets = node.actionTracker.aggregateSubnets(slot)
    stabilitySubnets = node.actionTracker.stabilitySubnets(slot)
    subnets = aggregateSubnets + stabilitySubnets

  node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  # Now we know what we should be subscribed to - make it so
  let
    prevSubnets = node.actionTracker.subscribedSubnets
    unsubscribeSubnets = prevSubnets - subnets
    subscribeSubnets = subnets - prevSubnets

  # Remember what we subscribed to, so we can unsubscribe later
  node.actionTracker.subscribedSubnets = subnets

  let forkDigests = node.forkDigests()

  for gossipFork in node.gossipState:
    let forkDigest = forkDigests[gossipFork]
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, forkDigest)
    node.network.subscribeAttestationSubnets(subscribeSubnets, forkDigest)

  debug "Attestation subnets",
    slot, epoch = slot.epoch, gossipState = node.gossipState,
    stabilitySubnets = subnetLog(stabilitySubnets),
    aggregateSubnets = subnetLog(aggregateSubnets),
    prevSubnets = subnetLog(prevSubnets),
    subscribeSubnets = subnetLog(subscribeSubnets),
    unsubscribeSubnets = subnetLog(unsubscribeSubnets)

2021-08-09 12:54:45 +00:00
|
|
|
# inspired by lighthouse research here
|
|
|
|
# https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py
|
|
|
|
const
|
|
|
|
blocksTopicParams = TopicParams(
|
|
|
|
topicWeight: 0.5,
|
|
|
|
timeInMeshWeight: 0.03333333333333333,
|
|
|
|
timeInMeshQuantum: chronos.seconds(12),
|
|
|
|
timeInMeshCap: 300,
|
|
|
|
firstMessageDeliveriesWeight: 1.1471603557060206,
|
|
|
|
firstMessageDeliveriesDecay: 0.9928302477768374,
|
|
|
|
firstMessageDeliveriesCap: 34.86870846001471,
|
|
|
|
meshMessageDeliveriesWeight: -458.31054878249114,
|
|
|
|
meshMessageDeliveriesDecay: 0.9716279515771061,
|
|
|
|
meshMessageDeliveriesThreshold: 0.6849191409056553,
|
|
|
|
meshMessageDeliveriesCap: 2.054757422716966,
|
|
|
|
meshMessageDeliveriesActivation: chronos.seconds(384),
|
|
|
|
meshMessageDeliveriesWindow: chronos.seconds(2),
|
|
|
|
meshFailurePenaltyWeight: -458.31054878249114 ,
|
|
|
|
meshFailurePenaltyDecay: 0.9716279515771061,
|
|
|
|
invalidMessageDeliveriesWeight: -214.99999999999994,
|
|
|
|
invalidMessageDeliveriesDecay: 0.9971259067705325
|
|
|
|
)
|
|
|
|
aggregateTopicParams = TopicParams(
|
|
|
|
topicWeight: 0.5,
|
|
|
|
timeInMeshWeight: 0.03333333333333333,
|
|
|
|
timeInMeshQuantum: chronos.seconds(12),
|
|
|
|
timeInMeshCap: 300,
|
|
|
|
firstMessageDeliveriesWeight: 0.10764904539552399,
|
|
|
|
firstMessageDeliveriesDecay: 0.8659643233600653,
|
|
|
|
firstMessageDeliveriesCap: 371.5778421725158,
|
|
|
|
meshMessageDeliveriesWeight: -0.07538533073670682,
|
|
|
|
meshMessageDeliveriesDecay: 0.930572040929699,
|
|
|
|
meshMessageDeliveriesThreshold: 53.404248450179836,
|
|
|
|
meshMessageDeliveriesCap: 213.61699380071934,
|
|
|
|
meshMessageDeliveriesActivation: chronos.seconds(384),
|
|
|
|
meshMessageDeliveriesWindow: chronos.seconds(2),
|
|
|
|
meshFailurePenaltyWeight: -0.07538533073670682 ,
|
|
|
|
meshFailurePenaltyDecay: 0.930572040929699,
|
|
|
|
invalidMessageDeliveriesWeight: -214.99999999999994,
|
|
|
|
invalidMessageDeliveriesDecay: 0.9971259067705325
|
|
|
|
)
|
|
|
|
basicParams = TopicParams.init()
|
|
|
|
|
|
|
|
static:
|
|
|
|
# compile time validation
|
|
|
|
blocksTopicParams.validateParameters().tryGet()
|
|
|
|
aggregateTopicParams.validateParameters().tryGet()
|
|
|
|
basicParams.validateParameters.tryGet()
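# The topic parameters above come from an external scoring-parameter script
# (linked above); the gist is that each per-peer counter is multiplied by a
# decay factor on every gossipsub heartbeat so it shrinks towards zero over a
# chosen time window. A hedged sketch of that derivation, assuming a 1-second
# heartbeat and a "decay to 1%" convention (the script's exact settings may
# differ):
when false:
  import std/math

  func scoreParameterDecay(decayTimeSeconds: float): float =
    ## Factor f such that f^decayTimeSeconds ~= 0.01, i.e. a counter decays to
    ## roughly 1% of its value over `decayTimeSeconds` one-second heartbeats.
    pow(0.01, 1.0 / decayTimeSeconds)

  # Example: a 100-epoch window on mainnet corresponds to 100 * 32 * 12 seconds.
  echo scoreParameterDecay(100.0 * 32 * 12)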
|
|
|
|
|
2021-12-21 14:24:23 +00:00
|
|
|
proc addPhase0MessageHandlers(
|
|
|
|
node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
|
|
|
|
node.network.subscribe(
|
|
|
|
getBeaconBlocksTopic(forkDigest), blocksTopicParams,
|
|
|
|
enableTopicMetrics = true)
|
2021-08-09 12:54:45 +00:00
|
|
|
node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams)
|
|
|
|
node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams)
|
|
|
|
node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams)
|
2021-12-21 14:24:23 +00:00
|
|
|
node.network.subscribe(
|
|
|
|
getAggregateAndProofsTopic(forkDigest), aggregateTopicParams,
|
|
|
|
enableTopicMetrics = true)
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
  # updateAttestationSubnetHandlers subscribes to attestation subnets
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-08-18 12:30:05 +00:00
|
|
|
proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
|
2021-08-09 12:54:45 +00:00
|
|
|
node.network.unsubscribe(getBeaconBlocksTopic(forkDigest))
|
|
|
|
node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest))
|
|
|
|
node.network.unsubscribe(getProposerSlashingsTopic(forkDigest))
|
|
|
|
node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest))
|
|
|
|
node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest))
|
2020-12-24 08:48:52 +00:00
|
|
|
|
2022-01-08 23:28:49 +00:00
|
|
|
for subnet_id in SubnetId:
|
|
|
|
node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id))
|
2020-09-15 12:40:43 +00:00
|
|
|
|
2021-10-21 13:09:19 +00:00
|
|
|
node.actionTracker.subscribedSubnets = default(AttnetBits)
|
2021-10-18 09:11:44 +00:00
|
|
|
|
2022-01-24 20:40:59 +00:00
|
|
|
func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto =
|
|
|
|
  # Only used to determine which gossip topics to subscribe to
|
2022-02-04 12:34:03 +00:00
|
|
|
if node.config.subscribeAllSubnets:
|
2022-01-24 20:40:59 +00:00
|
|
|
(func(pubkey: ValidatorPubKey): bool {.closure.} = true)
|
|
|
|
else:
|
|
|
|
(func(pubkey: ValidatorPubKey): bool =
|
|
|
|
node.syncCommitteeMsgPool.syncCommitteeSubscriptions.getOrDefault(
|
|
|
|
pubkey, GENESIS_EPOCH) >= epoch or
|
|
|
|
pubkey in node.attachedValidators.validators)
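# Returning a predicate (rather than a bool) lets callers such as
# `getSyncSubnets` evaluate the `subscribeAllSubnets` / key-lookup decision
# once and then apply it to every sync committee member. Illustrative use,
# with `node` and `syncCommittee` standing in for real state:
when false:
  let isTracked = node.hasSyncPubKey(Epoch(123))
  for pubkey in syncCommittee.pubkeys:
    if isTracked(pubkey):
      discard # this member contributes to a subnet we want to subscribe to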
|
|
|
|
|
2021-11-14 08:00:25 +00:00
|
|
|
proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
|
|
|
|
node.addPhase0MessageHandlers(forkDigest, slot)
|
2020-09-15 12:40:43 +00:00
|
|
|
|
2022-01-24 20:40:59 +00:00
|
|
|
# If this comes online near sync committee period, it'll immediately get
|
|
|
|
# replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
|
|
|
|
let
|
|
|
|
syncCommittee =
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(node.dag.headState):
|
2022-01-24 20:40:59 +00:00
|
|
|
when stateFork >= BeaconStateFork.Altair:
|
|
|
|
state.data.current_sync_committee
|
|
|
|
else:
|
|
|
|
default(SyncCommittee)
|
|
|
|
|
|
|
|
currentSyncCommitteeSubnets = getSyncSubnets(
|
|
|
|
node.hasSyncPubKey(slot.epoch), syncCommittee)
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2022-01-08 23:28:49 +00:00
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
2022-01-24 20:40:59 +00:00
|
|
|
if currentSyncCommitteeSubnets[subcommitteeIdx]:
|
|
|
|
node.network.subscribe(
|
|
|
|
getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams)
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2021-11-14 08:00:25 +00:00
|
|
|
node.network.subscribe(
|
|
|
|
getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams)
|
2022-01-24 20:40:59 +00:00
|
|
|
|
|
|
|
node.network.updateSyncnetsMetadata(currentSyncCommitteeSubnets)
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2022-04-01 21:58:06 +00:00
|
|
|
if node.config.serveLightClientData.get:
|
2022-03-14 13:05:38 +00:00
|
|
|
node.network.subscribe(
|
|
|
|
getOptimisticLightClientUpdateTopic(forkDigest), basicParams)
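# `getSyncSubnets` partitions the 512-member sync committee into
# SYNC_COMMITTEE_SUBNET_COUNT (4) equal subcommittees and flags every subnet
# containing at least one key the predicate accepts. A hedged sketch of the
# underlying index arithmetic (plain ints instead of the real key/bit types):
when false:
  const
    SyncCommitteeSize = 512
    SyncCommitteeSubnetCount = 4
    SubcommitteeSize = SyncCommitteeSize div SyncCommitteeSubnetCount # 128

  func subnetForMember(memberIndex: int): int =
    ## Committee member i belongs to subcommittee i div 128.
    memberIndex div SubcommitteeSize

  doAssert subnetForMember(0) == 0
  doAssert subnetForMember(127) == 0
  doAssert subnetForMember(128) == 1
  doAssert subnetForMember(511) == 3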
|
|
|
|
|
2021-11-14 08:00:25 +00:00
|
|
|
proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
|
|
|
|
node.removePhase0MessageHandlers(forkDigest)
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2022-01-08 23:28:49 +00:00
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
2021-08-29 05:58:27 +00:00
|
|
|
closureScope:
|
2022-01-08 23:28:49 +00:00
|
|
|
let idx = subcommitteeIdx
|
2021-11-14 08:00:25 +00:00
|
|
|
node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx))
|
|
|
|
|
|
|
|
node.network.unsubscribe(
|
|
|
|
getSyncCommitteeContributionAndProofTopic(forkDigest))
|
|
|
|
|
2022-04-01 21:58:06 +00:00
|
|
|
if node.config.serveLightClientData.get:
|
2022-03-14 13:05:38 +00:00
|
|
|
node.network.unsubscribe(getOptimisticLightClientUpdateTopic(forkDigest))
|
|
|
|
|
2022-01-24 20:40:59 +00:00
|
|
|
proc trackCurrentSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
|
|
|
|
# Unlike trackNextSyncCommitteeTopics, just snap to the currently correct
|
|
|
|
# set of subscriptions, and use current_sync_committee. Furthermore, this
|
|
|
|
# is potentially useful at arbitrary times, so don't guard it by checking
|
|
|
|
# for epoch alignment.
|
|
|
|
let
|
|
|
|
syncCommittee =
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(node.dag.headState):
|
2022-01-24 20:40:59 +00:00
|
|
|
when stateFork >= BeaconStateFork.Altair:
|
|
|
|
state.data.current_sync_committee
|
|
|
|
else:
|
|
|
|
default(SyncCommittee)
|
|
|
|
currentSyncCommitteeSubnets =
|
|
|
|
getSyncSubnets(node.hasSyncPubKey(slot.epoch), syncCommittee)
|
|
|
|
|
|
|
|
debug "trackCurrentSyncCommitteeTopics: aligning with sync committee subnets",
|
|
|
|
currentSyncCommitteeSubnets,
|
|
|
|
metadata_syncnets = node.network.metadata.syncnets,
|
|
|
|
gossipState = node.gossipState
|
|
|
|
|
|
|
|
# Assume that different gossip fork sync committee setups are in sync; this
|
|
|
|
# only remains relevant, currently, for one gossip transition epoch, so the
|
|
|
|
# consequences of this not being true aren't exceptionally dire, while this
|
|
|
|
  # allows for bookkeeping simplification.
|
|
|
|
if currentSyncCommitteeSubnets == node.network.metadata.syncnets:
|
|
|
|
return
|
|
|
|
|
|
|
|
let
|
|
|
|
newSyncSubnets =
|
|
|
|
currentSyncCommitteeSubnets - node.network.metadata.syncnets
|
|
|
|
oldSyncSubnets =
|
|
|
|
node.network.metadata.syncnets - currentSyncCommitteeSubnets
|
|
|
|
forkDigests = node.forkDigests()
|
|
|
|
|
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
|
|
|
doAssert not (newSyncSubnets[subcommitteeIdx] and
|
|
|
|
oldSyncSubnets[subcommitteeIdx])
|
|
|
|
for gossipFork in node.gossipState:
|
|
|
|
template topic(): auto =
|
|
|
|
getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx)
|
|
|
|
if oldSyncSubnets[subcommitteeIdx]:
|
|
|
|
node.network.unsubscribe(topic)
|
|
|
|
elif newSyncSubnets[subcommitteeIdx]:
|
|
|
|
node.network.subscribe(topic, basicParams)
|
|
|
|
|
|
|
|
node.network.updateSyncnetsMetadata(currentSyncCommitteeSubnets)
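# Note that the `newSyncSubnets` and `oldSyncSubnets` differences computed
# above are disjoint by construction - an index cannot be both in
# `current - metadata` and in `metadata - current` - which is what the
# `doAssert` in the loop documents. A tiny illustration with built-in sets
# (not the real SyncnetBits type):
when false:
  let
    current = {0'u8, 2}
    metadata = {1'u8, 2}
    newOnes = current - metadata # {0}
    oldOnes = metadata - current # {1}
  doAssert card(newOnes * oldOnes) == 0 # the two diffs never overlap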
|
|
|
|
|
|
|
|
proc trackNextSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
|
|
|
|
let
|
|
|
|
epoch = slot.epoch
|
|
|
|
epochToSyncPeriod = nearSyncCommitteePeriod(epoch)
|
|
|
|
|
|
|
|
if epochToSyncPeriod.isNone or
|
|
|
|
forkVersionAtEpoch(node.dag.cfg, epoch + epochToSyncPeriod.get) ==
|
|
|
|
node.dag.cfg.GENESIS_FORK_VERSION:
|
|
|
|
return
|
|
|
|
|
|
|
|
if epochToSyncPeriod.get == 0:
|
|
|
|
node.trackCurrentSyncCommitteeTopics(slot)
|
|
|
|
return
|
|
|
|
|
|
|
|
let
|
|
|
|
syncCommittee =
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(node.dag.headState):
|
2022-01-24 20:40:59 +00:00
|
|
|
when stateFork >= BeaconStateFork.Altair:
|
|
|
|
state.data.next_sync_committee
|
|
|
|
else:
|
|
|
|
default(SyncCommittee)
|
|
|
|
nextSyncCommitteeSubnets = getSyncSubnets(
|
|
|
|
node.hasSyncPubKey(epoch + epochToSyncPeriod.get), syncCommittee)
|
|
|
|
forkDigests = node.forkDigests()
|
|
|
|
|
|
|
|
var newSubcommittees: SyncnetBits
|
|
|
|
|
2022-03-02 10:00:21 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/altair/validator.md#sync-committee-subnet-stability
|
2022-01-24 20:40:59 +00:00
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
|
|
|
if (not node.network.metadata.syncnets[subcommitteeIdx]) and
|
|
|
|
nextSyncCommitteeSubnets[subcommitteeIdx] and
|
|
|
|
node.syncCommitteeMsgPool[].isEpochLeadTime(epochToSyncPeriod.get):
|
|
|
|
for gossipFork in node.gossipState:
|
|
|
|
node.network.subscribe(getSyncCommitteeTopic(
|
|
|
|
forkDigests[gossipFork], subcommitteeIdx), basicParams)
|
|
|
|
newSubcommittees.setBit(distinctBase(subcommitteeIdx))
|
|
|
|
|
|
|
|
debug "trackNextSyncCommitteeTopics: subscribing to sync committee subnets",
|
|
|
|
metadata_syncnets = node.network.metadata.syncnets,
|
|
|
|
nextSyncCommitteeSubnets,
|
|
|
|
gossipState = node.gossipState,
|
|
|
|
epochsToSyncPeriod = epochToSyncPeriod.get,
|
|
|
|
newSubcommittees
|
|
|
|
|
|
|
|
node.network.updateSyncnetsMetadata(
|
|
|
|
node.network.metadata.syncnets + newSubcommittees)
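# Subscriptions to the *next* sync committee's subnets are meant to be spread
# out over the tail of the current period instead of happening all at once at
# the boundary. A hedged sketch of one possible lead-time rule (a per-call
# random threshold over the remaining epochs; the real `isEpochLeadTime`
# policy may differ):
when false:
  import std/random

  proc shouldSubscribeEarly(epochsUntilNextPeriod: int): bool =
    ## Subscribe with probability 1/epochsUntilNextPeriod so that, by the time
    ## the next period starts, most nodes have already joined the topic - but
    ## not all during the same epoch.
    epochsUntilNextPeriod <= 1 or rand(epochsUntilNextPeriod - 1) == 0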
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
|
|
|
|
## Subscribe to subnets that we are providing stability for or aggregating
|
|
|
|
## and unsubscribe from the ones that are no longer relevant.
|
|
|
|
|
|
|
|
# Let the tracker know what duties are approaching - this will tell us how
|
|
|
|
# many stability subnets we need to be subscribed to and what subnets we'll
|
|
|
|
# soon be aggregating - in addition to the in-beacon-node duties, there may
|
|
|
|
# also be duties coming from the validator client, but we don't control when
|
|
|
|
# these arrive
|
|
|
|
await node.registerDuties(slot)
|
|
|
|
|
|
|
|
# We start subscribing to gossip before we're fully synced - this allows time
|
|
|
|
# to subscribe before the sync end game
|
2020-12-01 10:43:02 +00:00
|
|
|
const
|
|
|
|
TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64
|
|
|
|
HYSTERESIS_BUFFER = 16
|
|
|
|
|
|
|
|
let
|
2021-10-18 09:11:44 +00:00
|
|
|
head = node.dag.head
|
|
|
|
headDistance =
|
|
|
|
if slot > head.slot: (slot - head.slot).uint64
|
|
|
|
else: 0'u64
|
2021-08-09 12:54:45 +00:00
|
|
|
targetGossipState =
|
2021-12-21 14:24:23 +00:00
|
|
|
getTargetGossipState(
|
|
|
|
slot.epoch,
|
|
|
|
node.dag.cfg.ALTAIR_FORK_EPOCH,
|
2022-02-02 13:06:55 +00:00
|
|
|
node.dag.cfg.BELLATRIX_FORK_EPOCH,
|
2021-12-21 14:24:23 +00:00
|
|
|
headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER)
|
|
|
|
|
|
|
|
doAssert targetGossipState.card <= 2
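  # `headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER` marks
  # the node as too far behind to gossip; the extra buffer gives slack so that
  # hovering right around the subscribe threshold doesn't keep toggling topic
  # subscriptions. Illustrative check with the constants above:
  when false:
    func isBehind(headDistance: uint64): bool =
      headDistance > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER
    doAssert not isBehind(64) # at the threshold: still treated as in sync
    doAssert not isBehind(80) # within the buffer: no change yet
    doAssert isBehind(81)     # clearly behind: gossip topics get dropped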
|
|
|
|
|
|
|
|
let
|
|
|
|
newGossipForks = targetGossipState - node.gossipState
|
|
|
|
oldGossipForks = node.gossipState - targetGossipState
|
|
|
|
|
|
|
|
doAssert newGossipForks.card <= 2
|
|
|
|
doAssert oldGossipForks.card <= 2
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-12-21 14:24:23 +00:00
|
|
|
func maxGossipFork(gossipState: GossipState): int =
|
|
|
|
var res = -1
|
|
|
|
for gossipFork in gossipState:
|
|
|
|
res = max(res, gossipFork.int)
|
|
|
|
res
|
|
|
|
|
|
|
|
if maxGossipFork(targetGossipState) < maxGossipFork(node.gossipState) and
|
|
|
|
targetGossipState != {}:
|
|
|
|
warn "Unexpected clock regression during transition",
|
|
|
|
targetGossipState,
|
|
|
|
gossipState = node.gossipState
|
|
|
|
|
|
|
|
if node.gossipState.card == 0 and targetGossipState.card > 0:
|
2021-08-09 12:54:45 +00:00
|
|
|
# We are synced, so we will connect
|
2020-12-01 10:43:02 +00:00
|
|
|
debug "Enabling topic subscriptions",
|
|
|
|
wallSlot = slot,
|
2021-10-18 09:11:44 +00:00
|
|
|
headSlot = head.slot,
|
|
|
|
headDistance, targetGossipState
|
2020-12-01 10:43:02 +00:00
|
|
|
|
2022-01-03 21:18:49 +00:00
|
|
|
node.processor[].setupDoppelgangerDetection(slot)
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
    # Especially when waiting for genesis, we'll already be synced on startup -
|
|
|
|
# it might also happen on a sufficiently fast restart
|
|
|
|
|
|
|
|
# We "know" the actions for the current and the next epoch
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(node.dag.headState):
|
2022-03-15 08:24:55 +00:00
|
|
|
if node.actionTracker.needsUpdate(state, slot.epoch):
|
|
|
|
let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect(
|
|
|
|
"Getting head EpochRef should never fail")
|
|
|
|
node.actionTracker.updateActions(epochRef)
|
2022-02-04 11:25:32 +00:00
|
|
|
|
2022-03-15 08:24:55 +00:00
|
|
|
if node.actionTracker.needsUpdate(state, slot.epoch + 1):
|
|
|
|
let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
|
|
|
|
"Getting head EpochRef should never fail")
|
|
|
|
node.actionTracker.updateActions(epochRef)
|
2021-10-18 09:11:44 +00:00
|
|
|
|
2021-12-21 14:24:23 +00:00
|
|
|
if node.gossipState.card > 0 and targetGossipState.card == 0:
|
|
|
|
debug "Disabling topic subscriptions",
|
|
|
|
wallSlot = slot,
|
|
|
|
headSlot = head.slot,
|
|
|
|
headDistance
|
|
|
|
|
2022-01-24 20:40:59 +00:00
|
|
|
let forkDigests = node.forkDigests()
|
2021-12-21 14:24:23 +00:00
|
|
|
|
|
|
|
const removeMessageHandlers: array[BeaconStateFork, auto] = [
|
|
|
|
removePhase0MessageHandlers,
|
|
|
|
removeAltairMessageHandlers,
|
|
|
|
removeAltairMessageHandlers # with different forkDigest
|
|
|
|
]
|
|
|
|
|
|
|
|
for gossipFork in oldGossipForks:
|
|
|
|
removeMessageHandlers[gossipFork](node, forkDigests[gossipFork])
|
|
|
|
|
|
|
|
const addMessageHandlers: array[BeaconStateFork, auto] = [
|
|
|
|
addPhase0MessageHandlers,
|
|
|
|
addAltairMessageHandlers,
|
|
|
|
addAltairMessageHandlers # with different forkDigest
|
|
|
|
]
|
|
|
|
|
|
|
|
for gossipFork in newGossipForks:
|
|
|
|
addMessageHandlers[gossipFork](node, forkDigests[gossipFork], slot)
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
node.gossipState = targetGossipState
|
|
|
|
node.updateAttestationSubnetHandlers(slot)
|
2020-12-01 10:43:02 +00:00
|
|
|
|
2021-02-14 15:37:32 +00:00
|
|
|
proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
|
2020-12-18 21:01:24 +00:00
|
|
|
# Things we do when slot processing has ended and we're about to wait for the
|
|
|
|
# next slot
|
|
|
|
|
2021-06-01 11:13:40 +00:00
|
|
|
if node.dag.needStateCachesAndForkChoicePruning():
|
2021-05-10 14:32:28 +00:00
|
|
|
if node.attachedValidators.validators.len > 0:
|
|
|
|
node.attachedValidators
|
|
|
|
.slashingProtection
|
|
|
|
# pruning is only done if the DB is set to pruning mode.
|
|
|
|
.pruneAfterFinalization(
|
2022-01-11 10:01:54 +00:00
|
|
|
node.dag.finalizedHead.slot.epoch()
|
2021-05-10 14:32:28 +00:00
|
|
|
)
|
|
|
|
|
2021-03-09 14:36:17 +00:00
|
|
|
# Delay part of pruning until latency critical duties are done.
|
|
|
|
# The other part of pruning, `pruneBlocksDAG`, is done eagerly.
|
2021-05-10 14:32:28 +00:00
|
|
|
# ----
|
|
|
|
# This is the last pruning to do as it clears the "needPruning" condition.
|
2021-03-11 10:10:57 +00:00
|
|
|
node.consensusManager[].pruneStateCachesAndForkChoice()
|
2021-03-09 14:36:17 +00:00
|
|
|
|
2020-12-18 21:01:24 +00:00
|
|
|
when declared(GC_fullCollect):
|
|
|
|
# The slots in the beacon node work as frames in a game: we want to make
|
|
|
|
# sure that we're ready for the next one and don't get stuck in lengthy
|
|
|
|
    # garbage collection tasks when time is of the essence in the middle of a slot -
|
|
|
|
# while this does not guarantee that we'll never collect during a slot, it
|
|
|
|
# makes sure that all the scratch space we used during slot tasks (logging,
|
|
|
|
# temporary buffers etc) gets recycled for the next slot that is likely to
|
|
|
|
# need similar amounts of memory.
|
2022-03-22 08:42:28 +00:00
|
|
|
try:
|
|
|
|
GC_fullCollect()
|
|
|
|
except Defect as exc:
|
|
|
|
raise exc # Reraise to maintain call stack
|
|
|
|
except Exception as exc:
|
|
|
|
# TODO upstream
|
|
|
|
raiseAssert "Unexpected exception during GC collection"
|
2020-12-18 21:01:24 +00:00
|
|
|
|
|
|
|
# Checkpoint the database to clear the WAL file and make sure changes in
|
|
|
|
# the database are synced with the filesystem.
|
2021-01-18 10:02:56 +00:00
|
|
|
node.db.checkpoint()
|
2020-12-18 21:01:24 +00:00
|
|
|
|
2021-08-30 01:00:37 +00:00
|
|
|
node.syncCommitteeMsgPool[].pruneData(slot)
|
2022-01-24 20:40:59 +00:00
|
|
|
if slot.is_epoch:
|
|
|
|
node.trackNextSyncCommitteeTopics(slot)
|
2021-08-28 22:27:51 +00:00
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
# Update upcoming actions - we do this every slot in case a reorg happens
|
2022-02-04 11:25:32 +00:00
|
|
|
let head = node.dag.head
|
|
|
|
if node.isSynced(head):
|
2022-03-16 07:20:40 +00:00
|
|
|
withState(node.dag.headState):
|
2022-03-15 08:24:55 +00:00
|
|
|
if node.actionTracker.needsUpdate(state, slot.epoch + 1):
|
|
|
|
let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
|
|
|
|
"Getting head EpochRef should never fail")
|
|
|
|
node.actionTracker.updateActions(epochRef)
|
2021-10-18 09:11:44 +00:00
|
|
|
|
|
|
|
let
|
2022-01-03 21:18:49 +00:00
|
|
|
nextAttestationSlot = node.actionTracker.getNextAttestationSlot(slot)
|
|
|
|
nextProposalSlot = node.actionTracker.getNextProposalSlot(slot)
|
2021-10-18 09:11:44 +00:00
|
|
|
nextActionWaitTime = saturate(fromNow(
|
|
|
|
node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))
|
|
|
|
|
2021-03-24 10:05:04 +00:00
|
|
|
# -1 is a more useful output than 18446744073709551615 as an indicator of
|
|
|
|
# no future attestation/proposal known.
|
|
|
|
template displayInt64(x: Slot): int64 =
|
|
|
|
if x == high(uint64).Slot:
|
|
|
|
-1'i64
|
|
|
|
else:
|
|
|
|
toGaugeValue(x)
|
|
|
|
|
2020-12-18 21:01:24 +00:00
|
|
|
info "Slot end",
|
|
|
|
slot = shortLog(slot),
|
2021-02-14 15:37:32 +00:00
|
|
|
nextActionWait =
|
|
|
|
if nextAttestationSlot == FAR_FUTURE_SLOT:
|
|
|
|
"n/a"
|
|
|
|
else:
|
2021-11-02 17:06:36 +00:00
|
|
|
shortLog(nextActionWaitTime),
|
|
|
|
nextAttestationSlot = displayInt64(nextAttestationSlot),
|
|
|
|
nextProposalSlot = displayInt64(nextProposalSlot),
|
2022-02-04 11:25:32 +00:00
|
|
|
head = shortLog(head)
|
2020-12-18 21:01:24 +00:00
|
|
|
|
2021-03-12 09:46:26 +00:00
|
|
|
if nextAttestationSlot != FAR_FUTURE_SLOT:
|
|
|
|
next_action_wait.set(nextActionWaitTime.toFloatSeconds)
|
|
|
|
|
2021-08-24 19:49:51 +00:00
|
|
|
let epoch = slot.epoch
|
2021-09-29 11:06:16 +00:00
|
|
|
if epoch + 1 >= node.network.forkId.next_fork_epoch:
|
|
|
|
# Update 1 epoch early to block non-fork-ready peers
|
|
|
|
node.network.updateForkId(epoch, node.dag.genesisValidatorsRoot)
|
2021-08-24 19:49:51 +00:00
|
|
|
|
2021-06-01 11:13:40 +00:00
|
|
|
# When we're not behind schedule, we'll speculatively update the clearance
|
|
|
|
# state in anticipation of receiving the next block - we do it after logging
|
|
|
|
# slot end since the nextActionWaitTime can be short
|
|
|
|
let
|
2021-08-20 08:58:15 +00:00
|
|
|
advanceCutoff = node.beaconClock.fromNow(
|
2022-01-11 10:01:54 +00:00
|
|
|
slot.start_beacon_time() + chronos.seconds(int(SECONDS_PER_SLOT - 1)))
|
2021-06-01 11:13:40 +00:00
|
|
|
if advanceCutoff.inFuture:
|
|
|
|
# We wait until there's only a second left before the next slot begins, then
|
|
|
|
# we advance the clearance state to the next slot - this gives us a high
|
|
|
|
# probability of being prepared for the block that will arrive and the
|
|
|
|
# epoch processing that follows
|
|
|
|
await sleepAsync(advanceCutoff.offset)
|
|
|
|
node.dag.advanceClearanceState()
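  # The cutoff above is simply "one second before the next slot starts", i.e.
  # slot start + (SECONDS_PER_SLOT - 1). A tiny arithmetic sketch with
  # mainnet's 12-second slots (plain integers rather than BeaconTime):
  when false:
    const SecondsPerSlotSketch = 12
    func advanceCutoffSeconds(slotStartSeconds: int): int =
      ## Wall-clock second at which the next slot's state gets precomputed.
      slotStartSeconds + SecondsPerSlotSketch - 1
    doAssert advanceCutoffSeconds(0) == 11 # one second before the slot at t = 12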
|
|
|
|
|
2021-10-18 09:11:44 +00:00
|
|
|
# Prepare action tracker for the next slot
|
|
|
|
node.actionTracker.updateSlot(slot + 1)
|
|
|
|
|
|
|
|
# The last thing we do is to perform the subscriptions and unsubscriptions for
|
|
|
|
  # the next slot, just before that slot starts - because of the advance cutoff
|
|
|
|
# above, this will be done just before the next slot starts
|
|
|
|
await node.updateGossipStatus(slot + 1)
|
|
|
|
|
2022-02-17 11:53:55 +00:00
|
|
|
func syncStatus(node: BeaconNode): string =
|
2022-03-29 07:15:42 +00:00
|
|
|
if node.syncManager.inProgress:
|
|
|
|
node.syncManager.syncStatus
|
|
|
|
elif node.backfiller.inProgress:
|
|
|
|
"backfill: " & node.backfiller.syncStatus
|
|
|
|
else:
|
|
|
|
"synced"
|
2022-01-20 07:25:45 +00:00
|
|
|
|
2021-03-01 16:36:06 +00:00
|
|
|
proc onSlotStart(
|
|
|
|
node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot) {.async.} =
|
2019-03-22 15:49:37 +00:00
|
|
|
## Called at the beginning of a slot - usually every slot, but sometimes might
|
|
|
|
## skip a few in case we're running late.
|
2021-03-01 16:36:06 +00:00
|
|
|
## wallTime: current system time - we will strive to perform all duties up
|
|
|
|
## to this point in time
|
2020-06-26 13:51:20 +00:00
|
|
|
## lastSlot: the last slot that we successfully processed, so we know where to
|
2021-03-01 16:36:06 +00:00
|
|
|
## start work from - there might be jumps if processing is delayed
|
2019-03-22 15:49:37 +00:00
|
|
|
let
|
|
|
|
# The slot we should be at, according to the clock
|
2021-03-01 16:36:06 +00:00
|
|
|
wallSlot = wallTime.slotOrZero
|
|
|
|
# If everything was working perfectly, the slot that we should be processing
|
|
|
|
expectedSlot = lastSlot + 1
|
2022-01-11 10:01:54 +00:00
|
|
|
finalizedEpoch = node.dag.finalizedHead.blck.slot.epoch()
|
|
|
|
delay = wallTime - expectedSlot.start_beacon_time()
|
2020-12-18 21:01:24 +00:00
|
|
|
|
2019-12-23 15:34:09 +00:00
|
|
|
info "Slot start",
|
2021-11-02 17:06:36 +00:00
|
|
|
slot = shortLog(wallSlot),
|
|
|
|
epoch = shortLog(wallSlot.epoch),
|
2022-01-20 07:25:45 +00:00
|
|
|
sync = node.syncStatus(),
|
2021-11-02 17:06:36 +00:00
|
|
|
peers = len(node.network.peerPool),
|
|
|
|
head = shortLog(node.dag.head),
|
|
|
|
finalized = shortLog(getStateField(
|
2022-03-16 07:20:40 +00:00
|
|
|
node.dag.headState, finalized_checkpoint)),
|
2021-11-02 17:06:36 +00:00
|
|
|
delay = shortLog(delay)
|
2019-03-22 15:49:37 +00:00
|
|
|
|
2020-02-17 18:24:14 +00:00
|
|
|
# Check before any re-scheduling of onSlotStart()
|
2021-03-01 16:36:06 +00:00
|
|
|
checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch)
|
2020-02-17 18:24:14 +00:00
|
|
|
|
2022-02-27 11:02:45 +00:00
|
|
|
when defined(windows):
|
|
|
|
if node.config.runAsService:
|
|
|
|
reportServiceStatus(SERVICE_RUNNING, NO_ERROR, 0)
|
|
|
|
|
2021-03-01 19:55:25 +00:00
|
|
|
beacon_slot.set wallSlot.toGaugeValue
|
|
|
|
beacon_current_epoch.set wallSlot.epoch.toGaugeValue
|
2019-12-23 15:34:09 +00:00
|
|
|
|
2021-03-01 19:55:25 +00:00
|
|
|
# both non-negative, so difference can't overflow or underflow int64
|
|
|
|
finalization_delay.set(
|
|
|
|
wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue)
|
2019-03-22 15:49:37 +00:00
|
|
|
|
2021-03-01 16:36:06 +00:00
|
|
|
if node.config.verifyFinalization:
|
|
|
|
verifyFinalization(node, wallSlot)
|
2019-03-22 15:49:37 +00:00
|
|
|
|
2021-03-11 10:10:57 +00:00
|
|
|
node.consensusManager[].updateHead(wallSlot)
|
2019-03-22 15:49:37 +00:00
|
|
|
|
2021-03-01 16:36:06 +00:00
|
|
|
await node.handleValidatorDuties(lastSlot, wallSlot)
|
2019-08-16 11:16:56 +00:00
|
|
|
|
2021-03-01 16:36:06 +00:00
|
|
|
await onSlotEnd(node, wallSlot)
|
2020-12-08 17:11:54 +00:00
|
|
|
|
2019-12-02 14:42:57 +00:00
|
|
|
proc handleMissingBlocks(node: BeaconNode) =
|
2021-12-06 09:49:01 +00:00
|
|
|
let missingBlocks = node.quarantine[].checkMissing()
|
2019-03-28 14:03:19 +00:00
|
|
|
if missingBlocks.len > 0:
|
2020-10-01 18:56:42 +00:00
|
|
|
debug "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
|
2020-06-18 10:03:36 +00:00
|
|
|
node.requestManager.fetchAncestorBlocks(missingBlocks)
|
2019-12-02 14:42:57 +00:00
|
|
|
|
2020-08-10 18:49:45 +00:00
|
|
|
proc onSecond(node: BeaconNode) =
|
2020-06-03 08:46:29 +00:00
|
|
|
## This procedure will be called once per second.
|
|
|
|
if not(node.syncManager.inProgress):
|
|
|
|
node.handleMissingBlocks()
|
|
|
|
|
2022-02-08 19:19:21 +00:00
|
|
|
# Nim GC metrics (for the main thread)
|
|
|
|
updateThreadMetrics()
|
|
|
|
|
2022-03-29 07:15:42 +00:00
|
|
|
if node.config.stopAtSyncedEpoch != 0 and
|
|
|
|
node.dag.head.slot.epoch >= node.config.stopAtSyncedEpoch:
|
2022-03-04 17:38:01 +00:00
|
|
|
notice "Shutting down after having reached the target synced epoch"
|
|
|
|
bnStatus = BeaconNodeStatus.Stopping
|
|
|
|
|
2020-06-03 08:46:29 +00:00
|
|
|
proc runOnSecondLoop(node: BeaconNode) {.async.} =
|
2020-08-10 18:49:45 +00:00
|
|
|
let sleepTime = chronos.seconds(1)
|
|
|
|
const nanosecondsIn1s = float(chronos.seconds(1).nanoseconds)
|
2020-06-03 08:46:29 +00:00
|
|
|
while true:
|
|
|
|
let start = chronos.now(chronos.Moment)
|
2020-08-10 18:49:45 +00:00
|
|
|
await chronos.sleepAsync(sleepTime)
|
|
|
|
let afterSleep = chronos.now(chronos.Moment)
|
|
|
|
let sleepTime = afterSleep - start
|
|
|
|
node.onSecond()
|
|
|
|
let finished = chronos.now(chronos.Moment)
|
|
|
|
let processingTime = finished - afterSleep
|
|
|
|
ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s)
|
2020-10-01 18:56:42 +00:00
|
|
|
trace "onSecond task completed", sleepTime, processingTime
|
2019-03-27 20:17:01 +00:00
|
|
|
|
2021-03-24 10:05:04 +00:00
|
|
|
func connectedPeersCount(node: BeaconNode): int =
|
2020-09-14 14:50:03 +00:00
|
|
|
len(node.network.peerPool)
|
2020-03-16 22:28:54 +00:00
|
|
|
|
2021-08-27 09:00:06 +00:00
|
|
|
proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) {.
|
|
|
|
raises: [Defect, CatchableError].} =
|
|
|
|
rpcServer.installBeaconApiHandlers(node)
|
|
|
|
rpcServer.installConfigApiHandlers(node)
|
|
|
|
rpcServer.installDebugApiHandlers(node)
|
|
|
|
rpcServer.installEventApiHandlers(node)
|
|
|
|
rpcServer.installNimbusApiHandlers(node)
|
|
|
|
rpcServer.installNodeApiHandlers(node)
|
|
|
|
rpcServer.installValidatorApiHandlers(node)
|
2020-03-16 22:28:54 +00:00
|
|
|
|
2021-03-17 18:46:45 +00:00
|
|
|
proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) =
|
|
|
|
restServer.router.installBeaconApiHandlers(node)
|
|
|
|
restServer.router.installConfigApiHandlers(node)
|
|
|
|
restServer.router.installDebugApiHandlers(node)
|
2021-03-23 22:50:18 +00:00
|
|
|
restServer.router.installEventApiHandlers(node)
|
|
|
|
restServer.router.installNimbusApiHandlers(node)
|
2021-03-17 18:46:45 +00:00
|
|
|
restServer.router.installNodeApiHandlers(node)
|
2021-03-23 22:50:18 +00:00
|
|
|
restServer.router.installValidatorApiHandlers(node)
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2020-08-17 12:07:29 +00:00
|
|
|
proc installMessageValidators(node: BeaconNode) =
|
2022-03-16 10:40:35 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/p2p-interface.md#attestations-and-aggregation
|
2020-08-12 17:48:31 +00:00
|
|
|
# These validators stay around the whole time, regardless of which specific
|
|
|
|
# subnets are subscribed to during any given epoch.
|
2022-03-29 07:15:42 +00:00
|
|
|
let forkDigests = node.dag.forkDigests
|
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
func toValidationResult(res: ValidationRes): ValidationResult =
|
|
|
|
if res.isOk(): ValidationResult.Accept else: res.error()[0]
|
2020-08-17 12:07:29 +00:00
|
|
|
|
2020-08-20 16:30:47 +00:00
|
|
|
node.network.addValidator(
|
2022-03-29 07:15:42 +00:00
|
|
|
getBeaconBlocksTopic(forkDigests.phase0),
|
2021-07-15 19:01:07 +00:00
|
|
|
proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
|
2021-12-20 19:20:31 +00:00
|
|
|
toValidationResult(node.processor[].blockValidator(
|
|
|
|
MsgSource.gossip, signedBlock)))
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
template installPhase0Validators(digest: auto) =
|
2022-01-08 23:28:49 +00:00
|
|
|
for it in SubnetId:
|
2021-11-05 15:39:47 +00:00
|
|
|
closureScope:
|
2022-01-08 23:28:49 +00:00
|
|
|
let subnet_id = it
|
2021-11-05 15:39:47 +00:00
|
|
|
node.network.addAsyncValidator(
|
|
|
|
getAttestationTopic(digest, subnet_id),
|
|
|
|
# This proc needs to be within closureScope; don't lift out of loop.
|
|
|
|
proc(attestation: Attestation): Future[ValidationResult] {.async.} =
|
|
|
|
return toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
await node.processor.attestationValidator(
|
|
|
|
MsgSource.gossip, attestation, subnet_id)))
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
node.network.addAsyncValidator(
|
|
|
|
getAggregateAndProofsTopic(digest),
|
|
|
|
proc(signedAggregateAndProof: SignedAggregateAndProof):
|
|
|
|
Future[ValidationResult] {.async.} =
|
|
|
|
return toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
await node.processor.aggregateValidator(
|
|
|
|
MsgSource.gossip, signedAggregateAndProof)))
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
node.network.addValidator(
|
|
|
|
getAttesterSlashingsTopic(digest),
|
|
|
|
proc (attesterSlashing: AttesterSlashing): ValidationResult =
|
|
|
|
toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
node.processor[].attesterSlashingValidator(
|
|
|
|
MsgSource.gossip, attesterSlashing)))
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
node.network.addValidator(
|
|
|
|
getProposerSlashingsTopic(digest),
|
|
|
|
proc (proposerSlashing: ProposerSlashing): ValidationResult =
|
|
|
|
toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
node.processor[].proposerSlashingValidator(
|
|
|
|
MsgSource.gossip, proposerSlashing)))
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
node.network.addValidator(
|
|
|
|
getVoluntaryExitsTopic(digest),
|
|
|
|
proc (signedVoluntaryExit: SignedVoluntaryExit): ValidationResult =
|
|
|
|
toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
node.processor[].voluntaryExitValidator(
|
|
|
|
MsgSource.gossip, signedVoluntaryExit)))
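  # `closureScope` matters in the loop above because the loop variable is a
  # single location reused on every iteration; copying it into a fresh `let`
  # inside `closureScope` gives each per-subnet validator its own value. A
  # minimal illustration of the pattern, independent of the networking code:
  when false:
    var callbacks: seq[proc (): int {.closure.}]
    for it in 0 ..< 3:
      closureScope:
        let captured = it
        callbacks.add(proc (): int = captured)
    for i, cb in callbacks:
      doAssert cb() == i # each closure kept its own copy of the loop value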
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2022-03-29 07:15:42 +00:00
|
|
|
installPhase0Validators(forkDigests.phase0)
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2021-11-13 21:26:02 +00:00
|
|
|
# Validators introduced in phase0 are also used in altair and merge, but with
|
|
|
|
# different fork digest
|
2022-03-29 07:15:42 +00:00
|
|
|
installPhase0Validators(forkDigests.altair)
|
|
|
|
installPhase0Validators(forkDigests.bellatrix)
|
2021-08-09 12:54:45 +00:00
|
|
|
|
|
|
|
node.network.addValidator(
|
2022-03-29 07:15:42 +00:00
|
|
|
getBeaconBlocksTopic(forkDigests.altair),
|
2021-08-09 12:54:45 +00:00
|
|
|
proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
|
2021-12-20 19:20:31 +00:00
|
|
|
toValidationResult(node.processor[].blockValidator(
|
|
|
|
MsgSource.gossip, signedBlock)))
|
2021-08-09 12:54:45 +00:00
|
|
|
|
2021-12-08 17:29:22 +00:00
|
|
|
node.network.addValidator(
|
2022-03-29 07:15:42 +00:00
|
|
|
getBeaconBlocksTopic(forkDigests.bellatrix),
|
2022-01-18 13:36:52 +00:00
|
|
|
proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
|
2021-12-20 19:20:31 +00:00
|
|
|
toValidationResult(node.processor[].blockValidator(
|
|
|
|
MsgSource.gossip, signedBlock)))
|
2021-12-08 17:29:22 +00:00
|
|
|
|
2021-11-13 21:26:02 +00:00
|
|
|
  template installSyncCommitteeValidators(digest: auto) =
|
2022-01-08 23:28:49 +00:00
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
2021-11-13 21:26:02 +00:00
|
|
|
closureScope:
|
2022-01-08 23:28:49 +00:00
|
|
|
let idx = subcommitteeIdx
|
2021-12-09 12:56:54 +00:00
|
|
|
node.network.addAsyncValidator(
|
2021-11-13 21:26:02 +00:00
|
|
|
getSyncCommitteeTopic(digest, idx),
|
|
|
|
# This proc needs to be within closureScope; don't lift out of loop.
|
2021-12-09 12:56:54 +00:00
|
|
|
proc(msg: SyncCommitteeMessage): Future[ValidationResult] {.async.} =
|
|
|
|
return toValidationResult(
|
2021-12-20 19:20:31 +00:00
|
|
|
await node.processor.syncCommitteeMessageValidator(
|
|
|
|
MsgSource.gossip, msg, idx)))
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
node.network.addAsyncValidator(
|
2021-11-13 21:26:02 +00:00
|
|
|
getSyncCommitteeContributionAndProofTopic(digest),
|
2021-12-09 12:56:54 +00:00
|
|
|
proc(msg: SignedContributionAndProof): Future[ValidationResult] {.async.} =
|
|
|
|
return toValidationResult(
|
2022-03-29 07:15:42 +00:00
|
|
|
await node.processor.contributionValidator(
|
|
|
|
MsgSource.gossip, msg)))
|
2021-11-13 21:26:02 +00:00
|
|
|
|
2022-03-29 07:15:42 +00:00
|
|
|
  installSyncCommitteeValidators(forkDigests.altair)
|
|
|
|
  installSyncCommitteeValidators(forkDigests.bellatrix)
|
2021-08-29 05:58:27 +00:00
|
|
|
|
2022-03-14 13:05:38 +00:00
|
|
|
template installOptimisticLightClientUpdateValidator(digest: auto) =
|
|
|
|
node.network.addValidator(
|
|
|
|
getOptimisticLightClientUpdateTopic(digest),
|
|
|
|
proc(msg: OptimisticLightClientUpdate): ValidationResult =
|
2022-04-01 21:58:06 +00:00
|
|
|
if node.config.serveLightClientData.get:
|
2022-03-14 13:05:38 +00:00
|
|
|
toValidationResult(
|
|
|
|
node.processor[].optimisticLightClientUpdateValidator(
|
|
|
|
MsgSource.gossip, msg))
|
|
|
|
else:
|
|
|
|
debug "Ignoring optimistic light client update: Feature disabled"
|
|
|
|
ValidationResult.Ignore)
|
|
|
|
|
2022-03-29 07:15:42 +00:00
|
|
|
installOptimisticLightClientUpdateValidator(forkDigests.altair)
|
|
|
|
installOptimisticLightClientUpdateValidator(forkDigests.bellatrix)
|
2022-03-14 13:05:38 +00:00
|
|
|
|
2021-12-20 11:21:17 +00:00
|
|
|
proc stop(node: BeaconNode) =
|
2020-09-28 15:19:57 +00:00
|
|
|
bnStatus = BeaconNodeStatus.Stopping
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Graceful shutdown"
|
2020-09-01 13:44:40 +00:00
|
|
|
if not node.config.inProcessValidators:
|
2021-03-26 06:52:01 +00:00
|
|
|
try:
|
|
|
|
node.vcProcess.close()
|
|
|
|
except Exception as exc:
|
|
|
|
warn "Couldn't close vc process", msg = exc.msg
|
|
|
|
try:
|
|
|
|
waitFor node.network.stop()
|
|
|
|
except CatchableError as exc:
|
|
|
|
warn "Couldn't stop network", msg = exc.msg
|
|
|
|
|
2020-11-20 13:23:55 +00:00
|
|
|
node.attachedValidators.slashingProtection.close()
|
2020-09-12 05:35:58 +00:00
|
|
|
node.db.close()
|
2020-11-20 13:23:55 +00:00
|
|
|
notice "Databases closed"
|
2020-05-19 18:57:35 +00:00
|
|
|
|
2022-01-20 07:25:45 +00:00
|
|
|
proc startBackfillTask(node: BeaconNode) {.async.} =
|
|
|
|
while node.dag.needsBackfill:
|
|
|
|
if not node.syncManager.inProgress:
|
|
|
|
# Only start the backfiller if it's needed _and_ head sync has completed -
|
|
|
|
      # if we lose sync after having synced head, we could stop the backfiller,
|
|
|
|
# but this should be a fringe case - might as well keep the logic simple for
|
|
|
|
# now
|
|
|
|
node.backfiller.start()
|
|
|
|
return
|
|
|
|
|
|
|
|
await sleepAsync(chronos.seconds(2))
|
|
|
|
|
2021-12-20 11:21:17 +00:00
|
|
|
proc run(node: BeaconNode) {.raises: [Defect, CatchableError].} =
|
2021-11-01 14:50:24 +00:00
|
|
|
bnStatus = BeaconNodeStatus.Running
|
2020-05-19 18:57:35 +00:00
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
if not(isNil(node.rpcServer)):
|
|
|
|
node.rpcServer.installRpcHandlers(node)
|
|
|
|
node.rpcServer.start()
|
2020-05-19 18:57:35 +00:00
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
if not(isNil(node.restServer)):
|
|
|
|
node.restServer.installRestHandlers(node)
|
|
|
|
node.restServer.start()
|
2021-03-17 18:46:45 +00:00
|
|
|
|
2021-12-22 12:37:31 +00:00
|
|
|
if not(isNil(node.keymanagerServer)):
|
|
|
|
node.keymanagerServer.router.installKeymanagerHandlers(node)
|
|
|
|
if node.keymanagerServer != node.restServer:
|
|
|
|
node.keymanagerServer.start()
|
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
let
|
|
|
|
wallTime = node.beaconClock.now()
|
|
|
|
wallSlot = wallTime.slotOrZero()
|
2020-06-18 10:03:36 +00:00
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
node.requestManager.start()
|
|
|
|
node.syncManager.start()
|
2021-10-18 09:11:44 +00:00
|
|
|
|
2022-01-20 07:25:45 +00:00
|
|
|
if node.dag.needsBackfill(): asyncSpawn node.startBackfillTask()
|
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
waitFor node.updateGossipStatus(wallSlot)
|
2021-10-18 09:11:44 +00:00
|
|
|
|
2021-11-01 14:50:24 +00:00
|
|
|
asyncSpawn runSlotLoop(node, wallTime, onSlotStart)
|
|
|
|
asyncSpawn runOnSecondLoop(node)
|
|
|
|
asyncSpawn runQueueProcessingLoop(node.blockProcessor)
|
2020-12-01 10:43:02 +00:00
|
|
|
|
2020-11-02 18:02:27 +00:00
|
|
|
## Ctrl+C handling
|
|
|
|
proc controlCHandler() {.noconv.} =
|
|
|
|
when defined(windows):
|
|
|
|
# workaround for https://github.com/nim-lang/Nim/issues/4057
|
2021-03-26 06:52:01 +00:00
|
|
|
try:
|
|
|
|
setupForeignThreadGc()
|
|
|
|
except Exception as exc: raiseAssert exc.msg # shouldn't happen
|
2020-11-02 18:02:27 +00:00
|
|
|
notice "Shutting down after having received SIGINT"
|
|
|
|
bnStatus = BeaconNodeStatus.Stopping
|
2021-03-26 06:52:01 +00:00
|
|
|
try:
|
|
|
|
setControlCHook(controlCHandler)
|
|
|
|
except Exception as exc: # TODO Exception
|
|
|
|
warn "Cannot set ctrl-c handler", msg = exc.msg
|
|
|
|
|
2020-12-14 16:45:14 +00:00
|
|
|
# equivalent SIGTERM handler
|
|
|
|
when defined(posix):
|
|
|
|
proc SIGTERMHandler(signal: cint) {.noconv.} =
|
|
|
|
notice "Shutting down after having received SIGTERM"
|
|
|
|
bnStatus = BeaconNodeStatus.Stopping
|
|
|
|
c_signal(SIGTERM, SIGTERMHandler)
|
2020-11-02 18:02:27 +00:00
|
|
|
|
2020-05-19 18:57:35 +00:00
|
|
|
# main event loop
|
2020-09-28 15:19:57 +00:00
|
|
|
while bnStatus == BeaconNodeStatus.Running:
|
2021-03-26 06:52:01 +00:00
|
|
|
poll() # if poll fails, the network is broken
|
2020-04-20 14:59:18 +00:00
|
|
|
|
2020-05-19 18:57:35 +00:00
|
|
|
# time to say goodbye
|
|
|
|
node.stop()
|
2018-11-23 23:58:49 +00:00
|
|
|
|
2018-12-19 12:58:53 +00:00
|
|
|
var gPidFile: string
|
2021-03-26 06:52:01 +00:00
|
|
|
proc createPidFile(filename: string) {.raises: [Defect, IOError].} =
|
2019-07-07 09:53:58 +00:00
|
|
|
writeFile filename, $os.getCurrentProcessId()
|
2018-12-19 12:58:53 +00:00
|
|
|
gPidFile = filename
|
2020-08-19 13:12:10 +00:00
|
|
|
addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)
|
|
|
|
|
2020-06-11 12:13:12 +00:00
|
|
|
proc initializeNetworking(node: BeaconNode) {.async.} =
|
2021-11-01 14:50:24 +00:00
|
|
|
node.installMessageValidators()
|
|
|
|
|
2020-11-16 19:15:43 +00:00
|
|
|
info "Listening to incoming network requests"
|
2020-08-03 17:35:27 +00:00
|
|
|
await node.network.startListening()
|
2020-06-11 12:13:12 +00:00
|
|
|
|
2020-06-19 17:42:28 +00:00
|
|
|
let addressFile = node.config.dataDir / "beacon_node.enr"
|
2020-06-11 12:13:12 +00:00
|
|
|
writeFile(addressFile, node.network.announcedENR.toURI)
|
|
|
|
|
2020-09-21 16:02:27 +00:00
|
|
|
await node.network.start()
|
2020-06-11 12:13:12 +00:00
|
|
|
|
2021-12-22 12:37:31 +00:00
|
|
|
proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
|
2019-11-25 12:47:29 +00:00
|
|
|
let
|
2021-06-01 11:13:40 +00:00
|
|
|
head = node.dag.head
|
|
|
|
finalizedHead = node.dag.finalizedHead
|
2022-01-11 10:01:54 +00:00
|
|
|
genesisTime = node.beaconClock.fromNow(start_beacon_time(Slot 0))
|
2020-06-11 12:13:12 +00:00
|
|
|
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Starting beacon node",
|
2019-11-12 00:05:35 +00:00
|
|
|
version = fullVersionStr,
|
2020-11-16 19:15:43 +00:00
|
|
|
enr = node.network.announcedENR.toURI,
|
|
|
|
peerId = $node.network.switch.peerInfo.peerId,
|
2019-08-16 11:16:56 +00:00
|
|
|
timeSinceFinalization =
|
2022-01-11 10:01:54 +00:00
|
|
|
node.beaconClock.now() - finalizedHead.slot.start_beacon_time(),
|
2020-07-28 13:54:32 +00:00
|
|
|
head = shortLog(head),
|
2021-11-02 17:06:36 +00:00
|
|
|
justified = shortLog(getStateField(
|
2022-03-16 07:20:40 +00:00
|
|
|
node.dag.headState, current_justified_checkpoint)),
|
2021-11-02 17:06:36 +00:00
|
|
|
finalized = shortLog(getStateField(
|
2022-03-16 07:20:40 +00:00
|
|
|
node.dag.headState, finalized_checkpoint)),
|
2020-07-16 13:16:51 +00:00
|
|
|
finalizedHead = shortLog(finalizedHead),
|
2019-03-20 11:52:30 +00:00
|
|
|
SLOTS_PER_EPOCH,
|
|
|
|
SECONDS_PER_SLOT,
|
2019-09-12 01:45:04 +00:00
|
|
|
SPEC_VERSION,
|
2020-11-16 19:15:43 +00:00
|
|
|
dataDir = node.config.dataDir.string,
|
2021-02-22 16:17:48 +00:00
|
|
|
validators = node.attachedValidators[].count
|
2019-03-20 11:52:30 +00:00
|
|
|
|
2020-06-29 05:34:48 +00:00
|
|
|
if genesisTime.inFuture:
|
|
|
|
notice "Waiting for genesis", genesisIn = genesisTime.offset
|
|
|
|
|
2020-06-11 12:13:12 +00:00
|
|
|
waitFor node.initializeNetworking()
|
2020-11-12 16:21:04 +00:00
|
|
|
|
2021-11-25 17:08:02 +00:00
|
|
|
if node.eth1Monitor != nil:
|
2020-11-12 16:21:04 +00:00
|
|
|
node.eth1Monitor.start()
|
2021-11-01 14:50:24 +00:00
|
|
|
else:
|
|
|
|
notice "Running without execution chain monitor, block producation partially disabled"
|
2020-11-12 16:21:04 +00:00
|
|
|
|
2019-03-20 11:52:30 +00:00
|
|
|
node.run()
|
|
|
|
|
2019-10-03 01:51:44 +00:00
|
|
|
func formatGwei(amount: uint64): string =
|
|
|
|
  # TODO This is implemented in quite a silly way.
  # Better routines for formatting decimal numbers
  # should exist somewhere else.
|
|
|
|
let
|
|
|
|
eth = amount div 1000000000
|
|
|
|
remainder = amount mod 1000000000
|
|
|
|
|
|
|
|
result = $eth
|
|
|
|
if remainder != 0:
|
|
|
|
result.add '.'
|
2020-12-01 18:08:55 +00:00
|
|
|
let remainderStr = $remainder
|
|
|
|
for i in remainderStr.len ..< 9:
|
|
|
|
result.add '0'
|
|
|
|
result.add remainderStr
|
2019-10-03 01:51:44 +00:00
|
|
|
while result[^1] == '0':
|
|
|
|
result.setLen(result.len - 1)
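# Illustrative expectations for `formatGwei` as written above
# (1 ETH = 1_000_000_000 gwei): whole-ETH amounts carry no fractional part,
# and trailing zeros are trimmed from the fraction.
when false:
  doAssert formatGwei(32_000_000_000'u64) == "32"
  doAssert formatGwei(1_500_000_000'u64) == "1.5"
  doAssert formatGwei(1_000_000_001'u64) == "1.000000001"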
|
|
|
|
|
2022-03-14 09:19:50 +00:00
|
|
|
when not defined(windows):
|
|
|
|
proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
|
|
|
|
if not isatty(stdout): return
|
|
|
|
if not node.config.statusBarEnabled: return
|
2021-02-22 16:17:48 +00:00
|
|
|
|
2022-03-14 09:19:50 +00:00
|
|
|
try:
|
|
|
|
enableTrueColors()
|
|
|
|
except Exception as exc: # TODO Exception
|
|
|
|
error "Couldn't enable colors", err = exc.msg
|
|
|
|
|
|
|
|
proc dataResolver(expr: string): string {.raises: [Defect].} =
|
|
|
|
template justified: untyped = node.dag.head.atEpochStart(
|
|
|
|
getStateField(
|
2022-03-16 07:20:40 +00:00
|
|
|
node.dag.headState, current_justified_checkpoint).epoch)
|
2022-03-14 09:19:50 +00:00
|
|
|
# TODO:
|
|
|
|
# We should introduce a general API for resolving dot expressions
|
|
|
|
# such as `db.latest_block.slot` or `metrics.connected_peers`.
|
|
|
|
# Such an API can be shared between the RPC back-end, CLI tools
|
|
|
|
# such as ncli, a potential GraphQL back-end and so on.
|
|
|
|
# The status bar feature would allow the user to specify an
|
|
|
|
# arbitrary expression that is resolvable through this API.
|
|
|
|
case expr.toLowerAscii
|
|
|
|
of "connected_peers":
|
|
|
|
$(node.connectedPeersCount)
|
|
|
|
|
|
|
|
of "head_root":
|
|
|
|
shortLog(node.dag.head.root)
|
|
|
|
of "head_epoch":
|
|
|
|
$(node.dag.head.slot.epoch)
|
|
|
|
of "head_epoch_slot":
|
|
|
|
$(node.dag.head.slot.since_epoch_start)
|
|
|
|
of "head_slot":
|
|
|
|
$(node.dag.head.slot)
|
|
|
|
|
|
|
|
of "justifed_root":
|
|
|
|
shortLog(justified.blck.root)
|
|
|
|
of "justifed_epoch":
|
|
|
|
$(justified.slot.epoch)
|
|
|
|
of "justifed_epoch_slot":
|
|
|
|
$(justified.slot.since_epoch_start)
|
|
|
|
of "justifed_slot":
|
|
|
|
$(justified.slot)
|
|
|
|
|
|
|
|
of "finalized_root":
|
|
|
|
shortLog(node.dag.finalizedHead.blck.root)
|
|
|
|
of "finalized_epoch":
|
|
|
|
$(node.dag.finalizedHead.slot.epoch)
|
|
|
|
of "finalized_epoch_slot":
|
|
|
|
$(node.dag.finalizedHead.slot.since_epoch_start)
|
|
|
|
of "finalized_slot":
|
|
|
|
$(node.dag.finalizedHead.slot)
|
|
|
|
|
|
|
|
of "epoch":
|
|
|
|
$node.currentSlot.epoch
|
|
|
|
|
|
|
|
of "epoch_slot":
|
|
|
|
$(node.currentSlot.since_epoch_start)
|
|
|
|
|
|
|
|
of "slot":
|
|
|
|
$node.currentSlot
|
|
|
|
|
|
|
|
of "slots_per_epoch":
|
|
|
|
$SLOTS_PER_EPOCH
|
|
|
|
|
|
|
|
of "slot_trailing_digits":
|
|
|
|
var slotStr = $node.currentSlot
|
|
|
|
if slotStr.len > 3: slotStr = slotStr[^3..^1]
|
|
|
|
slotStr
|
|
|
|
|
|
|
|
of "attached_validators_balance":
|
|
|
|
formatGwei(node.attachedValidatorBalanceTotal)
|
|
|
|
|
|
|
|
of "sync_status":
|
|
|
|
node.syncStatus()
|
|
|
|
else:
|
|
|
|
# We ignore typos for now and just render the expression
|
|
|
|
# as it was written. TODO: come up with a good way to show
|
|
|
|
# an error message to the user.
|
|
|
|
"$" & expr
|
|
|
|
|
|
|
|
var statusBar = StatusBarView.init(
|
|
|
|
node.config.statusBarContents,
|
|
|
|
dataResolver)
|
|
|
|
|
|
|
|
when compiles(defaultChroniclesStream.outputs[0].writer):
|
|
|
|
let tmp = defaultChroniclesStream.outputs[0].writer
|
|
|
|
|
|
|
|
defaultChroniclesStream.outputs[0].writer =
|
|
|
|
proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [Defect].} =
|
|
|
|
try:
|
|
|
|
# p.hidePrompt
|
|
|
|
erase statusBar
|
|
|
|
# p.writeLine msg
|
|
|
|
tmp(logLevel, msg)
|
|
|
|
render statusBar
|
|
|
|
# p.showPrompt
|
|
|
|
except Exception as e: # render raises Exception
|
|
|
|
logLoggingFailure(cstring(msg), e)
|
|
|
|
|
|
|
|
proc statusBarUpdatesPollingLoop() {.async.} =
|
|
|
|
try:
|
|
|
|
while true:
|
|
|
|
update statusBar
|
2021-02-22 16:17:48 +00:00
|
|
|
erase statusBar
|
|
|
|
render statusBar
|
2022-03-14 09:19:50 +00:00
|
|
|
await sleepAsync(chronos.seconds(1))
|
|
|
|
except CatchableError as exc:
|
|
|
|
warn "Failed to update status bar, no further updates", err = exc.msg
|
2021-02-22 16:17:48 +00:00
|
|
|
|
2022-03-14 09:19:50 +00:00
|
|
|
asyncSpawn statusBarUpdatesPollingLoop()
|
2021-02-22 16:17:48 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
|
2021-02-22 16:17:48 +00:00
|
|
|
info "Launching beacon node",
|
|
|
|
version = fullVersionStr,
|
|
|
|
bls_backend = $BLS_BACKEND,
|
|
|
|
cmdParams = commandLineParams(),
|
|
|
|
config
|
|
|
|
|
|
|
|
createPidFile(config.dataDir.string / "beacon_node.pid")
|
|
|
|
|
|
|
|
config.createDumpDirs()
|
|
|
|
|
|
|
|
if config.metricsEnabled:
|
2021-04-01 12:44:11 +00:00
|
|
|
let metricsAddress = config.metricsAddress
|
|
|
|
notice "Starting metrics HTTP server",
|
|
|
|
url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
|
|
|
|
try:
|
|
|
|
startMetricsHttpServer($metricsAddress, config.metricsPort)
|
2022-02-08 19:19:21 +00:00
|
|
|
except CatchableError as exc:
|
|
|
|
raise exc
|
|
|
|
except Exception as exc:
|
|
|
|
raiseAssert exc.msg # TODO fix metrics
|
|
|
|
|
|
|
|
# Nim GC metrics (for the main thread) will be collected in onSecond(), but
|
|
|
|
# we disable piggy-backing on other metrics here.
|
|
|
|
setSystemMetricsAutomaticUpdate(false)
|
2020-10-09 13:57:45 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
# There are no managed event loops in here, to do a graceful shutdown, but
|
|
|
|
# letting the default Ctrl+C handler exit is safe, since we only read from
|
|
|
|
# the db.
|
2021-11-01 14:50:24 +00:00
|
|
|
|
2021-11-25 10:53:31 +00:00
|
|
|
var metadata = config.loadEth2Network()
|
|
|
|
|
|
|
|
if config.terminalTotalDifficultyOverride.isSome:
|
|
|
|
metadata.cfg.TERMINAL_TOTAL_DIFFICULTY =
|
2022-01-03 12:22:56 +00:00
|
|
|
parse(config.terminalTotalDifficultyOverride.get, UInt256, 10)
|
2021-11-01 14:50:24 +00:00
|
|
|
|
|
|
|
# Updating the config based on the metadata certainly is not beautiful but it
|
|
|
|
# works
|
|
|
|
for node in metadata.bootstrapNodes:
|
|
|
|
config.bootstrapNodes.add node
|
2022-04-01 21:58:06 +00:00
|
|
|
if config.serveLightClientData.isNone:
|
|
|
|
if metadata.configDefaults.serveLightClientData:
|
|
|
|
info "Applying network config default",
|
|
|
|
serveLightClientData = metadata.configDefaults.serveLightClientData,
|
|
|
|
eth2Network = config.eth2Network
|
|
|
|
config.serveLightClientData =
|
|
|
|
some metadata.configDefaults.serveLightClientData
|
|
|
|
if config.importLightClientData.isNone:
|
|
|
|
if metadata.configDefaults.importLightClientData !=
|
|
|
|
ImportLightClientData.None:
|
|
|
|
info "Applying network config default",
|
|
|
|
importLightClientData = metadata.configDefaults.importLightClientData,
|
|
|
|
eth2Network = config.eth2Network
|
|
|
|
config.importLightClientData =
|
|
|
|
some metadata.configDefaults.importLightClientData
|
2021-11-01 14:50:24 +00:00
|
|
|
|
|
|
|
let node = BeaconNode.init(
|
|
|
|
metadata.cfg,
|
|
|
|
rng,
|
|
|
|
config,
|
|
|
|
metadata.depositContractDeployedAt,
|
|
|
|
metadata.eth1Network,
|
|
|
|
metadata.genesisData,
|
|
|
|
metadata.genesisDepositsSnapshot)
|
2020-07-07 23:02:14 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
if bnStatus == BeaconNodeStatus.Stopping:
|
|
|
|
return
|
2020-07-02 15:14:11 +00:00
|
|
|
|
2022-03-14 09:19:50 +00:00
|
|
|
when not defined(windows):
|
|
|
|
# This status bar can lock a Windows terminal emulator, blocking the whole
|
|
|
|
# event loop (seen on Windows 10, with a default MSYS2 terminal).
|
|
|
|
initStatusBar(node)
|
2020-07-02 15:52:48 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
if node.nickname != "":
|
|
|
|
dynamicLogScope(node = node.nickname): node.start()
|
2020-07-07 23:02:14 +00:00
|
|
|
else:
|
2021-02-22 16:17:48 +00:00
|
|
|
node.start()
|
|
|
|
|
2021-12-22 12:37:31 +00:00
|
|
|
proc doCreateTestnet*(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
|
2021-02-22 16:17:48 +00:00
|
|
|
let launchPadDeposits = try:
|
|
|
|
Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
|
|
|
|
except SerializationError as err:
|
|
|
|
error "Invalid LaunchPad deposits file",
|
|
|
|
err = formatMsg(err, config.testnetDepositsFile.string)
|
|
|
|
quit 1
|
2020-12-01 11:35:55 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
var deposits: seq[DepositData]
|
2021-04-14 13:15:22 +00:00
|
|
|
for i in 0 ..< launchPadDeposits.len:
|
2021-02-22 16:17:48 +00:00
|
|
|
deposits.add(launchPadDeposits[i] as DepositData)
|
2020-12-01 11:35:55 +00:00
|
|
|
|
2022-03-31 14:43:05 +00:00
|
|
|
let jwtSecret = rng.checkJwtSecret(string(config.dataDir), config.jwtSecret)
|
|
|
|
if jwtSecret.isErr:
|
|
|
|
fatal "Specified a JWT secret file which couldn't be loaded",
|
|
|
|
err = jwtSecret.error
|
|
|
|
quit 1
|
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
let
|
|
|
|
startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset)
|
|
|
|
outGenesis = config.outputGenesis.string
|
2021-04-06 21:42:59 +00:00
|
|
|
eth1Hash = if config.web3Urls.len == 0: eth1BlockHash
|
2022-03-31 14:43:05 +00:00
|
|
|
else: (waitFor getEth1BlockHash(
|
|
|
|
config.web3Urls[0], blockId("latest"),
|
|
|
|
if config.useJwt:
|
|
|
|
some jwtSecret.get
|
|
|
|
else:
|
|
|
|
none(seq[byte]))).asEth2Digest
|
2021-07-13 14:27:10 +00:00
|
|
|
cfg = getRuntimeConfig(config.eth2Network)
|
2021-02-22 16:17:48 +00:00
|
|
|
var
|
2021-11-18 12:02:43 +00:00
|
|
|
initialState = newClone(initialize_beacon_state_from_eth1(
|
|
|
|
cfg, eth1Hash, startTime, deposits, {skipBlsValidation}))
|
2020-07-02 15:14:11 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
|
|
|
|
initialState.genesis_time = startTime
|
2020-07-07 15:51:02 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
doAssert initialState.validators.len > 0
|
2020-10-02 13:38:32 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
let outGenesisExt = splitFile(outGenesis).ext
|
|
|
|
if cmpIgnoreCase(outGenesisExt, ".json") == 0:
|
|
|
|
Json.saveFile(outGenesis, initialState, pretty = true)
|
|
|
|
echo "Wrote ", outGenesis
|
2020-06-02 19:59:51 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
let outSszGenesis = outGenesis.changeFileExt "ssz"
|
|
|
|
SSZ.saveFile(outSszGenesis, initialState[])
|
|
|
|
echo "Wrote ", outSszGenesis
|
2019-03-19 17:22:17 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
let bootstrapFile = config.outputBootstrapFile.string
|
|
|
|
if bootstrapFile.len > 0:
|
2022-03-18 11:36:50 +00:00
|
|
|
type MetaData = altair.MetaData
|
2019-10-29 16:46:41 +00:00
|
|
|
let
|
2021-02-22 16:17:48 +00:00
|
|
|
networkKeys = getPersistentNetKeys(rng, config)
|
2022-03-18 11:36:50 +00:00
|
|
|
|
|
|
|
netMetadata = MetaData()
|
2021-08-10 06:19:13 +00:00
|
|
|
forkId = getENRForkID(
|
|
|
|
cfg,
|
|
|
|
initialState[].slot.epoch,
|
|
|
|
initialState[].genesis_validators_root)
|
2021-02-22 16:17:48 +00:00
|
|
|
bootstrapEnr = enr.Record.init(
|
|
|
|
1, # sequence number
|
|
|
|
networkKeys.seckey.asEthKey,
|
|
|
|
some(config.bootstrapAddress),
|
|
|
|
some(config.bootstrapPort),
|
|
|
|
some(config.bootstrapPort),
|
2021-08-10 06:19:13 +00:00
|
|
|
[
|
|
|
|
toFieldPair(enrForkIdField, SSZ.encode(forkId)),
|
|
|
|
toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))
|
|
|
|
])
|
2021-02-22 16:17:48 +00:00
|
|
|
|
|
|
|
writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
|
|
|
|
echo "Wrote ", bootstrapFile
|
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
proc doRecord(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
|
|
|
|
raises: [Defect, CatchableError].} =
|
2021-02-22 16:17:48 +00:00
|
|
|
case config.recordCmd:
|
|
|
|
of RecordCmd.create:
|
|
|
|
let netKeys = getPersistentNetKeys(rng, config)
|
|
|
|
|
|
|
|
var fieldPairs: seq[FieldPair]
|
|
|
|
for field in config.fields:
|
|
|
|
let fieldPair = field.split(":")
|
|
|
|
if fieldPair.len > 1:
|
|
|
|
fieldPairs.add(toFieldPair(fieldPair[0], hexToSeqByte(fieldPair[1])))
|
|
|
|
else:
|
|
|
|
fatal "Invalid field pair"
|
|
|
|
quit QuitFailure
|
2020-06-24 13:57:09 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
let record = enr.Record.init(
|
|
|
|
config.seqNumber,
|
|
|
|
netKeys.seckey.asEthKey,
|
|
|
|
some(config.ipExt),
|
|
|
|
some(config.tcpPortExt),
|
|
|
|
some(config.udpPortExt),
|
|
|
|
fieldPairs).expect("Record within size limits")
|
2020-07-01 09:13:56 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
echo record.toURI()
|
2020-08-21 19:36:42 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
of RecordCmd.print:
|
|
|
|
echo $config.recordPrint
|
2020-03-24 11:13:07 +00:00
|
|
|
|
2022-03-31 14:43:05 +00:00
|
|
|
proc doWeb3Cmd(config: BeaconNodeConf, rng: var BrHmacDrbgContext)
|
|
|
|
{.raises: [Defect, CatchableError].} =
|
2021-02-22 16:17:48 +00:00
|
|
|
case config.web3Cmd:
|
|
|
|
of Web3Cmd.test:
|
2022-03-31 14:43:05 +00:00
|
|
|
let
|
|
|
|
metadata = config.loadEth2Network()
|
|
|
|
jwtSecret = rng.checkJwtSecret(string(config.dataDir), config.jwtSecret)
|
|
|
|
|
|
|
|
if jwtSecret.isErr:
|
|
|
|
fatal "Specified a JWT secret file which couldn't be loaded",
|
|
|
|
err = jwtSecret.error
|
|
|
|
quit 1
|
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
waitFor testWeb3Provider(config.web3TestUrl,
|
2022-03-31 14:43:05 +00:00
|
|
|
metadata.cfg.DEPOSIT_CONTRACT_ADDRESS,
|
|
|
|
if config.useJwt:
|
|
|
|
some jwtSecret.get
|
|
|
|
else:
|
|
|
|
none(seq[byte]))
|
2020-11-27 19:48:33 +00:00
|
|
|
|
2021-05-19 06:38:13 +00:00
|
|
|
proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError, Defect].}=
|
|
|
|
let
|
|
|
|
dir = conf.validatorsDir()
|
|
|
|
filetrunc = SlashingDbName
|
|
|
|
# TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
|
|
|
|
let db = SlashingProtectionDB.loadUnchecked(dir, filetrunc, readOnly = false)
|
|
|
|
|
|
|
|
let interchange = conf.exportedInterchangeFile.string
|
|
|
|
db.exportSlashingInterchange(interchange, conf.exportedValidators)
|
|
|
|
echo "Export finished: '", dir/filetrunc & ".sqlite3" , "' into '", interchange, "'"
|
|
|
|
|
|
|
|
proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError, Defect].} =
|
|
|
|
let
|
|
|
|
dir = conf.validatorsDir()
|
|
|
|
filetrunc = SlashingDbName
|
|
|
|
# TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
|
|
|
|
|
|
|
|
let interchange = conf.importedInterchangeFile.string
|
|
|
|
|
|
|
|
var spdir: SPDIR
|
|
|
|
try:
|
|
|
|
spdir = JSON.loadFile(interchange, SPDIR)
|
|
|
|
except SerializationError as err:
|
|
|
|
writeStackTrace()
|
|
|
|
stderr.write $JSON & " load issue for file \"", interchange, "\"\n"
|
|
|
|
stderr.write err.formatMsg(interchange), "\n"
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
# Open DB and handle migration from v1 to v2 if needed
|
|
|
|
let db = SlashingProtectionDB.init(
|
|
|
|
genesis_validators_root = Eth2Digest spdir.metadata.genesis_validators_root,
|
|
|
|
basePath = dir,
|
|
|
|
dbname = filetrunc,
|
|
|
|
modes = {kCompleteArchive}
|
|
|
|
)
|
|
|
|
|
|
|
|
# Now import the slashing interchange file
|
|
|
|
# Failures mode:
|
|
|
|
# - siError can only happen with invalid genesis_validators_root which would be caught above
|
|
|
|
# - siPartial can happen for invalid public keys, slashable blocks, slashable votes
|
|
|
|
let status = db.inclSPDIR(spdir)
|
|
|
|
doAssert status in {siSuccess, siPartial}
|
|
|
|
|
|
|
|
echo "Import finished: '", interchange, "' into '", dir/filetrunc & ".sqlite3", "'"
proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  case conf.slashingdbCmd
  of SlashProtCmd.`export`:
    conf.doSlashingExport()
  of SlashProtCmd.`import`:
    conf.doSlashingImport()
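
# Top-level command dispatcher: creates the application-wide RNG and runs the
# sub-command selected on the command line.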
proc handleStartUpCmd(config: var BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  # Single RNG instance for the application - will be seeded on construction
  # and avoid using system resources (such as urandom) after that
  let rng = keys.newRng()

  case config.cmd
  of BNStartUpCmd.createTestnet: doCreateTestnet(config, rng[])
  of BNStartUpCmd.noCommand: doRunBeaconNode(config, rng)
  of BNStartUpCmd.deposits: doDeposits(config, rng[])
  of BNStartUpCmd.wallets: doWallets(config, rng[])
  of BNStartUpCmd.record: doRecord(config, rng[])
  of BNStartUpCmd.web3: doWeb3Cmd(config, rng[])
  of BNStartUpCmd.slashingdb: doSlashingInterchange(config)
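  # Bootstraps the local database from a trusted beacon node rather than
  # syncing from genesis: the genesis state (when bundled with the network
  # metadata) is parsed and handed to `doTrustedNodeSync` together with the
  # configured node URL, block id and backfill/reindex options.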
  of BNStartUpCmd.trustedNodeSync:
    let
      network = loadEth2Network(config)
      cfg = network.cfg
      genesis =
        if network.genesisData.len > 0:
          newClone(readSszForkedHashedBeaconState(
            cfg,
            network.genesisData.toOpenArrayByte(0, network.genesisData.high())))
        else: nil

    waitFor doTrustedNodeSync(
      cfg,
      config.databaseDir,
      config.trustedNodeUrl,
      config.blockId,
      config.backfillBlocks,
      config.reindex,
      genesis)

{.pop.} # TODO moduletests exceptions
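
# Windows service plumbing: minimal status reporting and control handling so
# the beacon node can run under the Service Control Manager when
# `config.runAsService` is set (see the dispatch in `programMain` below).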
when defined(windows):
  proc reportServiceStatus*(dwCurrentState, dwWin32ExitCode, dwWaitHint: DWORD) {.gcsafe.} =
    gSvcStatus.dwCurrentState = dwCurrentState
    gSvcStatus.dwWin32ExitCode = dwWin32ExitCode
    gSvcStatus.dwWaitHint = dwWaitHint
    if dwCurrentState == SERVICE_START_PENDING:
      gSvcStatus.dwControlsAccepted = 0
    else:
      gSvcStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP

    # TODO
    # We can use non-zero values for the `dwCheckPoint` parameter to report
    # progress during lengthy operations such as start-up and shut down.
    gSvcStatus.dwCheckPoint = 0

    # Report the status of the service to the SCM.
    let status = SetServiceStatus(gSvcStatusHandle, addr gSvcStatus)
    debug "Service status updated", status
  proc serviceControlHandler(dwCtrl: DWORD): WINBOOL {.stdcall.} =
    case dwCtrl
    of SERVICE_CONTROL_STOP:
      # We are reporting that we plan to stop the service within 10 seconds
      reportServiceStatus(SERVICE_STOP_PENDING, NO_ERROR, 10_000)
      bnStatus = BeaconNodeStatus.Stopping
    of SERVICE_CONTROL_PAUSE, SERVICE_CONTROL_CONTINUE:
      warn "The Nimbus service cannot be paused and resumed"
    of SERVICE_CONTROL_INTERROGATE:
      # The default behavior is correct.
      # The service control manager will report our last status.
      discard
    else:
      debug "Service received an unexpected user-defined control message",
            msg = dwCtrl
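
  # Service entry point called by the SCM on its own thread once
  # `StartServiceCtrlDispatcher` connects the process: it registers the
  # control handler, reports `SERVICE_RUNNING` and then runs the regular
  # start-up command handling.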
  proc serviceMainFunction(dwArgc: DWORD, lpszArgv: LPSTR) {.stdcall.} =
    # The service is launched in a fresh thread created by Windows, so
    # we must initialize the Nim GC here
    setupForeignThreadGc()

    gSvcStatusHandle = RegisterServiceCtrlHandler(
      SERVICE_NAME,
      serviceControlHandler)

    gSvcStatus.dwServiceType = SERVICE_WIN32_OWN_PROCESS
    gSvcStatus.dwServiceSpecificExitCode = 0
    reportServiceStatus(SERVICE_RUNNING, NO_ERROR, 0)

    info "Service thread started"

    var config = makeBannerAndConfig(clientId, BeaconNodeConf)
    handleStartUpCmd(config)

    info "Service thread stopped"
    reportServiceStatus(SERVICE_STOPPED, NO_ERROR, 0) # we have to report back when we stopped!
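
# Program entry point: parse the command line, make sure the data directory
# and logging are usable, install temporary signal handlers for the
# sub-commands, then either hand over to the Windows service dispatcher or
# run the selected command directly.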
programMain:
  var
    config = makeBannerAndConfig(clientId, BeaconNodeConf)

  if not(checkAndCreateDataDir(string(config.dataDir))):
    # We are unable to access/create data folder or data folder's
    # permissions are insecure.
    quit QuitFailure

  setupLogging(config.logLevel, config.logStdout, config.logFile)

  ## This Ctrl+C handler exits the program in a non-graceful way.
  ## It's responsible for handling Ctrl+C in sub-commands such
  ## as `wallets *` and `deposits *`. In a regular beacon node
  ## run, it will be overwritten later with a different handler
  ## performing a graceful exit.
  proc exitImmediatelyOnCtrlC() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    # in case a password prompt disabled echoing
    resetStdin()
    echo "" # If we interrupt during an interactive prompt, this
            # will move the cursor to the next line
    notice "Shutting down after having received SIGINT"
    quit 0
  setControlCHook(exitImmediatelyOnCtrlC)
  # equivalent SIGTERM handler
  when defined(posix):
    proc exitImmediatelyOnSIGTERM(signal: cint) {.noconv.} =
      notice "Shutting down after having received SIGTERM"
      quit 0
    c_signal(SIGTERM, exitImmediatelyOnSIGTERM)
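
  # When running as a Windows service, hand control to the SCM via
  # `StartServiceCtrlDispatcher`, which will invoke `serviceMainFunction`;
  # otherwise run the selected command directly in this process.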
  when defined(windows):
    if config.runAsService:
      var dispatchTable = [
        SERVICE_TABLE_ENTRY(lpServiceName: SERVICE_NAME, lpServiceProc: serviceMainFunction),
        SERVICE_TABLE_ENTRY(lpServiceName: nil, lpServiceProc: nil) # last entry must be nil
      ]

      let status = StartServiceCtrlDispatcher(LPSERVICE_TABLE_ENTRY(addr dispatchTable[0]))
      if status == 0:
        fatal "Failed to start Windows service", errorCode = getLastError()
        quit 1
    else:
      handleStartUpCmd(config)
  else:
    handleStartUpCmd(config)