# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard library
  std/[math, os, osproc, random, sequtils, strformat, strutils,
    tables, times, terminal],

  # Nimble packages
  stew/[objects, byteutils, endians2, io2], stew/shims/macros,
  chronos, confutils, metrics, metrics/chronos_httpserver,
  chronicles, bearssl, blscurve, presto,
  json_serialization/std/[options, sets, net], serialization/errors,
  taskpools,

  eth/[keys, async_utils], eth/net/nat,
  eth/p2p/discoveryv5/[protocol, enr, random2],

  # Local modules
  "."/[
    beacon_clock, beacon_chain_db, beacon_node_common, beacon_node_status,
    beacon_node_types, conf, filepath, interop, nimbus_binary_common, statusbar,
    version],
  ./networking/[eth2_discovery, eth2_network, network_metadata],
  ./gossip_processing/[eth2_processor, block_processor, consensus_manager],
  ./validators/[
    validator_duties, validator_pool,
    slashing_protection, keystore_management],
  ./sync/[sync_manager, sync_protocol, request_manager],
  ./rpc/[rest_api, rpc_api],
  ./spec/datatypes/[altair, phase0],
  ./spec/eth2_apis/rpc_beacon_client,
  ./spec/[
    beaconstate, forks, helpers, network, weak_subjectivity, signatures,
    validator],
  ./consensus_object_pools/[
    blockchain_dag, block_quarantine, block_clearance, attestation_pool,
    sync_committee_msg_pool, exit_pool, spec_cache],
  ./eth1/eth1_monitor

from eth/common/eth_types import BlockHashOrNumber

when defined(posix):
  import system/ansi_c

from
  libp2p/protocols/pubsub/gossipsub
import
  TopicParams, validateParameters, init

type
  RpcServer* = RpcHttpServer

template init(T: type RpcHttpServer, ip: ValidIpAddress, port: Port): T =
  newRpcHttpServer([initTAddress(ip, port)])

template init(T: type RestServerRef, ip: ValidIpAddress, port: Port): T =
  let address = initTAddress(ip, port)
  let serverFlags = {HttpServerFlags.QueryCommaSeparatedArray,
                     HttpServerFlags.NotifyDisconnect}
  # We increase default timeout to help validator clients who poll our server
  # at least once per slot (12.seconds).
  let headersTimeout = seconds(2'i64 * int64(SECONDS_PER_SLOT))
  let res = RestServerRef.new(getRouter(), address, serverFlags = serverFlags,
                              httpHeadersTimeout = headersTimeout)
  if res.isErr():
    notice "Rest server could not be started", address = $address,
           reason = res.error()
    nil
  else:
    res.get()
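# Note: both `init` templates above are used further down in `BeaconNode.init`
# (e.g. `RestServerRef.init(config.restAddress, config.restPort)`); on failure
# the REST template deliberately evaluates to `nil`, so the node can continue
# without that server.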

# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#interop-metrics
declareGauge beacon_slot, "Latest slot of the beacon chain state"
declareGauge beacon_current_epoch, "Current epoch"

# Finalization tracking
declareGauge finalization_delay,
  "Epoch delay between scheduled epoch and finalized epoch"

declareGauge ticks_delay,
  "How long it takes to run the onSecond loop"

declareGauge next_action_wait,
  "Seconds until the next attestation will be sent"

declareGauge versionGauge, "Nimbus version info (as metric labels)", ["version", "commit"], name = "version"
versionGauge.set(1, labelValues=[fullVersionStr, gitRevision])

logScope: topics = "beacnde"

const SlashingDbName = "slashing_protection"
  # changing this requires physical file rename as well or history is lost.

func getBeaconTimeFn(clock: BeaconClock): GetBeaconTimeFn =
  return proc(): BeaconTime = clock.now()
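# The closure returned by `getBeaconTimeFn` captures only the `BeaconClock`,
# so the subsystems it is handed to below (network, block processor, Eth2
# processor, beacon sync) can query the current beacon time without holding a
# reference to the whole BeaconNode.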

proc init*(T: type BeaconNode,
           cfg: RuntimeConfig,
           rng: ref BrHmacDrbgContext,
           config: BeaconNodeConf,
           depositContractDeployedAt: BlockHashOrNumber,
           eth1Network: Option[Eth1Network],
           genesisStateContents: string,
           genesisDepositsSnapshotContents: string): BeaconNode {.
    raises: [Defect, CatchableError].} =
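  # Thread pool setup: a negative thread count (config.numThreads) is
  # rejected, zero lets taskpools pick its own default (typically one worker
  # per core), and any positive value is used as given. A failure to create
  # the pool is escalated to a Defect because the node cannot proceed without
  # it.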
  var taskpool: TaskpoolPtr

  try:
    if config.numThreads < 0:
      fatal "The number of threads --numThreads cannot be negative."
      quit 1
    elif config.numThreads == 0:
      taskpool = TaskpoolPtr.new()
    else:
      taskpool = TaskpoolPtr.new(numThreads = config.numThreads)

    info "Threadpool started", numThreads = taskpool.numThreads
  except Exception as exc:
    raise newException(Defect, "Failure in taskpool initialization.")

  let
    eventBus = newAsyncEventBus()
    db = BeaconChainDB.new(config.databaseDir, inMemory = false)

  var
    genesisState, checkpointState: ref phase0.BeaconState
    checkpointBlock: phase0.TrustedSignedBeaconBlock

  proc onAttestationReceived(data: Attestation) =
    eventBus.emit("attestation-received", data)
  proc onAttestationSent(data: Attestation) =
    eventBus.emit("attestation-sent", data)
  proc onVoluntaryExitAdded(data: SignedVoluntaryExit) =
    eventBus.emit("voluntary-exit", data)
  proc onBlockAdded(data: ForkedTrustedSignedBeaconBlock) =
    eventBus.emit("signed-beacon-block", data)
  proc onHeadChanged(data: HeadChangeInfoObject) =
    eventBus.emit("head-change", data)
  proc onChainReorg(data: ReorgInfoObject) =
    eventBus.emit("chain-reorg", data)
  proc onFinalization(data: FinalizationInfoObject) =
    eventBus.emit("finalization", data)
  proc onSyncContribution(data: SignedContributionAndProof) =
    eventBus.emit("sync-contribution-and-proof", data)
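  # The callbacks above are thin adapters: the consensus components (DAG and
  # message pools) invoke them when something happens, and they re-publish
  # the event on the node's AsyncEventBus under a string topic, so consumers
  # elsewhere in the node (an events API, for example) can subscribe by topic
  # name without knowing about the producing component.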

  if config.finalizedCheckpointState.isSome:
    let checkpointStatePath = config.finalizedCheckpointState.get.string
    checkpointState = try:
      newClone(SSZ.loadFile(checkpointStatePath, phase0.BeaconState))
    except SerializationError as err:
      fatal "Checkpoint state deserialization failed",
            err = formatMsg(err, checkpointStatePath)
      quit 1
    except CatchableError as err:
      fatal "Failed to read checkpoint state file", err = err.msg
      quit 1

    if config.finalizedCheckpointBlock.isNone:
      if checkpointState.slot > 0:
        fatal "Specifying a non-genesis --finalized-checkpoint-state requires specifying --finalized-checkpoint-block as well"
        quit 1
    else:
      let checkpointBlockPath = config.finalizedCheckpointBlock.get.string
      try:
        # TODO Perform sanity checks like signature and slot verification at least
        checkpointBlock = SSZ.loadFile(checkpointBlockPath, phase0.TrustedSignedBeaconBlock)
      except SerializationError as err:
        fatal "Invalid checkpoint block", err = err.formatMsg(checkpointBlockPath)
        quit 1
      except IOError as err:
        fatal "Failed to load the checkpoint block", err = err.msg
        quit 1
  elif config.finalizedCheckpointBlock.isSome:
    # TODO We can download the state from somewhere in the future relying
    #      on the trusted `state_root` appearing in the checkpoint block.
    fatal "--finalized-checkpoint-block cannot be specified without --finalized-checkpoint-state"
    quit 1
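  # To summarise the flag handling above: --finalized-checkpoint-state may
  # appear on its own only when it holds a genesis (slot 0) state; a
  # non-genesis state must be paired with --finalized-checkpoint-block, and
  # the block flag is never accepted without the state flag.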

  var eth1Monitor: Eth1Monitor
  if not ChainDAGRef.isInitialized(db):
    var
      tailState: ref phase0.BeaconState
      tailBlock: phase0.TrustedSignedBeaconBlock
    if genesisStateContents.len == 0 and checkpointState == nil:
      when hasGenesisDetection:
        if genesisDepositsSnapshotContents.len != 0:
          fatal "A deposits snapshot cannot be provided without also providing a matching beacon state snapshot"
          quit 1

        # This is a fresh start without a known genesis state
        # (most likely, it hasn't arrived yet). We'll try to
        # obtain a genesis through the Eth1 deposits monitor:
        if config.web3Urls.len == 0:
          fatal "Web3 URL not specified"
          quit 1

        # TODO Could move this to a separate "GenesisMonitor" process or task
        #      that would do only this - see Paul's proposal for this.
        let eth1MonitorRes = waitFor Eth1Monitor.init(
          cfg,
          db,
          config.web3Urls,
          depositContractDeployedAt,
          eth1Network)

        if eth1MonitorRes.isErr:
          fatal "Failed to start Eth1 monitor",
                reason = eth1MonitorRes.error,
                web3Urls = config.web3Urls,
                depositContractDeployedAt
          quit 1
        else:
          eth1Monitor = eth1MonitorRes.get

        genesisState = waitFor eth1Monitor.waitGenesis()
        if bnStatus == BeaconNodeStatus.Stopping:
          return nil

        tailState = genesisState
        tailBlock = get_initial_beacon_block(genesisState[])

        notice "Eth2 genesis state detected",
          genesisTime = genesisState.genesisTime,
          eth1Block = genesisState.eth1_data.block_hash,
          totalDeposits = genesisState.eth1_data.deposit_count
      else:
        fatal "The beacon node must be compiled with -d:has_genesis_detection " &
              "in order to support monitoring for genesis events"
        quit 1

    elif genesisStateContents.len == 0:
      if checkpointState.slot == GENESIS_SLOT:
        genesisState = checkpointState
        tailState = checkpointState
        tailBlock = get_initial_beacon_block(genesisState[])
      else:
        fatal "State checkpoints cannot be provided for a network without a known genesis state"
        quit 1
    else:
      try:
        genesisState = newClone(SSZ.decode(genesisStateContents, phase0.BeaconState))
      except CatchableError as err:
        raiseAssert "Invalid baked-in state: " & err.msg

      if checkpointState != nil:
        tailState = checkpointState
        tailBlock = checkpointBlock
      else:
        tailState = genesisState
        tailBlock = get_initial_beacon_block(genesisState[])

    try:
      ChainDAGRef.preInit(db, genesisState[], tailState[], tailBlock)
      doAssert ChainDAGRef.isInitialized(db), "preInit should have initialized db"
    except CatchableError as exc:
      error "Failed to initialize database", err = exc.msg
      quit 1

  # Doesn't use std/random directly, but dependencies might
  randomize(rng[].rand(high(int)))

  info "Loading block dag from database", path = config.databaseDir

  let
    chainDagFlags = if config.verifyFinalization: {verifyFinalization}
                    else: {}
    dag = ChainDAGRef.init(cfg, db, chainDagFlags, onBlockAdded, onHeadChanged,
                           onChainReorg, onFinalization)
    quarantine = QuarantineRef.init(rng, taskpool)
    databaseGenesisValidatorsRoot =
      getStateField(dag.headState.data, genesis_validators_root)

  if genesisStateContents.len != 0:
    let
      networkGenesisValidatorsRoot =
        extractGenesisValidatorRootFromSnapshop(genesisStateContents)

    if networkGenesisValidatorsRoot != databaseGenesisValidatorsRoot:
      fatal "The specified --data-dir contains data for a different network",
            networkGenesisValidatorsRoot, databaseGenesisValidatorsRoot,
            dataDir = config.dataDir
      quit 1

  let beaconClock = BeaconClock.init(
    getStateField(dag.headState.data, genesis_time))

  if config.weakSubjectivityCheckpoint.isSome:
    let
      currentSlot = beaconClock.now.slotOrZero
      isCheckpointStale = not is_within_weak_subjectivity_period(
        cfg,
        currentSlot,
        dag.headState.data,
        config.weakSubjectivityCheckpoint.get)

    if isCheckpointStale:
      error "Weak subjectivity checkpoint is stale",
            currentSlot,
            checkpoint = config.weakSubjectivityCheckpoint.get,
            headStateSlot = getStateField(dag.headState.data, slot)
      quit 1

  if checkpointState != nil:
    let checkpointGenesisValidatorsRoot = checkpointState[].genesis_validators_root
    if checkpointGenesisValidatorsRoot != databaseGenesisValidatorsRoot:
      fatal "The specified checkpoint state is intended for a different network",
            checkpointGenesisValidatorsRoot, databaseGenesisValidatorsRoot,
            dataDir = config.dataDir
      quit 1

    dag.setTailState(checkpointState[], checkpointBlock)

  if eth1Monitor.isNil and
     config.web3Urls.len > 0 and
     genesisDepositsSnapshotContents.len > 0:
    let genesisDepositsSnapshot = SSZ.decode(genesisDepositsSnapshotContents,
                                             DepositContractSnapshot)
    eth1Monitor = Eth1Monitor.init(
      cfg,
      db,
      config.web3Urls,
      genesisDepositsSnapshot,
      eth1Network)

  let rpcServer = if config.rpcEnabled:
    RpcServer.init(config.rpcAddress, config.rpcPort)
  else:
    nil

  let restServer = if config.restEnabled:
    RestServerRef.init(config.restAddress, config.restPort)
  else:
    nil

  let
    netKeys = getPersistentNetKeys(rng[], config)
    nickname = if config.nodeName == "auto": shortForm(netKeys)
               else: config.nodeName
    getBeaconTime = beaconClock.getBeaconTimeFn()
    network = createEth2Node(
      rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime,
      getStateField(dag.headState.data, genesis_validators_root))
    attestationPool = newClone(
      AttestationPool.init(dag, quarantine, onAttestationReceived)
    )
    syncCommitteeMsgPool = newClone(
      SyncCommitteeMsgPool.init(onSyncContribution)
    )
    exitPool = newClone(ExitPool.init(dag, onVoluntaryExitAdded))

  case config.slashingDbKind
  of SlashingDbKind.v2:
    discard
  of SlashingDbKind.v1:
    error "Slashing DB v1 is no longer supported for writing"
    quit 1
  of SlashingDbKind.both:
    warn "Slashing DB v1 deprecated, writing only v2"

  info "Loading slashing protection database (v2)",
    path = config.validatorsDir()

  let
    slashingProtectionDB =
      SlashingProtectionDB.init(
        getStateField(dag.headState.data, genesis_validators_root),
        config.validatorsDir(), SlashingDbName)
    validatorPool = newClone(ValidatorPool.init(slashingProtectionDB))

    consensusManager = ConsensusManager.new(
      dag, attestationPool, quarantine
    )
    blockProcessor = BlockProcessor.new(
      config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
      consensusManager, getBeaconTime)
    processor = Eth2Processor.new(
      config.doppelgangerDetection,
      blockProcessor, dag, attestationPool, exitPool, validatorPool,
      syncCommitteeMsgPool, quarantine, rng, getBeaconTime, taskpool)

  var node = BeaconNode(
    nickname: nickname,
    graffitiBytes: if config.graffiti.isSome: config.graffiti.get.GraffitiBytes
                   else: defaultGraffitiBytes(),
    network: network,
    netKeys: netKeys,
    db: db,
    config: config,
    dag: dag,
    gossipState: GossipState.Disconnected,
    quarantine: quarantine,
    attestationPool: attestationPool,
    syncCommitteeMsgPool: syncCommitteeMsgPool,
    attachedValidators: validatorPool,
    exitPool: exitPool,
    eth1Monitor: eth1Monitor,
    rpcServer: rpcServer,
    restServer: restServer,
    eventBus: eventBus,
    processor: processor,
    blockProcessor: blockProcessor,
    consensusManager: consensusManager,
    requestManager: RequestManager.init(network, blockProcessor),
    beaconClock: beaconClock,
    taskpool: taskpool,
    onAttestationSent: onAttestationSent
  )

  # set topic validation routine
  network.setValidTopics(
    block:
      var
        topics = @[
          getBeaconBlocksTopic(network.forkDigests.phase0),
          getAttesterSlashingsTopic(network.forkDigests.phase0),
          getProposerSlashingsTopic(network.forkDigests.phase0),
          getVoluntaryExitsTopic(network.forkDigests.phase0),
          getAggregateAndProofsTopic(network.forkDigests.phase0),

          getBeaconBlocksTopic(dag.forkDigests.altair),
          getAttesterSlashingsTopic(network.forkDigests.altair),
          getProposerSlashingsTopic(network.forkDigests.altair),
          getVoluntaryExitsTopic(network.forkDigests.altair),
          getAggregateAndProofsTopic(network.forkDigests.altair),
        ]
      if not config.verifyFinalization:
        topics &= getSyncCommitteeContributionAndProofTopic(network.forkDigests.altair)
      for subnet_id in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
        topics &= getAttestationTopic(network.forkDigests.phase0, SubnetId(subnet_id))
        topics &= getAttestationTopic(network.forkDigests.altair, SubnetId(subnet_id))
      if not config.verifyFinalization:
        for subnet_id in allSyncCommittees():
          topics &= getSyncCommitteeTopic(network.forkDigests.altair, subnet_id)
      topics)
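  # Topics are registered for both the phase0 and the altair fork digest so
  # that gossip validation keeps accepting traffic on either side of the
  # Altair transition; which of them the node actually subscribes to is
  # decided at runtime by the GossipState handling further below.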

  if node.config.inProcessValidators:
    node.addLocalValidators()
  else:
    let cmd = getAppDir() / "nimbus_signing_process".addFileExt(ExeExt)
    let args = [$node.config.validatorsDir, $node.config.secretsDir]
    let workdir = io2.getCurrentDir().tryGet()
    node.vcProcess = try: startProcess(cmd, workdir, args)
    except CatchableError as exc: raise exc
    except Exception as exc: raiseAssert exc.msg
    node.addRemoteValidators()

  network.initBeaconSync(dag, getBeaconTime)

  node.updateValidatorMetrics()

  return node

func verifyFinalization(node: BeaconNode, slot: Slot) =
  # Epoch must be >= 4 to check finalization
  const SETTLING_TIME_OFFSET = 1'u64
  let epoch = slot.compute_epoch_at_slot()

  # Don't static-assert this -- if this isn't called, don't require it
  doAssert SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET

  # Intentionally, loudly assert. Point is to fail visibly and unignorably
  # during testing.
  if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET:
    let finalizedEpoch =
      node.dag.finalizedHead.slot.compute_epoch_at_slot()
    # Finalization rule 234, that has the most lag slots among the cases, sets
    # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
    # and then state.slot gets incremented, to increase the maximum offset, if
    # finalization occurs every slot, to 4 slots vs scheduledSlot.
    doAssert finalizedEpoch + 4 >= epoch
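    # For example, at wall epoch 10 the assert requires finalizedEpoch >= 6,
    # i.e. it tolerates at most four epochs of finalization lag once past the
    # per-epoch settling window.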

func toBitArray(stabilitySubnets: auto): BitArray[ATTESTATION_SUBNET_COUNT] =
  for subnetInfo in stabilitySubnets:
    result[subnetInfo.subnet_id.int] = true

proc getAttachedValidators(node: BeaconNode):
    Table[ValidatorIndex, AttachedValidator] =
  for validatorIndex in 0 ..<
      getStateField(node.dag.headState.data, validators).len:
    let attachedValidator = node.getAttachedValidator(
      getStateField(node.dag.headState.data, validators),
      validatorIndex.ValidatorIndex)
    if attachedValidator.isNil:
      continue
    result[validatorIndex.ValidatorIndex] = attachedValidator

proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
  doAssert epoch >= 1
  let
    attachedValidators = node.getAttachedValidators()
    validatorIndices = toIntSet(toSeq(attachedValidators.keys()))

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#lookahead
  # Only subscribe when this node should aggregate; libp2p broadcasting works
  # on subnet topics regardless.
  let epochRef = node.dag.getEpochRef(node.dag.head, epoch)

  # Update proposals
  node.attestationSubnets.proposingSlots[epoch mod 2] = 0
  for i, proposer in epochRef.beacon_proposers:
    if proposer.isSome and proposer.get() in attachedValidators:
      node.attestationSubnets.proposingSlots[epoch mod 2] =
        node.attestationSubnets.proposingSlots[epoch mod 2] or (1'u32 shl i)

  # Update attestations
  template isAnyCommitteeValidatorAggregating(
      validatorIndices, committeeLen: untyped, slot: Slot): bool =
    anyIt(
      validatorIndices,
      is_aggregator(
        committeeLen,
        await attachedValidators[it.ValidatorIndex].getSlotSig(
          getStateField(node.dag.headState.data, fork),
          getStateField(
            node.dag.headState.data, genesis_validators_root), slot)))

  node.attestationSubnets.lastCalculatedEpoch = epoch
  node.attestationSubnets.attestingSlots[epoch mod 2] = 0

  # The relevant bitmaps are 32 bits each.
  static: doAssert SLOTS_PER_EPOCH <= 32

  for (validatorIndices, committeeIndex, subnet_id, slot) in
      get_committee_assignments(epochRef, epoch, validatorIndices):

    doAssert compute_epoch_at_slot(slot) == epoch

    # Each get_committee_assignments() call here is on the next epoch. At any
    # given time, only care about two epochs, the current and next epoch. So,
    # after it is done for an epoch, [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
    # provides, sequentially, the current and next epochs' slot schedules. If
    # get_committee_assignments() has not been called for the next epoch yet,
    # typically because there hasn't been a block in the current epoch, there
    # isn't valid information in aS[1 - (epoch mod 2)], and only slots within
    # the current epoch can be known. Usually, this is not a major issue, but
    # when there hasn't been a block substantially through an epoch, it might
    # prove misleading to claim that there aren't attestations known, when it
    # only might be known either way for 3 more slots. However, it's also not
    # as important to attest when blocks aren't flowing as only attestations
    # in blocks garner rewards.
    node.attestationSubnets.attestingSlots[epoch mod 2] =
      node.attestationSubnets.attestingSlots[epoch mod 2] or
        (1'u32 shl (slot mod SLOTS_PER_EPOCH))
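    # Illustration: with SLOTS_PER_EPOCH = 32, an attestation duty at the
    # sixth slot of the epoch (slot mod SLOTS_PER_EPOCH == 5) sets bit 5 of
    # the 32-bit attestingSlots word for this epoch via `1'u32 shl 5`.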

    if not isAnyCommitteeValidatorAggregating(
        validatorIndices,
        get_beacon_committee_len(epochRef, slot, committeeIndex), slot):
      continue

    node.attestationSubnets.unsubscribeSlot[subnet_id.uint64] =
      max(slot + 1, node.attestationSubnets.unsubscribeSlot[subnet_id.uint64])
    if not node.attestationSubnets.aggregateSubnets[subnet_id.uint64]:
      # The lead time here allows for the gossip mesh to stabilise well before
      # attestations start flowing on the channel - the downside of a long lead
      # time is that we waste bandwidth and CPU on traffic we're not strictly
      # interested in - it could potentially be decreased, especially when peers
      # are selected based on their stability subnet connectivity
      const SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS = 6

      node.attestationSubnets.subscribeSlot[subnet_id.uint64] =
        # Queue upcoming subscription potentially earlier
        # SLOTS_PER_EPOCH emulates one boundary condition of the per-epoch
        # cycling mechanism timing buffers
        min(
          slot - min(slot.uint64, SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS),
          node.attestationSubnets.subscribeSlot[subnet_id.uint64])
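      # For example, a first aggregation duty at slot 100 on a subnet we are
      # not yet subscribed to schedules the subscription for slot 94 at the
      # latest (slot - 6), giving the mesh a few slots to form beforehand.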

func updateStabilitySubnets(node: BeaconNode, slot: Slot): BitArray[ATTESTATION_SUBNET_COUNT] =
  # Equivalent to wallSlot by cycleAttestationSubnets(), especially
  # since it'll try to run early in epochs, avoiding race conditions.
  let epoch = slot.epoch

  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
  for ss in node.attestationSubnets.stabilitySubnets.mitems():
    if epoch >= ss.expiration:
      ss.subnet_id = node.network.getRandomSubnetId()
      ss.expiration = epoch + node.network.getStabilitySubnetLength()

    result[ss.subnet_id.int] = true

proc cycleAttestationSubnetsPerEpoch(
    node: BeaconNode, wallSlot: Slot,
    prevStabilitySubnets: BitArray[ATTESTATION_SUBNET_COUNT]):
    Future[BitArray[ATTESTATION_SUBNET_COUNT]] {.async.} =
  # Per-epoch portion of subnet cycling: updating stability subnets and
  # calculating future attestation subnets.

  # Only know RANDAO mix, which determines shuffling seed, one epoch in
  # advance. When getStateField(node.dag.headState, slot).epoch is
  # ahead of wallSlot, the clock's just incorrect. If the slot's behind
  # wallSlot, it would have to look more than MIN_SEED_LOOKAHEAD epochs
  # ahead to compute the shuffling determining the beacon committees.
  static: doAssert MIN_SEED_LOOKAHEAD == 1
  if getStateField(node.dag.headState.data, slot).epoch != wallSlot.epoch:
    debug "Requested attestation subnets too far in advance",
      wallSlot,
      stateSlot = getStateField(node.dag.headState.data, slot)
    return prevStabilitySubnets

  # This works so long as at least one block in an epoch provides a basis for
  # calculating the shuffling for the next epoch. It will keep checking for a
  # block, each slot, until a block comes in, even if the first few blocks in
  # an epoch are missing. If a whole epoch without blocks occurs, it's not as
  # important to attest regardless, as those upcoming blocks will hit maximum
  # attestations quickly and any individual attestation's likelihood of being
  # selected is low.
  if node.attestationSubnets.nextCycleEpoch <= wallSlot.epoch:
    await node.updateSubscriptionSchedule(wallSlot.epoch + 1)
    node.attestationSubnets.nextCycleEpoch = wallSlot.epoch + 1

  let stabilitySubnets = node.updateStabilitySubnets(wallSlot)

  if not node.config.subscribeAllSubnets and
      stabilitySubnets != prevStabilitySubnets:
    # In subscribeAllSubnets mode, this only gets set once, at initial subnet
    # attestation handler creation, since they're all considered as stability
    # subnets in that case.
    node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  return stabilitySubnets

func subnetLog(v: BitArray): string =
  $toSeq(v.oneIndices())

proc cycleAttestationSubnets(node: BeaconNode, wallSlot: Slot) {.async.} =
  static: doAssert RANDOM_SUBNETS_PER_VALIDATOR == 1
  doAssert not node.config.subscribeAllSubnets

  let
    prevStabilitySubnets =
      node.attestationSubnets.stabilitySubnets.toBitArray()
    stabilitySubnets =
      await node.cycleAttestationSubnetsPerEpoch(wallSlot, prevStabilitySubnets)

  let prevAggregateSubnets = node.attestationSubnets.aggregateSubnets

  for i in 0..<node.attestationSubnets.aggregateSubnets.len():
    if node.attestationSubnets.aggregateSubnets[i]:
      if wallSlot >= node.attestationSubnets.unsubscribeSlot[i]:
        node.attestationSubnets.aggregateSubnets[i] = false
    else:
      if wallSlot >= node.attestationSubnets.subscribeSlot[i]:
        node.attestationSubnets.aggregateSubnets[i] = true

  # Accounting specific to non-stability subnets
  for i in (prevAggregateSubnets - node.attestationSubnets.aggregateSubnets).
      oneIndices():
    node.attestationSubnets.subscribeSlot[i] = FAR_FUTURE_SLOT

  let
    prevAllSubnets = prevAggregateSubnets + prevStabilitySubnets
    allSubnets = node.attestationSubnets.aggregateSubnets + stabilitySubnets
    unsubscribeSubnets = prevAllSubnets - allSubnets
    subscribeSubnets = allSubnets - prevAllSubnets

  case node.gossipState
  of GossipState.Disconnected:
    discard
  of GossipState.ConnectedToPhase0:
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, node.dag.forkDigests.phase0)
    node.network.subscribeAttestationSubnets(subscribeSubnets, node.dag.forkDigests.phase0)
  of GossipState.InTransitionToAltair:
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, node.dag.forkDigests.phase0)
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, node.dag.forkDigests.altair)
    node.network.subscribeAttestationSubnets(subscribeSubnets, node.dag.forkDigests.phase0)
    node.network.subscribeAttestationSubnets(subscribeSubnets, node.dag.forkDigests.altair)
  of GossipState.ConnectedToAltair:
    node.network.unsubscribeAttestationSubnets(unsubscribeSubnets, node.dag.forkDigests.altair)
    node.network.subscribeAttestationSubnets(subscribeSubnets, node.dag.forkDigests.altair)
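  # Subnet (un)subscriptions follow the gossip fork schedule: before the
  # Altair transition only the phase0 fork digest is touched, during the
  # transition window both digests are updated in lockstep, and afterwards
  # only the altair digest remains active.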

  debug "Attestation subnets",
    wallSlot,
    wallEpoch = wallSlot.epoch,
    prevAggregateSubnets = subnetLog(prevAggregateSubnets),
    aggregateSubnets = subnetLog(node.attestationSubnets.aggregateSubnets),
    prevStabilitySubnets = subnetLog(prevStabilitySubnets),
    stabilitySubnets = subnetLog(stabilitySubnets),
    subscribeSubnets = subnetLog(subscribeSubnets),
    unsubscribeSubnets = subnetLog(unsubscribeSubnets)

proc getInitialAggregateSubnets(node: BeaconNode): Table[SubnetId, Slot] =
  let
    wallEpoch = node.beaconClock.now.slotOrZero.epoch
    validatorIndices = toIntSet(toSeq(node.getAttachedValidators().keys()))

  template mergeAggregateSubnets(epoch: Epoch) =
    # TODO when https://github.com/nim-lang/Nim/issues/15972 and
    # https://github.com/nim-lang/Nim/issues/16217 are fixed, in
    # Nimbus's Nim, use (_, _, subnetIndex, slot).
    let epochRef = node.dag.getEpochRef(node.dag.head, epoch)
    for (_, ci, subnet_id, slot) in get_committee_assignments(
        epochRef, epoch, validatorIndices):
      result.withValue(subnet_id, v) do:
        v[] = max(v[], slot + 1)
      do:
        result[subnet_id] = slot + 1

  # Either wallEpoch is 0, in which case it might be pre-genesis, but we only
  # care about the already-known first two epochs of attestations, or it's in
  # epoch 0 for real, in which case both are also already known; or wallEpoch
  # is greater than 0, in which case it's being called from onSlotStart which
  # has enough state to calculate wallEpoch + {0,1}'s attestations.
  mergeAggregateSubnets(wallEpoch)
  mergeAggregateSubnets(wallEpoch + 1)

proc subscribeAttestationSubnetHandlers(node: BeaconNode,
                                        forkDigest: ForkDigest) =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
  # TODO:
  # We might want to reuse the previous stability subnet if not expired when:
  # - Restarting the node with a persistent netkey
  # - When going from synced -> syncing -> synced state

  if node.config.subscribeAllSubnets:
    # In all-subnets mode, we create a stability subnet subscription for every
    # subnet - this will be propagated in the attnets ENR entry
    node.attestationSubnets.stabilitySubnets.setLen(ATTESTATION_SUBNET_COUNT)
    for i, ss in node.attestationSubnets.stabilitySubnets.mpairs():
      ss.subnet_id = SubnetId(i)
      ss.expiration = FAR_FUTURE_EPOCH
  else:
    let wallEpoch = node.beaconClock.now.slotOrZero.epoch

    # TODO make length dynamic when validator-client-based validators join and leave
    # In normal mode, there's one subnet subscription per validator, changing
    # randomly over time
    node.attestationSubnets.stabilitySubnets.setLen(
      node.attachedValidators[].count)
    for i, ss in node.attestationSubnets.stabilitySubnets.mpairs():
      ss.subnet_id = node.network.getRandomSubnetId()
      ss.expiration = wallEpoch + node.network.getStabilitySubnetLength()

  let stabilitySubnets =
    node.attestationSubnets.stabilitySubnets.toBitArray()
  node.network.updateStabilitySubnetMetadata(stabilitySubnets)

  let
    aggregateSubnets = node.getInitialAggregateSubnets()
  for i in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
    if SubnetId(i) in aggregateSubnets:
      node.attestationSubnets.aggregateSubnets[i] = true
      node.attestationSubnets.unsubscribeSlot[i] =
        try: aggregateSubnets[SubnetId(i)] except KeyError: raiseAssert "checked with in"
    else:
      node.attestationSubnets.aggregateSubnets[i] = false
      node.attestationSubnets.subscribeSlot[i] = FAR_FUTURE_SLOT

  node.attestationSubnets.enabled = true

  debug "Initial attestation subnets subscribed",
    aggregateSubnets = subnetLog(node.attestationSubnets.aggregateSubnets),
    stabilitySubnets = subnetLog(stabilitySubnets)
  node.network.subscribeAttestationSubnets(
    node.attestationSubnets.aggregateSubnets + stabilitySubnets,
    forkDigest)

# inspired by lighthouse research here
# https://gist.github.com/blacktemplar/5c1862cb3f0e32a1a7fb0b25e79e6e2c#file-generate-scoring-params-py
const
  blocksTopicParams = TopicParams(
    topicWeight: 0.5,
    timeInMeshWeight: 0.03333333333333333,
    timeInMeshQuantum: chronos.seconds(12),
    timeInMeshCap: 300,
    firstMessageDeliveriesWeight: 1.1471603557060206,
    firstMessageDeliveriesDecay: 0.9928302477768374,
    firstMessageDeliveriesCap: 34.86870846001471,
    meshMessageDeliveriesWeight: -458.31054878249114,
    meshMessageDeliveriesDecay: 0.9716279515771061,
    meshMessageDeliveriesThreshold: 0.6849191409056553,
    meshMessageDeliveriesCap: 2.054757422716966,
    meshMessageDeliveriesActivation: chronos.seconds(384),
    meshMessageDeliveriesWindow: chronos.seconds(2),
    meshFailurePenaltyWeight: -458.31054878249114,
    meshFailurePenaltyDecay: 0.9716279515771061,
    invalidMessageDeliveriesWeight: -214.99999999999994,
    invalidMessageDeliveriesDecay: 0.9971259067705325
  )
  aggregateTopicParams = TopicParams(
    topicWeight: 0.5,
    timeInMeshWeight: 0.03333333333333333,
    timeInMeshQuantum: chronos.seconds(12),
    timeInMeshCap: 300,
    firstMessageDeliveriesWeight: 0.10764904539552399,
    firstMessageDeliveriesDecay: 0.8659643233600653,
    firstMessageDeliveriesCap: 371.5778421725158,
    meshMessageDeliveriesWeight: -0.07538533073670682,
    meshMessageDeliveriesDecay: 0.930572040929699,
    meshMessageDeliveriesThreshold: 53.404248450179836,
    meshMessageDeliveriesCap: 213.61699380071934,
    meshMessageDeliveriesActivation: chronos.seconds(384),
    meshMessageDeliveriesWindow: chronos.seconds(2),
    meshFailurePenaltyWeight: -0.07538533073670682,
    meshFailurePenaltyDecay: 0.930572040929699,
    invalidMessageDeliveriesWeight: -214.99999999999994,
    invalidMessageDeliveriesDecay: 0.9971259067705325
  )
  basicParams = TopicParams.init()

static:
  # compile time validation
  blocksTopicParams.validateParameters().tryGet()
  aggregateTopicParams.validateParameters().tryGet()
  basicParams.validateParameters.tryGet()

proc addPhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
  node.network.subscribe(getBeaconBlocksTopic(forkDigest), blocksTopicParams, enableTopicMetrics = true)
  node.network.subscribe(getAttesterSlashingsTopic(forkDigest), basicParams)
  node.network.subscribe(getProposerSlashingsTopic(forkDigest), basicParams)
  node.network.subscribe(getVoluntaryExitsTopic(forkDigest), basicParams)
  node.network.subscribe(getAggregateAndProofsTopic(forkDigest), aggregateTopicParams, enableTopicMetrics = true)

  node.subscribeAttestationSubnetHandlers(forkDigest)

proc addPhase0MessageHandlers(node: BeaconNode) =
  addPhase0MessageHandlers(node, node.dag.forkDigests.phase0)

proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
  node.network.unsubscribe(getBeaconBlocksTopic(forkDigest))
  node.network.unsubscribe(getVoluntaryExitsTopic(forkDigest))
  node.network.unsubscribe(getProposerSlashingsTopic(forkDigest))
  node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest))
  node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest))

  for subnet_id in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
    node.network.unsubscribe(
      getAttestationTopic(forkDigest, SubnetId(subnet_id)))

proc removePhase0MessageHandlers(node: BeaconNode) =
  removePhase0MessageHandlers(node, node.dag.forkDigests.phase0)

proc addAltairMessageHandlers(node: BeaconNode, slot: Slot) =
  node.addPhase0MessageHandlers(node.dag.forkDigests.altair)

  var syncnets: BitArray[SYNC_COMMITTEE_SUBNET_COUNT]

  # TODO: What are the best topic params for this?
  for committeeIdx in allSyncCommittees():
    closureScope:
      let idx = committeeIdx
      # TODO This should be done in dynamic way in trackSyncCommitteeTopics
      node.network.subscribe(getSyncCommitteeTopic(node.dag.forkDigests.altair, idx), basicParams)
      syncnets.setBit(idx.asInt)

  node.network.subscribe(getSyncCommitteeContributionAndProofTopic(node.dag.forkDigests.altair), basicParams)
  node.network.updateSyncnetsMetadata(syncnets)

proc removeAltairMessageHandlers(node: BeaconNode) =
  node.removePhase0MessageHandlers(node.dag.forkDigests.altair)

  for committeeIdx in allSyncCommittees():
    closureScope:
      let idx = committeeIdx
      # TODO This should be done in dynamic way in trackSyncCommitteeTopics
      node.network.unsubscribe(getSyncCommitteeTopic(node.dag.forkDigests.altair, idx))

  node.network.unsubscribe(getSyncCommitteeContributionAndProofTopic(node.dag.forkDigests.altair))

func getTopicSubscriptionEnabled(node: BeaconNode): bool =
  node.attestationSubnets.enabled

proc removeAllMessageHandlers(node: BeaconNode) =
  node.removePhase0MessageHandlers()
  node.removeAltairMessageHandlers()
2021-02-01 11:18:16 +00:00
|
|
|
proc setupDoppelgangerDetection(node: BeaconNode, slot: Slot) =
  # When another client's already running, this is very likely to detect
  # potential duplicate validators, which can trigger slashing.
  #
  # Every missed attestation costs approximately 3*get_base_reward(), which
  # can be up to around 10,000 Wei. Thus, skipping attestations isn't cheap
  # and one should gauge the likelihood of this simultaneous launch to tune
  # the epoch delay to one's perceived risk.
  const duplicateValidatorEpochs = 2

  node.processor.doppelgangerDetection.broadcastStartEpoch =
    slot.epoch + duplicateValidatorEpochs
  debug "Setting up doppelganger protection",
    epoch = slot.epoch,
    broadcastStartEpoch =
      node.processor.doppelgangerDetection.broadcastStartEpoch

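# A minimal sketch of the doppelganger window above (illustrative only,
# assuming the mainnet epoch length of 32 slots): a node started at wall
# slot 100 is in epoch 3, so broadcasting resumes at epoch 3 + 2 = 5 while
# epochs 3 and 4 are watched for another instance of the same validators.
#
#   let wallSlot = Slot(100)
#   doAssert wallSlot.epoch == Epoch(3)
#   doAssert wallSlot.epoch + duplicateValidatorEpochs == Epoch(5)
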
proc trackSyncCommitteeTopics*(node: BeaconNode) =
  # TODO
  discard

proc updateGossipStatus(node: BeaconNode, slot: Slot) {.raises: [Defect, CatchableError].} =
  # Syncing tends to be ~1 block/s; allow for an epoch of time for libp2p
  # subscribing to spin up. The faster the sync, the more wallSlot - headSlot
  # lead time is required
  const
    TOPIC_SUBSCRIBE_THRESHOLD_SLOTS = 64
    HYSTERESIS_BUFFER = 16

  let
    syncQueueLen = node.syncManager.syncQueueLen
    targetGossipState =
      # SyncManager forward sync by default runs until maxHeadAge slots, or one
      # epoch range is achieved. This particular condition has a couple caveats
      # including that under certain conditions, debtsCount appears to push len
      # (here, syncQueueLen) to underflow-like values; and even when exactly at
      # the expected walltime slot the queue isn't necessarily empty. Therefore
      # TOPIC_SUBSCRIBE_THRESHOLD_SLOTS is not exactly the number of slots that
      # are left. Furthermore, even when 0 peers are being used, this won't get
      # to 0 slots in syncQueueLen, but that's a vacuous condition given that a
      # networking interaction cannot happen under such circumstances.
      if syncQueueLen > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS:
        GossipState.Disconnected
      elif slot.epoch + 1 < node.dag.cfg.ALTAIR_FORK_EPOCH:
        GossipState.ConnectedToPhase0
      elif slot.epoch >= node.dag.cfg.ALTAIR_FORK_EPOCH:
        GossipState.ConnectedToAltair
      else:
        GossipState.InTransitionToAltair

  if node.gossipState == GossipState.Disconnected and
     targetGossipState != GossipState.Disconnected:
    # We are synced, so we will connect
    debug "Enabling topic subscriptions",
      wallSlot = slot,
      headSlot = node.dag.head.slot,
      syncQueueLen

    node.setupDoppelgangerDetection(slot)

  block addRemoveHandlers:
    case targetGossipState
    of GossipState.Disconnected:
      case node.gossipState:
      of GossipState.Disconnected: break
      else:
        if syncQueueLen > TOPIC_SUBSCRIBE_THRESHOLD_SLOTS + HYSTERESIS_BUFFER and
            # Filter out underflow from debtsCount; plausible queue lengths can't
            # exceed wallslot, with safety margin.
            syncQueueLen < 2 * slot.uint64:
          debug "Disabling topic subscriptions",
            wallSlot = slot,
            headSlot = node.dag.head.slot,
            syncQueueLen
          node.removeAllMessageHandlers()
          node.gossipState = GossipState.Disconnected
          break

    of GossipState.ConnectedToPhase0:
      case node.gossipState:
      of GossipState.ConnectedToPhase0: break
      of GossipState.Disconnected:
        node.addPhase0MessageHandlers()
      of GossipState.InTransitionToAltair:
        warn "Unexpected clock regression during altair transition"
        node.removeAltairMessageHandlers()
      of GossipState.ConnectedToAltair:
        warn "Unexpected clock regression during altair transition"
        node.removeAltairMessageHandlers()
        node.addPhase0MessageHandlers()

    of GossipState.InTransitionToAltair:
      case node.gossipState:
      of GossipState.InTransitionToAltair: break
      of GossipState.Disconnected:
        node.addPhase0MessageHandlers()
        node.addAltairMessageHandlers(slot)
      of GossipState.ConnectedToPhase0:
        node.addAltairMessageHandlers(slot)
      of GossipState.ConnectedToAltair:
        warn "Unexpected clock regression during altair transition"
        node.addPhase0MessageHandlers()

    of GossipState.ConnectedToAltair:
      case node.gossipState:
      of GossipState.ConnectedToAltair: break
      of GossipState.Disconnected:
        node.addAltairMessageHandlers(slot)
      of GossipState.ConnectedToPhase0:
        node.removePhase0MessageHandlers()
        node.addAltairMessageHandlers(slot)
      of GossipState.InTransitionToAltair:
        node.removePhase0MessageHandlers()

    node.gossipState = targetGossipState

  # Subscription or unsubscription might have occurred; recheck. Since Nimbus
  # initially subscribes to all subnets, simply do not ever cycle attestation
  # subnets and they'll all remain subscribed.
  if node.getTopicSubscriptionEnabled and not node.config.subscribeAllSubnets:
    # This exits early for all but one call each epoch.
    traceAsyncErrors node.cycleAttestationSubnets(slot)

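# How the target gossip state above maps onto its inputs, as an illustrative
# sketch (assuming ALTAIR_FORK_EPOCH = 74240, the mainnet value):
#
#   syncQueueLen > 64                 -> Disconnected (still syncing)
#   wall epoch + 1 < 74240            -> ConnectedToPhase0
#   wall epoch >= 74240               -> ConnectedToAltair
#   wall epoch == 74239 (otherwise)   -> InTransitionToAltair, i.e. subscribed
#                                        to both forks' topics one epoch early
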
func getNextValidatorAction(
    actionSlotSource: auto, lastCalculatedEpoch: Epoch, slot: Slot): Slot =
  # The relevant actions are in, depending on calculated bounds:
  # [aS[epoch mod 2], aS[1 - (epoch mod 2)]]
  #  current epoch          next epoch
  let orderedActionSlots = [
    actionSlotSource[     slot.epoch mod 2'u64],
    actionSlotSource[1 - (slot.epoch mod 2'u64)]]

  static: doAssert MIN_ATTESTATION_INCLUSION_DELAY == 1

  # Cleverer ways exist, but a short loop is fine. O(n) vs O(log n) isn't that
  # important when n is 32 or 64, with early exit on average no more than half
  # through.
  for i in [0'u64, 1'u64]:
    let bitmapEpoch = slot.epoch + i

    if bitmapEpoch > lastCalculatedEpoch:
      return FAR_FUTURE_SLOT

    for slotOffset in 0 ..< SLOTS_PER_EPOCH:
      let nextActionSlot =
        compute_start_slot_at_epoch(bitmapEpoch) + slotOffset
      if ((orderedActionSlots[i] and (1'u32 shl slotOffset)) != 0) and
          nextActionSlot > slot:
        return nextActionSlot

  FAR_FUTURE_SLOT

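# Illustrative sketch of the lookup above: the per-epoch bitmaps are indexed
# by epoch parity, so for wall slot 65 (epoch 2, assuming 32-slot epochs)
# the "current epoch" bitmap lives at index 2 mod 2 = 0 and the "next epoch"
# bitmap at index 1. With bit 5 set in the current-epoch bitmap, the next
# action slot would be compute_start_slot_at_epoch(Epoch(2)) + 5 = Slot(69),
# provided epoch 2 has already been calculated (lastCalculatedEpoch >= 2);
# otherwise the function returns FAR_FUTURE_SLOT.
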
proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
  # Things we do when slot processing has ended and we're about to wait for the
  # next slot

  if node.dag.needStateCachesAndForkChoicePruning():
    if node.attachedValidators.validators.len > 0:
      node.attachedValidators
          .slashingProtection
          # pruning is only done if the DB is set to pruning mode.
          .pruneAfterFinalization(
            node.dag.finalizedHead.slot.compute_epoch_at_slot()
          )

  # Delay part of pruning until latency critical duties are done.
  # The other part of pruning, `pruneBlocksDAG`, is done eagerly.
  # ----
  # This is the last pruning to do as it clears the "needPruning" condition.
  node.consensusManager[].pruneStateCachesAndForkChoice()

  when declared(GC_fullCollect):
    # The slots in the beacon node work as frames in a game: we want to make
    # sure that we're ready for the next one and don't get stuck in lengthy
    # garbage collection tasks when time is of essence in the middle of a slot -
    # while this does not guarantee that we'll never collect during a slot, it
    # makes sure that all the scratch space we used during slot tasks (logging,
    # temporary buffers etc) gets recycled for the next slot that is likely to
    # need similar amounts of memory.
    GC_fullCollect()

  # Checkpoint the database to clear the WAL file and make sure changes in
  # the database are synced with the filesystem.
  node.db.checkpoint()

  node.syncCommitteeMsgPool[].pruneData(slot)

  # -1 is a more useful output than 18446744073709551615 as an indicator of
  # no future attestation/proposal known.
  template displayInt64(x: Slot): int64 =
    if x == high(uint64).Slot:
      -1'i64
    else:
      toGaugeValue(x)

  let
    nextAttestationSlot = getNextValidatorAction(
      node.attestationSubnets.attestingSlots,
      node.attestationSubnets.lastCalculatedEpoch, slot)
    nextProposalSlot = getNextValidatorAction(
      node.attestationSubnets.proposingSlots,
      node.attestationSubnets.lastCalculatedEpoch, slot)
    nextActionWaitTime = saturate(fromNow(
      node.beaconClock, min(nextAttestationSlot, nextProposalSlot)))

  info "Slot end",
    slot = shortLog(slot),
    nextSlot = shortLog(slot + 1),
    head = shortLog(node.dag.head),
    headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
    finalizedHead = shortLog(node.dag.finalizedHead.blck),
    finalizedEpoch =
      shortLog(node.dag.finalizedHead.blck.slot.compute_epoch_at_slot()),
    nextAttestationSlot = displayInt64(nextAttestationSlot),
    nextProposalSlot = displayInt64(nextProposalSlot),
    nextActionWait =
      if nextAttestationSlot == FAR_FUTURE_SLOT:
        "n/a"
      else:
        shortLog(nextActionWaitTime)

  if nextAttestationSlot != FAR_FUTURE_SLOT:
    next_action_wait.set(nextActionWaitTime.toFloatSeconds)

  let epoch = slot.epoch
  if epoch + 1 >= node.network.forkId.next_fork_epoch:
    # Update 1 epoch early to block non-fork-ready peers
    node.network.updateForkId(epoch, node.dag.genesisValidatorsRoot)

  node.updateGossipStatus(slot)

  # When we're not behind schedule, we'll speculatively update the clearance
  # state in anticipation of receiving the next block - we do it after logging
  # slot end since the nextActionWaitTime can be short
  let
    advanceCutoff = node.beaconClock.fromNow(
      slot.toBeaconTime(chronos.seconds(int(SECONDS_PER_SLOT - 1))))
  if advanceCutoff.inFuture:
    # We wait until there's only a second left before the next slot begins, then
    # we advance the clearance state to the next slot - this gives us a high
    # probability of being prepared for the block that will arrive and the
    # epoch processing that follows
    await sleepAsync(advanceCutoff.offset)
    node.dag.advanceClearanceState()

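# Timing sketch for the speculative state advancement above (illustrative,
# assuming the mainnet SECONDS_PER_SLOT of 12): for slot N the cutoff is
# the start of slot N plus 11 seconds, i.e. one second before slot N + 1.
# If that point is still in the future we sleep until then and only then
# call advanceClearanceState, so the epoch/state processing happens while
# the network is otherwise quiet.
#
#   cutoff = slot.toBeaconTime(chronos.seconds(11))   # 12 - 1
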
proc onSlotStart(
    node: BeaconNode, wallTime: BeaconTime, lastSlot: Slot) {.async.} =
  ## Called at the beginning of a slot - usually every slot, but sometimes might
  ## skip a few in case we're running late.
  ## wallTime: current system time - we will strive to perform all duties up
  ##           to this point in time
  ## lastSlot: the last slot that we successfully processed, so we know where to
  ##           start work from - there might be jumps if processing is delayed
  let
    # The slot we should be at, according to the clock
    wallSlot = wallTime.slotOrZero
    # If everything was working perfectly, the slot that we should be processing
    expectedSlot = lastSlot + 1
    finalizedEpoch =
      node.dag.finalizedHead.blck.slot.compute_epoch_at_slot()
    delay = wallTime - expectedSlot.toBeaconTime()

  info "Slot start",
    lastSlot = shortLog(lastSlot),
    wallSlot = shortLog(wallSlot),
    delay = shortLog(delay),
    peers = len(node.network.peerPool),
    head = shortLog(node.dag.head),
    headEpoch = shortLog(node.dag.head.slot.compute_epoch_at_slot()),
    finalized = shortLog(node.dag.finalizedHead.blck),
    finalizedEpoch = shortLog(finalizedEpoch),
    sync =
      if node.syncManager.inProgress: node.syncManager.syncStatus
      else: "synced"

  # Check before any re-scheduling of onSlotStart()
  checkIfShouldStopAtEpoch(wallSlot, node.config.stopAtEpoch)

  beacon_slot.set wallSlot.toGaugeValue
  beacon_current_epoch.set wallSlot.epoch.toGaugeValue

  # both non-negative, so difference can't overflow or underflow int64
  finalization_delay.set(
    wallSlot.epoch.toGaugeValue - finalizedEpoch.toGaugeValue)

  if node.config.verifyFinalization:
    verifyFinalization(node, wallSlot)

  node.consensusManager[].updateHead(wallSlot)

  await node.handleValidatorDuties(lastSlot, wallSlot)

  await onSlotEnd(node, wallSlot)

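# Worked example for the start-of-slot bookkeeping above (illustrative
# only): if the clock says wall slot 1000 but the last processed slot was
# 998, then expectedSlot is 999 and `delay` is how far past the start of
# slot 999 the wall clock has drifted. Likewise, with wall epoch 31 and
# finalized epoch 28, finalization_delay is set to 31 - 28 = 3, which is
# the "how far behind is finality" number exported to metrics.
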
proc handleMissingBlocks(node: BeaconNode) =
  let missingBlocks = node.quarantine.checkMissing()
  if missingBlocks.len > 0:
    debug "Requesting detected missing blocks", blocks = shortLog(missingBlocks)
    node.requestManager.fetchAncestorBlocks(missingBlocks)

proc onSecond(node: BeaconNode) =
  ## This procedure will be called once per second.
  if not(node.syncManager.inProgress):
    node.handleMissingBlocks()

proc runOnSecondLoop(node: BeaconNode) {.async.} =
  let sleepTime = chronos.seconds(1)
  const nanosecondsIn1s = float(chronos.seconds(1).nanoseconds)
  while true:
    let start = chronos.now(chronos.Moment)
    await chronos.sleepAsync(sleepTime)
    let afterSleep = chronos.now(chronos.Moment)
    let sleepTime = afterSleep - start
    node.onSecond()
    let finished = chronos.now(chronos.Moment)
    let processingTime = finished - afterSleep
    ticks_delay.set(sleepTime.nanoseconds.float / nanosecondsIn1s)
    trace "onSecond task completed", sleepTime, processingTime

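# Sketch of the loop-lag measurement above (illustrative): a sleep that was
# requested for 1 s but actually took 1.25 s yields
# ticks_delay = 1_250_000_000 / 1_000_000_000 = 1.25, so values well above
# 1.0 in the ticks_delay gauge indicate that the event loop is falling
# behind on its once-per-second housekeeping.
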
proc startSyncManager(node: BeaconNode) =
  func getLocalHeadSlot(): Slot =
    node.dag.head.slot

  proc getLocalWallSlot(): Slot =
    node.beaconClock.now.slotOrZero

  func getFirstSlotAtFinalizedEpoch(): Slot =
    node.dag.finalizedHead.slot

  proc scoreCheck(peer: Peer): bool =
    if peer.score < PeerScoreLowLimit:
      false
    else:
      true

  proc onDeletePeer(peer: Peer) =
    if peer.connectionState notin {ConnectionState.Disconnecting,
                                   ConnectionState.Disconnected}:
      if peer.score < PeerScoreLowLimit:
        debug "Peer was removed from PeerPool due to low score", peer = peer,
              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
              score_high_limit = PeerScoreHighLimit
        asyncSpawn(try: peer.disconnect(PeerScoreLow)
                   except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!
      else:
        debug "Peer was removed from PeerPool", peer = peer,
              peer_score = peer.score, score_low_limit = PeerScoreLowLimit,
              score_high_limit = PeerScoreHighLimit
        asyncSpawn(try: peer.disconnect(FaultOrError)
                   except Exception as exc: raiseAssert exc.msg) # Shouldn't actually happen!

  node.network.peerPool.setScoreCheck(scoreCheck)
  node.network.peerPool.setOnDeletePeer(onDeletePeer)

  node.syncManager = newSyncManager[Peer, PeerID](
    node.network.peerPool, getLocalHeadSlot, getLocalWallSlot,
    getFirstSlotAtFinalizedEpoch, node.blockProcessor, chunkSize = 32
  )
  node.syncManager.start()

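# The score check above is the whole peer-retention policy: a peer stays in
# the pool exactly while its score is at or above PeerScoreLowLimit (the
# limit values are defined elsewhere in the sync code). Illustrative truth
# table:
#
#   peer.score < PeerScoreLowLimit   -> scoreCheck = false, peer is dropped
#   peer.score >= PeerScoreLowLimit  -> scoreCheck = true,  peer is kept
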
func connectedPeersCount(node: BeaconNode): int =
  len(node.network.peerPool)

proc installRpcHandlers(rpcServer: RpcServer, node: BeaconNode) {.
    raises: [Defect, CatchableError].} =
  rpcServer.installBeaconApiHandlers(node)
  rpcServer.installConfigApiHandlers(node)
  rpcServer.installDebugApiHandlers(node)
  rpcServer.installEventApiHandlers(node)
  rpcServer.installNimbusApiHandlers(node)
  rpcServer.installNodeApiHandlers(node)
  rpcServer.installValidatorApiHandlers(node)

proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) =
  restServer.router.installBeaconApiHandlers(node)
  restServer.router.installConfigApiHandlers(node)
  restServer.router.installDebugApiHandlers(node)
  restServer.router.installEventApiHandlers(node)
  restServer.router.installNimbusApiHandlers(node)
  restServer.router.installNodeApiHandlers(node)
  restServer.router.installValidatorApiHandlers(node)
  if node.config.validatorApiEnabled:
    restServer.router.installValidatorManagementHandlers(node)

proc installMessageValidators(node: BeaconNode) =
  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
  # These validators stay around the whole time, regardless of which specific
  # subnets are subscribed to during any given epoch.

  # TODO altair-transition, well, without massive copy/pasting (extract to template or etc)
  for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
    closureScope:
      let subnet_id = SubnetId(it)
      node.network.addAsyncValidator(
        getAttestationTopic(node.dag.forkDigests.phase0, subnet_id),
        # This proc needs to be within closureScope; don't lift out of loop.
        proc(attestation: Attestation): Future[ValidationResult] =
          node.processor.attestationValidator(attestation, subnet_id))

  node.network.addAsyncValidator(
    getAggregateAndProofsTopic(node.dag.forkDigests.phase0),
    proc(signedAggregateAndProof: SignedAggregateAndProof): Future[ValidationResult] =
      node.processor.aggregateValidator(signedAggregateAndProof))

  node.network.addValidator(
    getBeaconBlocksTopic(node.dag.forkDigests.phase0),
    proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
      node.processor[].blockValidator(signedBlock))

  node.network.addValidator(
    getAttesterSlashingsTopic(node.dag.forkDigests.phase0),
    proc (attesterSlashing: AttesterSlashing): ValidationResult =
      node.processor[].attesterSlashingValidator(attesterSlashing))

  node.network.addValidator(
    getProposerSlashingsTopic(node.dag.forkDigests.phase0),
    proc (proposerSlashing: ProposerSlashing): ValidationResult =
      node.processor[].proposerSlashingValidator(proposerSlashing))

  node.network.addValidator(
    getVoluntaryExitsTopic(node.dag.forkDigests.phase0),
    proc (signedVoluntaryExit: SignedVoluntaryExit): ValidationResult =
      node.processor[].voluntaryExitValidator(signedVoluntaryExit))

  # TODO copy/paste starts here; templatize whole thing
  for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
    closureScope:
      let subnet_id = SubnetId(it)
      node.network.addAsyncValidator(
        getAttestationTopic(node.dag.forkDigests.altair, subnet_id),
        # This proc needs to be within closureScope; don't lift out of loop.
        proc(attestation: Attestation): Future[ValidationResult] =
          node.processor.attestationValidator(attestation, subnet_id))

  node.network.addAsyncValidator(
    getAggregateAndProofsTopic(node.dag.forkDigests.altair),
    proc(signedAggregateAndProof: SignedAggregateAndProof): Future[ValidationResult] =
      node.processor.aggregateValidator(signedAggregateAndProof))

  node.network.addValidator(
    getBeaconBlocksTopic(node.dag.forkDigests.altair),
    proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
      node.processor[].blockValidator(signedBlock))

  node.network.addValidator(
    getAttesterSlashingsTopic(node.dag.forkDigests.altair),
    proc (attesterSlashing: AttesterSlashing): ValidationResult =
      node.processor[].attesterSlashingValidator(attesterSlashing))

  node.network.addValidator(
    getProposerSlashingsTopic(node.dag.forkDigests.altair),
    proc (proposerSlashing: ProposerSlashing): ValidationResult =
      node.processor[].proposerSlashingValidator(proposerSlashing))

  node.network.addValidator(
    getVoluntaryExitsTopic(node.dag.forkDigests.altair),
    proc (signedVoluntaryExit: SignedVoluntaryExit): ValidationResult =
      node.processor[].voluntaryExitValidator(signedVoluntaryExit))

  for committeeIdx in allSyncCommittees():
    closureScope:
      let idx = committeeIdx
      node.network.addValidator(
        getSyncCommitteeTopic(node.dag.forkDigests.altair, idx),
        # This proc needs to be within closureScope; don't lift out of loop.
        proc(msg: SyncCommitteeMessage): ValidationResult =
          node.processor.syncCommitteeMsgValidator(msg, idx))

  node.network.addValidator(
    getSyncCommitteeContributionAndProofTopic(node.dag.forkDigests.altair),
    proc(msg: SignedContributionAndProof): ValidationResult =
      node.processor.syncCommitteeContributionValidator(msg))

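# Sketch of what registering a validator means (illustrative, simplified):
# each callback maps an incoming gossip message to a ValidationResult, and
# only messages for which the callback returns ValidationResult.Accept are
# rebroadcast to other peers; Ignore and Reject stop propagation. For
# example, a block validator shaped like
#
#   proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
#     node.processor[].blockValidator(signedBlock)
#
# is consulted by gossipsub before the block is forwarded on the
# beacon_block topic.
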
proc stop*(node: BeaconNode) =
  bnStatus = BeaconNodeStatus.Stopping
  notice "Graceful shutdown"
  if not node.config.inProcessValidators:
    try:
      node.vcProcess.close()
    except Exception as exc:
      warn "Couldn't close vc process", msg = exc.msg
  try:
    waitFor node.network.stop()
  except CatchableError as exc:
    warn "Couldn't stop network", msg = exc.msg

  node.attachedValidators.slashingProtection.close()
  node.db.close()
  notice "Databases closed"

proc run*(node: BeaconNode) {.raises: [Defect, CatchableError].} =
  if bnStatus == BeaconNodeStatus.Starting:
    # it might have been set to "Stopping" with Ctrl+C
    bnStatus = BeaconNodeStatus.Running

    if not(isNil(node.rpcServer)):
      node.rpcServer.installRpcHandlers(node)
      node.rpcServer.start()

    if not(isNil(node.restServer)):
      node.restServer.installRestHandlers(node)
      node.restServer.start()

    node.installMessageValidators()

    let startTime = node.beaconClock.now()
    asyncSpawn runSlotLoop(node, startTime, onSlotStart)
    asyncSpawn runOnSecondLoop(node)
    asyncSpawn runQueueProcessingLoop(node.blockProcessor)

    node.requestManager.start()
    node.startSyncManager()

    node.updateGossipStatus(startTime.slotOrZero)

  ## Ctrl+C handling
  proc controlCHandler() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      try:
        setupForeignThreadGc()
      except Exception as exc: raiseAssert exc.msg # shouldn't happen
    notice "Shutting down after having received SIGINT"
    bnStatus = BeaconNodeStatus.Stopping
  try:
    setControlCHook(controlCHandler)
  except Exception as exc: # TODO Exception
    warn "Cannot set ctrl-c handler", msg = exc.msg

  # equivalent SIGTERM handler
  when defined(posix):
    proc SIGTERMHandler(signal: cint) {.noconv.} =
      notice "Shutting down after having received SIGTERM"
      bnStatus = BeaconNodeStatus.Stopping
    c_signal(SIGTERM, SIGTERMHandler)

  # main event loop
  while bnStatus == BeaconNodeStatus.Running:
    poll() # if poll fails, the network is broken

  # time to say goodbye
  node.stop()

var gPidFile: string
proc createPidFile(filename: string) {.raises: [Defect, IOError].} =
  writeFile filename, $os.getCurrentProcessId()
  gPidFile = filename
  addQuitProc proc {.noconv.} = discard io2.removeFile(gPidFile)

proc initializeNetworking(node: BeaconNode) {.async.} =
  info "Listening to incoming network requests"
  await node.network.startListening()

  let addressFile = node.config.dataDir / "beacon_node.enr"
  writeFile(addressFile, node.network.announcedENR.toURI)

  await node.network.start()

func shouldWeStartWeb3(node: BeaconNode): bool =
  (node.config.web3Mode == Web3Mode.enabled) or
  (node.config.web3Mode == Web3Mode.auto and node.attachedValidators[].count > 0)

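# The Web3Mode policy above, spelled out (illustrative):
#
#   Web3Mode.enabled -> the Eth1 monitor is always started
#   Web3Mode.auto    -> it is started only when at least one validator is
#                       attached, since only block production needs the
#                       deposit/eth1 data it provides
#   anything else    -> it stays off
#
# The check is evaluated once at startup, which is why the TODO in `start`
# notes that validators attached later are not accounted for.
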
proc start(node: BeaconNode) {.raises: [Defect, CatchableError].} =
  let
    head = node.dag.head
    finalizedHead = node.dag.finalizedHead
    genesisTime = node.beaconClock.fromNow(toBeaconTime(Slot 0))

  notice "Starting beacon node",
    version = fullVersionStr,
    enr = node.network.announcedENR.toURI,
    peerId = $node.network.switch.peerInfo.peerId,
    timeSinceFinalization =
      node.beaconClock.now() - finalizedHead.slot.toBeaconTime(),
    head = shortLog(head),
    finalizedHead = shortLog(finalizedHead),
    SLOTS_PER_EPOCH,
    SECONDS_PER_SLOT,
    SPEC_VERSION,
    dataDir = node.config.dataDir.string,
    validators = node.attachedValidators[].count

  if genesisTime.inFuture:
    notice "Waiting for genesis", genesisIn = genesisTime.offset

  waitFor node.initializeNetworking()

  # TODO this does not account for validators getting attached "later"
  if node.eth1Monitor != nil and node.shouldWeStartWeb3:
    node.eth1Monitor.start()

  node.run()

func formatGwei(amount: uint64): string =
  # TODO This is implemented in quite a silly way.
  # Better routines for formatting decimal numbers
  # should exist somewhere else.
  let
    eth = amount div 1000000000
    remainder = amount mod 1000000000

  result = $eth
  if remainder != 0:
    result.add '.'
    let remainderStr = $remainder
    for i in remainderStr.len ..< 9:
      result.add '0'
    result.add remainderStr
    while result[^1] == '0':
      result.setLen(result.len - 1)

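# A few worked inputs for the formatter above (illustrative only; amounts
# are in Gwei, output in ETH):
#
#   formatGwei(32_000_000_000'u64) == "32"            # no fractional part
#   formatGwei(31_999_999_999'u64) == "31.999999999"
#   formatGwei(1_500_000_000'u64)  == "1.5"           # trailing zeros dropped
#   formatGwei(123'u64)            == "0.000000123"   # left-padded to 9 digits
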
proc initStatusBar(node: BeaconNode) {.raises: [Defect, ValueError].} =
  if not isatty(stdout): return
  if not node.config.statusBarEnabled: return

  try:
    enableTrueColors()
  except Exception as exc: # TODO Exception
    error "Couldn't enable colors", err = exc.msg

  proc dataResolver(expr: string): string {.raises: [Defect].} =
    template justified: untyped = node.dag.head.atEpochStart(
      getStateField(
        node.dag.headState.data, current_justified_checkpoint).epoch)
    # TODO:
    # We should introduce a general API for resolving dot expressions
    # such as `db.latest_block.slot` or `metrics.connected_peers`.
    # Such an API can be shared between the RPC back-end, CLI tools
    # such as ncli, a potential GraphQL back-end and so on.
    # The status bar feature would allow the user to specify an
    # arbitrary expression that is resolvable through this API.
    case expr.toLowerAscii
    of "connected_peers":
      $(node.connectedPeersCount)

    of "head_root":
      shortLog(node.dag.head.root)
    of "head_epoch":
      $(node.dag.head.slot.epoch)
    of "head_epoch_slot":
      $(node.dag.head.slot mod SLOTS_PER_EPOCH)
    of "head_slot":
      $(node.dag.head.slot)

    of "justifed_root":
      shortLog(justified.blck.root)
    of "justifed_epoch":
      $(justified.slot.epoch)
    of "justifed_epoch_slot":
      $(justified.slot mod SLOTS_PER_EPOCH)
    of "justifed_slot":
      $(justified.slot)

    of "finalized_root":
      shortLog(node.dag.finalizedHead.blck.root)
    of "finalized_epoch":
      $(node.dag.finalizedHead.slot.epoch)
    of "finalized_epoch_slot":
      $(node.dag.finalizedHead.slot mod SLOTS_PER_EPOCH)
    of "finalized_slot":
      $(node.dag.finalizedHead.slot)

    of "epoch":
      $node.currentSlot.epoch

    of "epoch_slot":
      $(node.currentSlot mod SLOTS_PER_EPOCH)

    of "slot":
      $node.currentSlot

    of "slots_per_epoch":
      $SLOTS_PER_EPOCH

    of "slot_trailing_digits":
      var slotStr = $node.currentSlot
      if slotStr.len > 3: slotStr = slotStr[^3..^1]
      slotStr

    of "attached_validators_balance":
      formatGwei(node.attachedValidatorBalanceTotal)

    of "sync_status":
      if isNil(node.syncManager):
        "pending"
      else:
        if node.syncManager.inProgress:
          node.syncManager.syncStatus
        else:
          "synced"
    else:
      # We ignore typos for now and just render the expression
      # as it was written. TODO: come up with a good way to show
      # an error message to the user.
      "$" & expr

  var statusBar = StatusBarView.init(
    node.config.statusBarContents,
    dataResolver)

  when compiles(defaultChroniclesStream.output.writer):
    defaultChroniclesStream.output.writer =
      proc (logLevel: LogLevel, msg: LogOutputStr) {.raises: [Defect].} =
        try:
          # p.hidePrompt
          erase statusBar
          # p.writeLine msg
          stdout.write msg
          render statusBar
          # p.showPrompt
        except Exception as e: # render raises Exception
          logLoggingFailure(cstring(msg), e)

  proc statusBarUpdatesPollingLoop() {.async.} =
    try:
      while true:
        update statusBar
        erase statusBar
        render statusBar
        await sleepAsync(chronos.seconds(1))
    except CatchableError as exc:
      warn "Failed to update status bar, no further updates", err = exc.msg

  asyncSpawn statusBarUpdatesPollingLoop()

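# Usage sketch for the resolver above (illustrative): the status bar
# contents are a template whose `$`-prefixed fields are looked up through
# `dataResolver`, so a configuration fragment along the lines of
#
#   peers: $connected_peers;slot: $head_slot;finalized: $finalized_epoch
#
# renders the live peer count, head slot and finalized epoch, while any
# unknown field is echoed back verbatim as "$<name>".
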
proc handleValidatorExitCommand(config: BeaconNodeConf) {.async.} =
  let port = try:
    let value = parseInt(config.rpcUrlForExit.port)
    if value < Port.low.int or value > Port.high.int:
      raise newException(ValueError,
        "The port number must be between " & $Port.low & " and " & $Port.high)
    Port value
  except CatchableError as err:
    fatal "Invalid port number", err = err.msg
    quit 1

  let rpcClient = newRpcHttpClient()

  try:
    await connect(rpcClient, config.rpcUrlForExit.hostname, port)
  except CatchableError as err:
    fatal "Failed to connect to the beacon node RPC service", err = err.msg
    quit 1

  let (validator, validatorIdx, _, _) = try:
    await rpcClient.get_v1_beacon_states_stateId_validators_validatorId(
      "head", config.exitedValidator)
  except CatchableError as err:
    fatal "Failed to obtain information for validator", err = err.msg
    quit 1

  let exitAtEpoch = if config.exitAtEpoch.isSome:
    Epoch config.exitAtEpoch.get
  else:
    let headSlot = try:
      await rpcClient.getBeaconHead()
    except CatchableError as err:
      fatal "Failed to obtain the current head slot", err = err.msg
      quit 1
    headSlot.epoch

  let
    validatorsDir = config.validatorsDir
    validatorKeyAsStr = "0x" & $validator.pubkey
    keystoreDir = validatorsDir / validatorKeyAsStr

  if not dirExists(keystoreDir):
    echo "The validator keystores directory '" & config.validatorsDir.string &
         "' does not contain a keystore for the selected validator with public " &
         "key '" & validatorKeyAsStr & "'."
    quit 1

  let signingItem = loadKeystore(
    validatorsDir,
    config.secretsDir,
    validatorKeyAsStr,
    config.nonInteractive)

  if signingItem.isNone:
    fatal "Unable to continue without decrypted signing key"
    quit 1

  let fork = try:
    await rpcClient.get_v1_beacon_states_fork("head")
  except CatchableError as err:
    fatal "Failed to obtain the fork id of the head state", err = err.msg
    quit 1

  let genesisValidatorsRoot = try:
    (await rpcClient.get_v1_beacon_genesis()).genesis_validators_root
  except CatchableError as err:
    fatal "Failed to obtain the genesis validators root of the network",
          err = err.msg
    quit 1

  var signedExit = SignedVoluntaryExit(
    message: VoluntaryExit(
      epoch: exitAtEpoch,
      validator_index: validatorIdx))

  signedExit.signature =
    block:
      let key = signingItem.get().privateKey
      get_voluntary_exit_signature(fork, genesisValidatorsRoot,
                                   signedExit.message, key).toValidatorSig()

  template ask(prompt: string): string =
    try:
      stdout.write prompt, ": "
      stdin.readLine()
    except IOError:
      fatal "Failed to read user input from stdin"
      quit 1

  try:
    echoP "PLEASE BEWARE!"

    echoP "Publishing a voluntary exit is an irreversible operation! " &
          "You won't be able to restart again with the same validator."

    echoP "By requesting an exit now, you'll be exempt from penalties " &
          "stemming from not performing your validator duties, but you " &
          "won't be able to withdraw your deposited funds for the time " &
          "being. This means that your funds will be effectively frozen " &
          "until withdrawals are enabled in a future phase of Eth2."

    echoP "To understand more about the Eth2 roadmap, we recommend you " &
          "have a look at\n" &
          "https://ethereum.org/en/eth2/#roadmap"

    echoP "You must keep your validator running for at least 5 epochs " &
          "(32 minutes) after requesting a validator exit, as you will " &
          "still be required to perform validator duties until your exit " &
          "has been processed. The number of epochs could be significantly " &
          "higher depending on how many other validators are queued to exit."

    echoP "As such, we recommend you keep track of your validator's status " &
          "using an Eth2 block explorer before shutting down your beacon node."

    const
      confirmation = "I understand the implications of submitting a voluntary exit"

    while true:
      echoP "To proceed to submitting your voluntary exit, please type '" &
            confirmation & "' (without the quotes) in the prompt below and " &
            "press ENTER or type 'q' to quit."
      echo ""

      let choice = ask "Your choice"
      if choice == "q":
        quit 0
      elif choice == confirmation:
        let success = await rpcClient.post_v1_beacon_pool_voluntary_exits(signedExit)
        if success:
          echo "Successfully published voluntary exit for validator " &
                $validatorIdx & "(" & validatorKeyAsStr[0..9] & ")."
          quit 0
        else:
          echo "The voluntary exit was not submitted successfully. Please try again."
          quit 1
  except CatchableError as err:
    fatal "Failed to send the signed exit message to the beacon node RPC",
          err = err.msg
    quit 1

proc loadEth2Network(config: BeaconNodeConf): Eth2NetworkMetadata {.raises: [Defect, IOError].} =
  if config.eth2Network.isSome:
    getMetadataForNetwork(config.eth2Network.get)
  else:
    when const_preset == "mainnet":
      mainnetMetadata
    else:
      # Presumably other configurations can have other defaults, but for now
      # this simplifies the flow
      echo "Must specify network on non-mainnet node"
      quit 1

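# Resolution sketch for the network metadata above (illustrative):
#
#   --network=prater given            -> getMetadataForNetwork("prater")
#   no --network, mainnet preset      -> the bundled mainnetMetadata
#   no --network, non-mainnet preset  -> refuses to start, since there is no
#                                        sensible default for such builds
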
proc loadBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext): BeaconNode {.
    raises: [Defect, CatchableError].} =
  let metadata = config.loadEth2Network()

  # Updating the config based on the metadata certainly is not beautiful but it
  # works
  for node in metadata.bootstrapNodes:
    config.bootstrapNodes.add node

  BeaconNode.init(
    metadata.cfg,
    rng,
    config,
    metadata.depositContractDeployedAt,
    metadata.eth1Network,
    metadata.genesisData,
    metadata.genesisDepositsSnapshot)

proc doRunBeaconNode(config: var BeaconNodeConf, rng: ref BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
  info "Launching beacon node",
      version = fullVersionStr,
      bls_backend = $BLS_BACKEND,
      cmdParams = commandLineParams(),
      config

  createPidFile(config.dataDir.string / "beacon_node.pid")

  config.createDumpDirs()

  if config.metricsEnabled:
    let metricsAddress = config.metricsAddress
    notice "Starting metrics HTTP server",
      url = "http://" & $metricsAddress & ":" & $config.metricsPort & "/metrics"
    try:
      startMetricsHttpServer($metricsAddress, config.metricsPort)
    except CatchableError as exc: raise exc
    except Exception as exc: raiseAssert exc.msg # TODO fix metrics

  # There are no managed event loops in here, to do a graceful shutdown, but
  # letting the default Ctrl+C handler exit is safe, since we only read from
  # the db.
  let node = loadBeaconNode(config, rng)

  if bnStatus == BeaconNodeStatus.Stopping:
    return

  initStatusBar(node)

  if node.nickname != "":
    dynamicLogScope(node = node.nickname): node.start()
  else:
    node.start()

proc doCreateTestnet(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.raises: [Defect, CatchableError].} =
  let launchPadDeposits = try:
    Json.loadFile(config.testnetDepositsFile.string, seq[LaunchPadDeposit])
  except SerializationError as err:
    error "Invalid LaunchPad deposits file",
          err = formatMsg(err, config.testnetDepositsFile.string)
    quit 1

  var deposits: seq[DepositData]
  for i in 0 ..< launchPadDeposits.len:
    deposits.add(launchPadDeposits[i] as DepositData)

  let
    startTime = uint64(times.toUnix(times.getTime()) + config.genesisOffset)
    outGenesis = config.outputGenesis.string
    eth1Hash = if config.web3Urls.len == 0: eth1BlockHash
               else: (waitFor getEth1BlockHash(config.web3Urls[0], blockId("latest"))).asEth2Digest
    cfg = getRuntimeConfig(config.eth2Network)
  var
    initialState = initialize_beacon_state_from_eth1(
      cfg, eth1Hash, startTime, deposits, {skipBlsValidation})

  # https://github.com/ethereum/eth2.0-pm/tree/6e41fcf383ebeb5125938850d8e9b4e9888389b4/interop/mocked_start#create-genesis-state
  initialState.genesis_time = startTime

  doAssert initialState.validators.len > 0

  let outGenesisExt = splitFile(outGenesis).ext
  if cmpIgnoreCase(outGenesisExt, ".json") == 0:
    Json.saveFile(outGenesis, initialState, pretty = true)
    echo "Wrote ", outGenesis

  let outSszGenesis = outGenesis.changeFileExt "ssz"
  SSZ.saveFile(outSszGenesis, initialState[])
  echo "Wrote ", outSszGenesis

  let bootstrapFile = config.outputBootstrapFile.string
  if bootstrapFile.len > 0:
    let
      networkKeys = getPersistentNetKeys(rng, config)
      netMetadata = getPersistentNetMetadata(config)
      forkId = getENRForkID(
        cfg,
        initialState[].slot.epoch,
        initialState[].genesis_validators_root)
      bootstrapEnr = enr.Record.init(
        1, # sequence number
        networkKeys.seckey.asEthKey,
        some(config.bootstrapAddress),
        some(config.bootstrapPort),
        some(config.bootstrapPort),
        [
          toFieldPair(enrForkIdField, SSZ.encode(forkId)),
          toFieldPair(enrAttestationSubnetsField, SSZ.encode(netMetadata.attnets))
        ])

    writeFile(bootstrapFile, bootstrapEnr.tryGet().toURI)
    echo "Wrote ", bootstrapFile

proc findWalletWithoutErrors(config: BeaconNodeConf,
                             name: WalletName): Option[WalletPathPair] =
  let res = findWallet(config, name)
  if res.isErr:
    fatal "Failed to locate wallet", error = res.error
    quit 1
  res.get

proc doDeposits(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
    raises: [Defect, CatchableError].} =
  case config.depositsCmd
  of DepositsCmd.createTestnetDeposits:
    if config.eth2Network.isNone:
      fatal "Please specify the intended testnet for the deposits"
      quit 1
    let metadata = config.loadEth2Network()
    var seed: KeySeed
    defer: burnMem(seed)
    var walletPath: WalletPathPair

    if config.existingWalletId.isSome:
      let
        id = config.existingWalletId.get
        found = findWalletWithoutErrors(config, id)

      if found.isSome:
        walletPath = found.get
      else:
        fatal "Unable to find wallet with the specified name/uuid", id
        quit 1

      var unlocked = unlockWalletInteractively(walletPath.wallet)
      if unlocked.isOk:
        swap(seed, unlocked.get)
      else:
        # The failure will be reported in `unlockWalletInteractively`.
        quit 1
    else:
      var walletRes = createWalletInteractively(rng, config)
      if walletRes.isErr:
        fatal "Unable to create wallet", err = walletRes.error
        quit 1
      else:
        swap(seed, walletRes.get.seed)
        walletPath = walletRes.get.walletPath

    let vres = secureCreatePath(config.outValidatorsDir)
    if vres.isErr():
      fatal "Could not create directory", path = config.outValidatorsDir
      quit QuitFailure

    let sres = secureCreatePath(config.outSecretsDir)
    if sres.isErr():
      fatal "Could not create directory", path = config.outSecretsDir
      quit QuitFailure

    let deposits = generateDeposits(
metadata.cfg,
|
2021-04-06 22:12:07 +00:00
|
|
|
rng,
|
2021-02-22 16:17:48 +00:00
|
|
|
seed,
|
|
|
|
walletPath.wallet.nextAccount,
|
|
|
|
config.totalDeposits,
|
|
|
|
config.outValidatorsDir,
|
|
|
|
config.outSecretsDir)
|
|
|
|
|
|
|
|
if deposits.isErr:
|
|
|
|
fatal "Failed to generate deposits", err = deposits.error
|
|
|
|
quit 1
|
|
|
|
|
|
|
|
try:
|
|
|
|
let depositDataPath = if config.outDepositsFile.isSome:
|
|
|
|
config.outDepositsFile.get.string
|
2020-12-10 10:59:31 +00:00
|
|
|
else:
|
2021-02-22 16:17:48 +00:00
|
|
|
config.outValidatorsDir / "deposit_data-" & $epochTime() & ".json"
|
|
|
|
|
|
|
|
let launchPadDeposits =
|
Implement split preset/config support (#2710)
* Implement split preset/config support
This is the initial bulk refactor to introduce runtime config values in
a number of places, somewhat replacing the existing mechanism of loading
network metadata.
It still needs more work, this is the initial refactor that introduces
runtime configuration in some of the places that need it.
The PR changes the way presets and constants work, to match the spec. In
particular, a "preset" now refers to the compile-time configuration
while a "cfg" or "RuntimeConfig" is the dynamic part.
A single binary can support either mainnet or minimal, but not both.
Support for other presets has been removed completely (can be readded,
in case there's need).
There's a number of outstanding tasks:
* `SECONDS_PER_SLOT` still needs fixing
* loading custom runtime configs needs redoing
* checking constants against YAML file
* yeerongpilly support
`build/nimbus_beacon_node --network=yeerongpilly --discv5:no --log-level=DEBUG`
* load fork epoch from config
* fix fork digest sent in status
* nicer error string for request failures
* fix tools
* one more
* fixup
* fixup
* fixup
* use "standard" network definition folder in local testnet
Files are loaded from their standard locations, including genesis etc,
to conform to the format used in the `eth2-networks` repo.
* fix launch scripts, allow unknown config values
* fix base config of rest test
* cleanups
* bundle mainnet config using common loader
* fix spec links and names
* only include supported preset in binary
* drop yeerongpilly, add altair-devnet-0, support boot_enr.yaml
2021-07-12 13:01:38 +00:00
|
|
|
mapIt(deposits.value, LaunchPadDeposit.init(metadata.cfg, it))
|
2021-02-22 16:17:48 +00:00
|
|
|
|
|
|
|
Json.saveFile(depositDataPath, launchPadDeposits)
|
|
|
|
echo "Deposit data written to \"", depositDataPath, "\""
|
2018-12-19 12:58:53 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
walletPath.wallet.nextAccount += deposits.value.len
|
|
|
|
let status = saveWallet(walletPath)
|
|
|
|
if status.isErr:
|
|
|
|
fatal "Failed to update wallet file after generating deposits",
|
|
|
|
wallet = walletPath.path,
|
|
|
|
error = status.error
|
|
|
|
quit 1
|
|
|
|
except CatchableError as err:
|
|
|
|
fatal "Failed to create launchpad deposit data file", err = err.msg
|
2020-12-04 16:28:42 +00:00
|
|
|
quit 1
|
2021-04-06 22:12:07 +00:00
|
|
|
#[
|
2021-02-22 16:17:48 +00:00
|
|
|
of DepositsCmd.status:
|
|
|
|
echo "The status command is not implemented yet"
|
|
|
|
quit 1
|
2021-04-06 22:12:07 +00:00
|
|
|
]#
|
2020-10-02 13:38:32 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
of DepositsCmd.`import`:
|
|
|
|
let validatorKeysDir = if config.importedDepositsDir.isSome:
|
|
|
|
config.importedDepositsDir.get
|
|
|
|
else:
|
|
|
|
let cwd = os.getCurrentDir()
|
|
|
|
if dirExists(cwd / "validator_keys"):
|
|
|
|
InputDir(cwd / "validator_keys")
|
2020-07-17 20:59:50 +00:00
|
|
|
else:
|
2021-02-22 16:17:48 +00:00
|
|
|
echo "The default search path for validator keys is a sub-directory " &
|
|
|
|
"named 'validator_keys' in the current working directory. Since " &
|
|
|
|
"no such directory exists, please either provide the correct path" &
|
|
|
|
"as an argument or copy the imported keys in the expected location."
|
|
|
|
quit 1
|
2020-07-10 15:18:14 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
importKeystoresFromDir(
|
|
|
|
rng,
|
|
|
|
validatorKeysDir.string,
|
|
|
|
config.validatorsDir, config.secretsDir)
|
2020-08-27 18:23:41 +00:00
|
|
|
|
2021-02-22 16:17:48 +00:00
|
|
|
of DepositsCmd.exit:
|
|
|
|
waitFor handleValidatorExitCommand(config)
|
2019-11-05 18:16:10 +00:00
|
|
|
|
2021-03-26 06:52:01 +00:00
|
|
|
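# Handles the `wallets` sub-commands: interactive wallet creation, listing the
# wallets found in the wallets directory, and interactive wallet restore.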
proc doWallets(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
    raises: [Defect, CatchableError].} =
  case config.walletsCmd:
  of WalletsCmd.create:
    if config.createdWalletNameFlag.isSome:
      let
        name = config.createdWalletNameFlag.get
        existingWallet = findWalletWithoutErrors(config, name)
      if existingWallet.isSome:
        echo "The wallet '" & name.string & "' already exists."
        quit 1

    var walletRes = createWalletInteractively(rng, config)
    if walletRes.isErr:
      fatal "Unable to create wallet", err = walletRes.error
      quit 1
    burnMem(walletRes.get.seed)

  of WalletsCmd.list:
    for kind, walletFile in walkDir(config.walletsDir):
      if kind != pcFile: continue
      if checkSensitiveFilePermissions(walletFile):
        let walletRes = loadWallet(walletFile)
        if walletRes.isOk:
          echo walletRes.get.longName
        else:
          warn "Found corrupt wallet file",
            wallet = walletFile, error = walletRes.error
      else:
        warn "Found wallet file with insecure permissions",
          wallet = walletFile

  of WalletsCmd.restore:
    restoreWalletInteractively(rng, config)

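# Handles the `record` sub-commands: creating a new ENR from the persistent
# network keys and user-supplied "key:hex-value" field pairs, or printing an
# existing record.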
proc doRecord(config: BeaconNodeConf, rng: var BrHmacDrbgContext) {.
    raises: [Defect, CatchableError].} =
  case config.recordCmd:
  of RecordCmd.create:
    let netKeys = getPersistentNetKeys(rng, config)

    var fieldPairs: seq[FieldPair]
    for field in config.fields:
      let fieldPair = field.split(":")
      if fieldPair.len > 1:
        fieldPairs.add(toFieldPair(fieldPair[0], hexToSeqByte(fieldPair[1])))
      else:
        fatal "Invalid field pair"
        quit QuitFailure

    let record = enr.Record.init(
      config.seqNumber,
      netKeys.seckey.asEthKey,
      some(config.ipExt),
      some(config.tcpPortExt),
      some(config.udpPortExt),
      fieldPairs).expect("Record within size limits")

    echo record.toURI()

  of RecordCmd.print:
    echo $config.recordPrint

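# Handles the `web3 test` sub-command by probing the given web3 provider URL
# against the deposit contract address of the selected network.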
proc doWeb3Cmd(config: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  case config.web3Cmd:
  of Web3Cmd.test:
    let metadata = config.loadEth2Network()
    waitFor testWeb3Provider(config.web3TestUrl,
                             metadata.cfg.DEPOSIT_CONTRACT_ADDRESS)

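# Exports the local slashing protection database into a slashing protection
# interchange (EIP-3076) JSON file at the configured output path.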
proc doSlashingExport(conf: BeaconNodeConf) {.raises: [IOError, Defect].} =
  let
    dir = conf.validatorsDir()
    filetrunc = SlashingDbName
  # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312
  let db = SlashingProtectionDB.loadUnchecked(dir, filetrunc, readOnly = false)

  let interchange = conf.exportedInterchangeFile.string
  db.exportSlashingInterchange(interchange, conf.exportedValidators)
  echo "Export finished: '", dir/filetrunc & ".sqlite3", "' into '", interchange, "'"

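# Imports a slashing protection interchange (EIP-3076) JSON file into the
# local slashing protection database, opening (and migrating, if needed) the
# database before merging the interchange data.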
proc doSlashingImport(conf: BeaconNodeConf) {.raises: [SerializationError, IOError, Defect].} =
  let
    dir = conf.validatorsDir()
    filetrunc = SlashingDbName
  # TODO: Make it read-only https://github.com/status-im/nim-eth/issues/312

  let interchange = conf.importedInterchangeFile.string

  var spdir: SPDIR
  try:
    spdir = JSON.loadFile(interchange, SPDIR)
  except SerializationError as err:
    writeStackTrace()
    stderr.write $JSON & " load issue for file \"", interchange, "\"\n"
    stderr.write err.formatMsg(interchange), "\n"
    quit 1

  # Open the DB and handle migration from v1 to v2 if needed
  let db = SlashingProtectionDB.init(
    genesis_validators_root = Eth2Digest spdir.metadata.genesis_validators_root,
    basePath = dir,
    dbname = filetrunc,
    modes = {kCompleteArchive}
  )

  # Now import the slashing interchange file
  # Failure modes:
  # - siError can only happen with an invalid genesis_validators_root, which would be caught above
  # - siPartial can happen for invalid public keys, slashable blocks, slashable votes
  let status = db.inclSPDIR(spdir)
  doAssert status in {siSuccess, siPartial}

  echo "Import finished: '", interchange, "' into '", dir/filetrunc & ".sqlite3", "'"

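# Dispatches the `slashingdb` sub-commands to the export/import handlers above.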
proc doSlashingInterchange(conf: BeaconNodeConf) {.raises: [Defect, CatchableError].} =
  doAssert conf.cmd == slashingdb
  case conf.slashingdbCmd
  of SlashProtCmd.`export`:
    conf.doSlashingExport()
  of SlashProtCmd.`import`:
    conf.doSlashingImport()

{.pop.} # TODO moduletests exceptions

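# Program entry point: parse the command line into a config, set up logging
# and signal handlers, then dispatch to the selected sub-command.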
programMain:
  var
    config = makeBannerAndConfig(clientId, BeaconNodeConf)

  setupStdoutLogging(config.logLevel)

  if not(checkAndCreateDataDir(string(config.dataDir))):
    # We are unable to access/create the data folder or the data folder's
    # permissions are insecure.
    quit QuitFailure

  setupLogging(config.logLevel, config.logFile)

  ## This Ctrl+C handler exits the program in a non-graceful way.
  ## It's responsible for handling Ctrl+C in sub-commands such
  ## as `wallets *` and `deposits *`. In a regular beacon node
  ## run, it will be overwritten later with a different handler
  ## performing a graceful exit.
  proc exitImmediatelyOnCtrlC() {.noconv.} =
    when defined(windows):
      # workaround for https://github.com/nim-lang/Nim/issues/4057
      setupForeignThreadGc()
    # in case a password prompt disabled echoing
    resetStdin()
    echo "" # If we interrupt during an interactive prompt, this
            # will move the cursor to the next line
    notice "Shutting down after having received SIGINT"
    quit 0
  setControlCHook(exitImmediatelyOnCtrlC)

  # equivalent SIGTERM handler
  when defined(posix):
    proc exitImmediatelyOnSIGTERM(signal: cint) {.noconv.} =
      notice "Shutting down after having received SIGTERM"
      quit 0
    c_signal(SIGTERM, exitImmediatelyOnSIGTERM)

  # Single RNG instance for the application - will be seeded on construction
  # and avoid using system resources (such as urandom) after that
  let rng = keys.newRng()

  case config.cmd
  of createTestnet: doCreateTestnet(config, rng[])
  of noCommand: doRunBeaconNode(config, rng)
  of deposits: doDeposits(config, rng[])
  of wallets: doWallets(config, rng[])
  of record: doRecord(config, rng[])
  of web3: doWeb3Cmd(config)
  of slashingdb: doSlashingInterchange(config)