# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

when (NimMajor, NimMinor) < (1, 4):
  {.push raises: [Defect].}
else:
  {.push raises: [].}

# This module is responsible for handling beacon node validators, ie those
# that are running directly in the beacon node and not in a separate validator
# client process.

import
  # Standard library
  std/[os, tables],

  # Nimble packages
  stew/[byteutils, objects],
  chronos, metrics,
  chronicles, chronicles/timings,
  json_serialization/std/[options, sets, net],
  eth/db/kvstore,
  eth/keys, eth/p2p/discoveryv5/[protocol, enr],
  web3/ethtypes,

  # Local modules
  ../spec/datatypes/[phase0, altair, bellatrix],
  ../spec/[
    eth2_merkleization, forks, helpers, network, signatures, state_transition,
    validator],
  ../consensus_object_pools/[
    spec_cache, blockchain_dag, block_clearance, attestation_pool, exit_pool,
    sync_committee_msg_pool, consensus_manager],
  ../eth1/eth1_monitor,
  ../networking/eth2_network,
  ../sszdump, ../sync/sync_manager,
  ../gossip_processing/block_processor,
  ".."/[conf, beacon_clock, beacon_node],
  "."/[slashing_protection, validator_pool, keystore_management],
  ".."/spec/mev/rest_bellatrix_mev_calls

from eth/async_utils import awaitWithTimeout

const
  delayBuckets = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
                  0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, Inf]

  BUILDER_BLOCK_SUBMISSION_DELAY_TOLERANCE = 4.seconds
  BUILDER_STATUS_DELAY_TOLERANCE = 3.seconds
  BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE = 3.seconds
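
# The BUILDER_* tolerances bound how long the node waits on the external block
# builder (MEV) REST API: block submission is used below and, judging by their
# names, the other two cover builder status checks and validator registration
# elsewhere. `delayBuckets` appears to define bucket boundaries, in seconds,
# for delay-tracking histogram metrics.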

# Metrics for tracking attestation and beacon block loss
declareCounter beacon_light_client_finality_updates_sent,
  "Number of LC finality updates sent by this peer"

declareCounter beacon_light_client_optimistic_updates_sent,
  "Number of LC optimistic updates sent by this peer"

declareCounter beacon_blocks_proposed,
  "Number of beacon chain blocks sent by this peer"

declareCounter beacon_block_production_errors,
  "Number of times we failed to produce a block"

declareCounter beacon_block_payload_errors,
  "Number of times execution client failed to produce block payload"

declareGauge(attached_validator_balance,
  "Validator balance at slot end of the first 64 validators, in Gwei",
  labels = ["pubkey"])

declarePublicGauge(attached_validator_balance_total,
  "Validator balance of all attached validators, in Gwei")

logScope: topics = "beacval"

type
  ForkedBlockResult* = Result[ForkedBeaconBlock, string]

proc findValidator*(validators: auto, pubkey: ValidatorPubKey): Opt[ValidatorIndex] =
  let idx = validators.findIt(it.pubkey == pubkey)
  if idx == -1:
    # We allow adding a validator even if its key is not in the state registry:
    # it might be that the deposit for this validator has not yet been processed
    notice "Validator deposit not yet processed, monitoring", pubkey
    Opt.none ValidatorIndex
  else:
    Opt.some idx.ValidatorIndex

# TODO: This should probably be moved to the validator_pool module
proc addRemoteValidator*(pool: var ValidatorPool,
                         keystore: KeystoreData,
                         index: Opt[ValidatorIndex],
                         feeRecipient: Eth1Address,
                         slot: Slot) =
  var clients: seq[(RestClientRef, RemoteSignerInfo)]
  let httpFlags =
    block:
      var res: set[HttpClientFlag]
      if RemoteKeystoreFlag.IgnoreSSLVerification in keystore.flags:
        res.incl({HttpClientFlag.NoVerifyHost,
                  HttpClientFlag.NoVerifyServerName})
      res
  let prestoFlags = {RestClientFlag.CommaSeparatedArray}
  for remote in keystore.remotes:
    let client = RestClientRef.new($remote.url, prestoFlags, httpFlags)
    if client.isErr():
      warn "Unable to resolve distributed signer address",
        remote_url = $remote.url, validator = $remote.pubkey
    clients.add((client.get(), remote))
  pool.addRemoteValidator(keystore, clients, index, feeRecipient, slot)
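
# A remote (distributed) keystore may list several signer endpoints; one REST
# client is created per endpoint and the validator is then registered with the
# pool together with its (possibly not yet known) index and its fee recipient.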

proc addValidators*(node: BeaconNode) =
  debug "Loading validators", validatorsDir = node.config.validatorsDir()
  let slot = node.currentSlot()
  for keystore in listLoadableKeystores(node.config):
    let
      index = withState(node.dag.headState):
        findValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
      feeRecipient = node.consensusManager[].getFeeRecipient(
        keystore.pubkey, index, slot.epoch)

    case keystore.kind
    of KeystoreKind.Local:
      node.attachedValidators[].addLocalValidator(
        keystore, index, feeRecipient, slot)
    of KeystoreKind.Remote:
      node.attachedValidators[].addRemoteValidator(
        keystore, index, feeRecipient, slot)
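
# Each loadable keystore is resolved against the head state to find its
# validator index (if the deposit has been processed) and its effective fee
# recipient, and is then attached as a local or remote validator depending on
# the keystore kind.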

proc getAttachedValidator(node: BeaconNode,
                          pubkey: ValidatorPubKey): AttachedValidator =
  node.attachedValidators[].getValidator(pubkey)

proc getAttachedValidator(node: BeaconNode,
                          state_validators: auto,
                          idx: ValidatorIndex): AttachedValidator =
  if uint64(idx) < state_validators.lenu64:
    let validator = node.getAttachedValidator(state_validators[idx].pubkey)
    if validator != nil and validator.index != Opt.some(idx):
      # Update index, in case the validator was activated!
      notice "Validator activated", pubkey = validator.pubkey, index = idx
      validator.index = Opt.some(idx)
    validator
  else:
    warn "Validator index out of bounds",
      idx, validators = state_validators.len
    nil

proc getAttachedValidator(node: BeaconNode,
                          idx: ValidatorIndex): AttachedValidator =
  let key = node.dag.validatorKey(idx)
  if key.isSome():
    let validator = node.getAttachedValidator(key.get().toPubKey())
    if validator != nil and validator.index != Opt.some(idx):
      # Update index, in case the validator was activated!
      notice "Validator activated", pubkey = validator.pubkey, index = idx
      validator.index = Opt.some(idx)
    validator
  else:
    warn "Validator key not found",
      idx, head = shortLog(node.dag.head)
    nil

proc isSynced*(node: BeaconNode, head: BlockRef): bool =
  ## TODO This function is here as a placeholder for some better heuristics to
  ##      determine if we're in sync and should be producing blocks and
  ##      attestations. Generally, the problem is that slot time keeps advancing
  ##      even when there are no blocks being produced, so there's no way to
  ##      distinguish validators genuinely going missing from the node not being
  ##      well connected (during a network split or an internet outage, for
  ##      example). It would generally be correct to simply keep running as if
  ##      we were the only legit node left alive, but then we run into issues:
  ##      with enough empty slots, the validator pool is emptied, leading
  ##      to empty committees and lots of empty slot processing that will be
  ##      thrown away as soon as we're synced again.

  let
    # The slot we should be at, according to the clock
    beaconTime = node.beaconClock.now()
    wallSlot = beaconTime.toSlot()

  # TODO if everyone follows this logic, the network will not recover from a
  #      halt: nobody will be producing blocks because everyone expects someone
  #      else to do it
  if wallSlot.afterGenesis and
      head.slot + node.config.syncHorizon < wallSlot.slot:
    false
  else:
    not node.dag.is_optimistic(head.root)

proc handleLightClientUpdates*(node: BeaconNode, slot: Slot) {.async.} =
  static: doAssert lightClientFinalityUpdateSlotOffset ==
    lightClientOptimisticUpdateSlotOffset
  let sendTime = node.beaconClock.fromNow(
    slot.light_client_finality_update_time())
  if sendTime.inFuture:
    debug "Waiting to send LC updates", slot, delay = shortLog(sendTime.offset)
    await sleepAsync(sendTime.offset)

  template latest(): auto = node.dag.lcDataStore.cache.latest
  let signature_slot = latest.signature_slot
  if slot != signature_slot:
    return

  template sync_aggregate(): auto = latest.sync_aggregate
  template sync_committee_bits(): auto = sync_aggregate.sync_committee_bits
  let num_active_participants = countOnes(sync_committee_bits).uint64
  if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
    return

  let finalized_slot = latest.finalized_header.slot
  if finalized_slot > node.lightClientPool[].latestForwardedFinalitySlot:
    template msg(): auto = latest
    let sendResult = await node.network.broadcastLightClientFinalityUpdate(msg)

    # Optimization for message with ephemeral validity, whether sent or not
    node.lightClientPool[].latestForwardedFinalitySlot = finalized_slot

    if sendResult.isOk:
      beacon_light_client_finality_updates_sent.inc()
      notice "LC finality update sent", message = shortLog(msg)
    else:
      warn "LC finality update failed to send",
        error = sendResult.error()

  let attested_slot = latest.attested_header.slot
  if attested_slot > node.lightClientPool[].latestForwardedOptimisticSlot:
    let msg = latest.toOptimistic
    let sendResult =
      await node.network.broadcastLightClientOptimisticUpdate(msg)

    # Optimization for message with ephemeral validity, whether sent or not
    node.lightClientPool[].latestForwardedOptimisticSlot = attested_slot

    if sendResult.isOk:
      beacon_light_client_optimistic_updates_sent.inc()
      notice "LC optimistic update sent", message = shortLog(msg)
    else:
      warn "LC optimistic update failed to send",
        error = sendResult.error()

proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              validator: AttachedValidator,
                              data: AttestationData,
                              committeeLen: int,
                              indexInCommittee: int,
                              subnet_id: SubnetId) {.async.} =
  try:
    let
      signature = block:
        let res = await validator.getAttestationSignature(
          fork, genesis_validators_root, data)
        if res.isErr():
          warn "Unable to sign attestation", validator = shortLog(validator),
            attestationData = shortLog(data), error_msg = res.error()
          return
        res.get()
      attestation =
        Attestation.init(
          [uint64 indexInCommittee], committeeLen, data, signature).expect(
            "valid data")

    # Logged in the router
    let res = await node.router.routeAttestation(
      attestation, subnet_id, checkSignature = false)
    if not res.isOk():
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, attestation.data, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending attestation", err = exc.msg

proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: ForkedHashedBeaconState):
                               BlockProposalEth1Data =
  if node.eth1Monitor.isNil:
    var pendingDepositsCount =
      getStateField(state, eth1_data).deposit_count -
      getStateField(state, eth1_deposit_index)
    if pendingDepositsCount > 0:
      result.hasMissingDeposits = true
    else:
      result.vote = getStateField(state, eth1_data)
  else:
    let finalizedEpochRef = node.dag.getFinalizedEpochRef()
    result = node.eth1Monitor.getBlockProposalData(
      state, finalizedEpochRef.eth1_data,
      finalizedEpochRef.eth1_deposit_index)

from web3/engine_api import ForkchoiceUpdatedResponse

proc forkchoice_updated(
    head_block_hash: Eth2Digest, safe_block_hash: Eth2Digest,
    finalized_block_hash: Eth2Digest, timestamp: uint64, random: Eth2Digest,
    fee_recipient: ethtypes.Address, execution_engine: Eth1Monitor):
    Future[Option[bellatrix.PayloadID]] {.async.} =
  logScope:
    head_block_hash
    finalized_block_hash

  let
    forkchoiceResponse =
      try:
        awaitWithTimeout(
          execution_engine.forkchoiceUpdated(
            head_block_hash, safe_block_hash, finalized_block_hash,
            timestamp, random.data, fee_recipient),
          FORKCHOICEUPDATED_TIMEOUT):
            error "Engine API fork-choice update timed out"
            default(ForkchoiceUpdatedResponse)
      except CatchableError as err:
        error "Engine API fork-choice update failed", err = err.msg
        default(ForkchoiceUpdatedResponse)

    payloadId = forkchoiceResponse.payloadId

  return if payloadId.isSome:
    some(bellatrix.PayloadID(payloadId.get))
  else:
    none(bellatrix.PayloadID)

proc get_execution_payload(
    payload_id: Option[bellatrix.PayloadID], execution_engine: Eth1Monitor):
    Future[bellatrix.ExecutionPayload] {.async.} =
  return if payload_id.isNone():
    # Pre-merge, empty payload
    default(bellatrix.ExecutionPayload)
  else:
    asConsensusExecutionPayload(
      await execution_engine.getPayload(payload_id.get))
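
# Pre-merge there is no payload id, so an empty default payload is returned;
# otherwise the payload is fetched from the execution engine's getPayload
# endpoint and converted to the consensus-side representation.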

proc getFeeRecipient(node: BeaconNode,
                     pubkey: ValidatorPubKey,
                     validatorIdx: ValidatorIndex,
                     epoch: Epoch): Eth1Address =
  node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch)

from web3/engine_api_types import PayloadExecutionStatus

proc getExecutionPayload[T](
    node: BeaconNode, proposalState: ref ForkedHashedBeaconState,
    epoch: Epoch, validator_index: ValidatorIndex,
    pubkey: ValidatorPubKey): Future[Opt[T]] {.async.} =
  # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload

  template empty_execution_payload(): auto =
    withState(proposalState[]):
      when stateFork >= BeaconStateFork.Bellatrix:
        build_empty_execution_payload(forkyState.data)
      else:
        default(T)

  if node.eth1Monitor.isNil:
    beacon_block_payload_errors.inc()
    warn "getExecutionPayload: eth1Monitor not initialized; using empty execution payload"
    return Opt.some empty_execution_payload

  try:
    # Minimize window for Eth1 monitor to shut down connection
    await node.consensusManager.eth1Monitor.ensureDataProvider()

    # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.1/src/engine/specification.md#request-2
    const GETPAYLOAD_TIMEOUT = 1.seconds

    let
      beaconHead = node.attestationPool[].getBeaconHead(node.dag.head)
      executionBlockRoot = node.dag.loadExecutionBlockRoot(beaconHead.blck)
      latestHead =
        if not executionBlockRoot.isZero:
          executionBlockRoot
        elif node.eth1Monitor.terminalBlockHash.isSome:
          node.eth1Monitor.terminalBlockHash.get.asEth2Digest
        else:
          default(Eth2Digest)
      latestSafe = beaconHead.safeExecutionPayloadHash
      latestFinalized = beaconHead.finalizedExecutionPayloadHash
      feeRecipient = node.getFeeRecipient(pubkey, validator_index, epoch)
      lastFcU = node.consensusManager.forkchoiceUpdatedInfo
      timestamp = withState(proposalState[]):
        compute_timestamp_at_slot(forkyState.data, forkyState.data.slot)
      payload_id =
        if lastFcU.isSome and
            lastFcU.get.headBlockRoot == latestHead and
            lastFcU.get.safeBlockRoot == latestSafe and
            lastFcU.get.finalizedBlockRoot == latestFinalized and
            lastFcU.get.timestamp == timestamp and
            lastFcU.get.feeRecipient == feeRecipient:
          some bellatrix.PayloadID(lastFcU.get.payloadId)
        else:
          debug "getExecutionPayload: didn't find payloadId, re-querying",
            latestHead, latestSafe, latestFinalized,
            timestamp,
            feeRecipient,
            cachedForkchoiceUpdateInformation = lastFcU

          let random = withState(proposalState[]):
            get_randao_mix(forkyState.data, get_current_epoch(forkyState.data))
          (await forkchoice_updated(
            latestHead, latestSafe, latestFinalized, timestamp, random,
            feeRecipient, node.consensusManager.eth1Monitor))
      payload = try:
        awaitWithTimeout(
          get_execution_payload(payload_id, node.consensusManager.eth1Monitor),
          GETPAYLOAD_TIMEOUT):
            beacon_block_payload_errors.inc()
            warn "Getting execution payload from Engine API timed out", payload_id
            empty_execution_payload
      except CatchableError as err:
        beacon_block_payload_errors.inc()
        warn "Getting execution payload from Engine API failed",
          payload_id, err = err.msg
        empty_execution_payload

      executionPayloadStatus =
        awaitWithTimeout(
          node.consensusManager.eth1Monitor.newExecutionPayload(payload),
          NEWPAYLOAD_TIMEOUT):
            info "getExecutionPayload: newPayload timed out"
            Opt.none PayloadExecutionStatus

    if executionPayloadStatus.isNone or executionPayloadStatus.get in [
        PayloadExecutionStatus.invalid,
        PayloadExecutionStatus.invalid_block_hash]:
      info "getExecutionPayload: newExecutionPayload invalid",
        executionPayloadStatus
      return Opt.none ExecutionPayload

    return Opt.some payload
  except CatchableError as err:
    beacon_block_payload_errors.inc()
    error "Error creating non-empty execution payload; using empty execution payload",
      msg = err.msg
    return Opt.some empty_execution_payload
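
# The payload id from the most recent forkchoiceUpdated is reused when the
# cached head/safe/finalized hashes, timestamp and fee recipient still match;
# otherwise a fresh forkchoiceUpdated is issued. The payload is then fetched
# with a timeout and re-submitted via newExecutionPayload as a sanity check,
# falling back to an empty payload when the engine is unavailable or slow.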

proc makeBeaconBlockForHeadAndSlot*(
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot,
    skip_randao_verification_bool: bool = false,
    execution_payload: Opt[ExecutionPayload] = Opt.none(ExecutionPayload),
    transactions_root: Opt[Eth2Digest] = Opt.none(Eth2Digest),
    execution_payload_root: Opt[Eth2Digest] = Opt.none(Eth2Digest)):
    Future[ForkedBlockResult] {.async.} =
  # Advance state to the slot that we're proposing for
  let
    proposalState = assignClone(node.dag.headState)

  # TODO fails at checkpoint synced head
  node.dag.withUpdatedState(
      proposalState[],
      head.atSlot(slot - 1).toBlockSlotId().expect("not nil")):
    # Advance to the given slot without calculating state root - we'll only
    # need a state root _with_ the block applied
    var info: ForkedEpochInfo

    process_slots(
      node.dag.cfg, state, slot, cache, info,
      {skipLastStateRootCalculation}).expect("advancing 1 slot should not fail")

    let
      eth1Proposal = node.getBlockProposalEth1Data(state)

    if eth1Proposal.hasMissingDeposits:
      beacon_block_production_errors.inc()
      warn "Eth1 deposits not available. Skipping block proposal", slot
      return ForkedBlockResult.err("Eth1 deposits not available")

    # Only current hardfork with execution payloads is Bellatrix
    static: doAssert high(BeaconStateFork) == BeaconStateFork.Bellatrix

    let
      exits = withState(state):
        node.exitPool[].getBeaconBlockExits(node.dag.cfg, forkyState.data)
      effectiveExecutionPayload =
        if executionPayload.isSome:
          executionPayload.get
        elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or
            not (
              is_merge_transition_complete(proposalState[]) or
              ((not node.eth1Monitor.isNil) and node.eth1Monitor.ttdReached)):
          # https://github.com/nim-lang/Nim/issues/19802
          (static(default(bellatrix.ExecutionPayload)))
        else:
          let
            pubkey = node.dag.validatorKey(validator_index)
            maybeExecutionPayload =
              (await getExecutionPayload[bellatrix.ExecutionPayload](
                node, proposalState, slot.epoch, validator_index,
                # TODO https://github.com/nim-lang/Nim/issues/19802
                if pubkey.isSome: pubkey.get.toPubKey else: default(ValidatorPubKey)))
          if maybeExecutionPayload.isNone:
            beacon_block_production_errors.inc()
            warn "Unable to get execution payload. Skipping block proposal",
              slot, validator_index
            return ForkedBlockResult.err("Unable to get execution payload")
          maybeExecutionPayload.get

    let res = makeBeaconBlock(
      node.dag.cfg,
      state,
      validator_index,
      randao_reveal,
      eth1Proposal.vote,
      graffiti,
      node.attestationPool[].getAttestationsForBlock(state, cache),
      eth1Proposal.deposits,
      exits,
      if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
        SyncAggregate.init()
      else:
        node.syncCommitteeMsgPool[].produceSyncAggregate(head.root),
      effectiveExecutionPayload,
      noRollback, # Temporary state - no need for rollback
      cache,
      verificationFlags = if skip_randao_verification_bool: {skipRandaoVerification} else: {},
      transactions_root =
        if transactions_root.isSome:
          Opt.some transactions_root.get
        else:
          Opt.none(Eth2Digest),
      execution_payload_root =
        if execution_payload_root.isSome:
          Opt.some execution_payload_root.get
        else:
          Opt.none Eth2Digest)
    if res.isErr():
      # This is almost certainly a bug, but it's complex enough that there's a
      # small risk it might happen even when most proposals succeed - thus we
      # log instead of asserting
      beacon_block_production_errors.inc()
      error "Cannot create block for proposal",
        slot, head = shortLog(head), error = res.error()
      return err($res.error)
    return ok(res.get())
  do:
    beacon_block_production_errors.inc()
    error "Cannot get proposal state - skipping block production, database corrupt?",
      head = shortLog(head),
      slot
    return err("Cannot create proposal state")

proc getBlindedExecutionPayload(
    node: BeaconNode, slot: Slot, executionBlockRoot: Eth2Digest,
    pubkey: ValidatorPubKey):
    Future[Result[ExecutionPayloadHeader, cstring]] {.async.} =
  if node.payloadBuilderRestClient.isNil:
    return err "getBlindedExecutionPayload: nil REST client"

  let blindedHeader = awaitWithTimeout(
    node.payloadBuilderRestClient.getHeader(slot, executionBlockRoot, pubkey),
    BUILDER_PROPOSAL_DELAY_TOLERANCE):
      return err "Timeout when obtaining blinded header from builder"

  const httpOk = 200
  if blindedHeader.status != httpOk:
    return err "getBlindedExecutionPayload: non-200 HTTP response"
  else:
    if not verify_builder_signature(
        node.dag.cfg.genesisFork, blindedHeader.data.data.message,
        blindedHeader.data.data.message.pubkey,
        blindedHeader.data.data.signature):
      return err "getBlindedExecutionPayload: signature verification failed"

    return ok blindedHeader.data.data.message.header
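
# The blinded header is requested from the configured payload builder with a
# bounded wait; the response is accepted only if the HTTP status is 200 and
# the builder's signature over the bid verifies against the pubkey included in
# the bid itself.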

import std/macros

func getFieldNames(x: typedesc[auto]): seq[string] {.compileTime.} =
  var res: seq[string]
  for name, _ in fieldPairs(default(x)):
    res.add name
  res

macro copyFields(
    dst: untyped, src: untyped, fieldNames: static[seq[string]]): untyped =
  result = newStmtList()
  for name in fieldNames:
    if name notin [
        # These fields are the ones which vary between the blinded and
        # unblinded objects, and can't simply be copied.
        "transactions_root", "execution_payload",
        "execution_payload_header", "body"]:
      # TODO use stew/assign2
      result.add newAssignment(
        newDotExpr(dst, ident(name)), newDotExpr(src, ident(name)))
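
# `copyFields` expands at compile time into one assignment per listed field,
# skipping the fields that differ between blinded and unblinded block types;
# this is what lets the code below convert between the two representations
# while filling in the payload-specific fields explicitly.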

proc getBlindedBeaconBlock[T](
    node: BeaconNode, slot: Slot, head: BlockRef, validator: AttachedValidator,
    validator_index: ValidatorIndex, forkedBlock: ForkedBeaconBlock,
    executionPayloadHeader: ExecutionPayloadHeader):
    Future[Result[T, string]] {.async.} =
  static: doAssert high(BeaconStateFork) == BeaconStateFork.Bellatrix
  const
    blckFields = getFieldNames(typeof(forkedBlock.bellatrixData))
    blckBodyFields = getFieldNames(typeof(forkedBlock.bellatrixData.body))

  # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#block-proposal
  var blindedBlock: T

  copyFields(blindedBlock.message, forkedBlock.bellatrixData, blckFields)
  copyFields(
    blindedBlock.message.body, forkedBlock.bellatrixData.body, blckBodyFields)
  blindedBlock.message.body.execution_payload_header = executionPayloadHeader

  # Check with slashing protection before submitBlindedBlock
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    blockRoot = hash_tree_root(blindedBlock.message)
    signingRoot = compute_block_signing_root(
      fork, genesis_validators_root, slot, blockRoot)
    notSlashable = node.attachedValidators
      .slashingProtection
      .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

  if notSlashable.isErr:
    warn "Slashing protection activated for MEV block",
      blockRoot = shortLog(blockRoot), blck = shortLog(blindedBlock),
      signingRoot = shortLog(signingRoot),
      validator = validator.pubkey,
      slot = slot,
      existingProposal = notSlashable.error
    return err("MEV proposal would be slashable: " & $notSlashable.error)

  blindedBlock.signature =
    block:
      let res = await validator.getBlockSignature(
        fork, genesis_validators_root, slot, blockRoot, blindedBlock.message)
      if res.isErr():
        return err("Unable to sign block: " & res.error())
      res.get()

  return ok blindedBlock

proc proposeBlockMEV(
    node: BeaconNode, head: BlockRef, validator: AttachedValidator, slot: Slot,
    randao: ValidatorSig, validator_index: ValidatorIndex):
    Future[Opt[BlockRef]] {.async.} =
  let
    executionBlockRoot = node.dag.loadExecutionBlockRoot(head)
    executionPayloadHeader = awaitWithTimeout(
      node.getBlindedExecutionPayload(
        slot, executionBlockRoot, validator.pubkey),
      BUILDER_PROPOSAL_DELAY_TOLERANCE):
        Result[ExecutionPayloadHeader, cstring].err(
          "getBlindedExecutionPayload timed out")

  if executionPayloadHeader.isErr:
    debug "proposeBlockMEV: getBlindedExecutionPayload failed",
      error = executionPayloadHeader.error
    # Haven't committed to the MEV block, so allow EL fallback.
    return Opt.none BlockRef

  # When creating this block, need to ensure it uses the MEV-provided execution
  # payload, both to avoid repeated calls to network services and to ensure the
  # consistency of this block (e.g., its state root being correct). Since block
  # processing does not work directly using blinded blocks, fix up transactions
  # root after running the state transition function on an otherwise equivalent
  # non-blinded block without transactions.
  var shimExecutionPayload: ExecutionPayload
  copyFields(
    shimExecutionPayload, executionPayloadHeader.get,
    getFieldNames(ExecutionPayloadHeader))

  let newBlock = await makeBeaconBlockForHeadAndSlot(
    node, randao, validator_index, node.graffitiBytes, head, slot,
    execution_payload = Opt.some shimExecutionPayload,
    transactions_root = Opt.some executionPayloadHeader.get.transactions_root,
    execution_payload_root =
      Opt.some hash_tree_root(executionPayloadHeader.get))

  if newBlock.isErr():
    # Haven't committed to the MEV block, so allow EL fallback.
    return Opt.none BlockRef # already logged elsewhere!

  let forkedBlck = newBlock.get()

  # This is only substantively asynchronous with a remote key signer
  let blindedBlock = awaitWithTimeout(
    getBlindedBeaconBlock[SignedBlindedBeaconBlock](
      node, slot, head, validator, validator_index, forkedBlck,
      executionPayloadHeader.get),
    500.milliseconds):
      Result[SignedBlindedBeaconBlock, string].err "getBlindedBlock timed out"

  if blindedBlock.isOk:
    # By the time submitBlindedBlock is called, the slashing protection check
    # must already have been done
    let unblindedPayload =
      try:
        awaitWithTimeout(
          node.payloadBuilderRestClient.submitBlindedBlock(blindedBlock.get),
          BUILDER_BLOCK_SUBMISSION_DELAY_TOLERANCE):
            error "Submitting blinded block timed out",
              blk = shortLog(blindedBlock.get)
            return Opt.some head
        # From here on, including error paths, disallow local EL production by
        # returning Opt.some, regardless of whether on head or newBlock.
      except RestDecodingError as exc:
        error "proposeBlockMEV: REST decoding error",
          slot, head = shortLog(head), validator_index, blindedBlock,
          error = exc.msg
        return Opt.some head
      except CatchableError as exc:
        error "proposeBlockMEV: exception in submitBlindedBlock",
          slot, head = shortLog(head), validator_index, blindedBlock,
          error = exc.msg
        return Opt.some head

    const httpOk = 200
    if unblindedPayload.status == httpOk:
      if hash_tree_root(
          blindedBlock.get.message.body.execution_payload_header) !=
          hash_tree_root(unblindedPayload.data.data):
        debug "proposeBlockMEV: unblinded payload doesn't match blinded payload",
          blindedPayload =
            blindedBlock.get.message.body.execution_payload_header
      else:
        # Signature provided is consistent with unblinded execution payload,
        # so construct full beacon block
        # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#block-proposal
        var signedBlock = bellatrix.SignedBeaconBlock(
          signature: blindedBlock.get.signature)
        copyFields(
          signedBlock.message, blindedBlock.get.message,
          getFieldNames(typeof(signedBlock.message)))
        copyFields(
          signedBlock.message.body, blindedBlock.get.message.body,
          getFieldNames(typeof(signedBlock.message.body)))
        signedBlock.message.body.execution_payload = unblindedPayload.data.data

        signedBlock.root = hash_tree_root(signedBlock.message)

        doAssert signedBlock.root == hash_tree_root(blindedBlock.get.message)

        debug "proposeBlockMEV: proposing unblinded block",
          blck = shortLog(signedBlock)

        let newBlockRef =
          (await node.router.routeSignedBeaconBlock(signedBlock)).valueOr:
            # submitBlindedBlock has run, so don't allow fallback to run
            return Opt.some head # Errors logged in router

        if newBlockRef.isNone():
          return Opt.some head # Validation errors logged in router

        notice "Block proposed (MEV)",
          blockRoot = shortLog(signedBlock.root), blck = shortLog(signedBlock),
          signature = shortLog(signedBlock.signature), validator = shortLog(validator)

        beacon_blocks_proposed.inc()

        return Opt.some newBlockRef.get()
    else:
      debug "proposeBlockMEV: submitBlindedBlock failed",
        slot, head = shortLog(head), validator_index, blindedBlock,
        payloadStatus = unblindedPayload.status

    # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#proposer-slashing
    # This means if a validator publishes a signature for a
    # `BlindedBeaconBlock` (via a dissemination of a
    # `SignedBlindedBeaconBlock`) then the validator **MUST** not use the
    # local build process as a fallback, even in the event of some failure
    # with the external builder network.
    return Opt.some head
  else:
    info "proposeBlockMEV: getBlindedBeaconBlock failed",
      slot, head = shortLog(head), validator_index, blindedBlock,
      error = blindedBlock.error
    return Opt.none BlockRef
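
# Return convention: Opt.some means a blinded block has (or may have) been
# submitted to the builder, so callers must not fall back to local execution
# layer block production; Opt.none means nothing was committed and the regular
# local proposal path may still run.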

proc proposeBlock(node: BeaconNode,
                  validator: AttachedValidator,
                  validator_index: ValidatorIndex,
                  head: BlockRef,
                  slot: Slot): Future[BlockRef] {.async.} =
  if head.slot >= slot:
    # We should normally not have a head newer than the slot we're proposing for
    # but this can happen if block proposal is delayed
    warn "Skipping proposal, have newer head already",
      headSlot = shortLog(head.slot),
      headBlockRoot = shortLog(head.root),
      slot = shortLog(slot)
    return head

  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    randao =
      block:
        let res = await validator.getEpochSignature(
          fork, genesis_validators_root, slot.epoch)
        if res.isErr():
          warn "Unable to generate randao reveal",
            validator = shortLog(validator), error_msg = res.error()
          return head
        res.get()

  # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#responsibilites-during-the-merge-transition
  # "Honest validators will not utilize the external builder network until
  # after the transition from the proof-of-work chain to the proof-of-stake
  # beacon chain has been finalized by the proof-of-stake validators."
  if node.config.payloadBuilderEnable and
      not node.dag.loadExecutionBlockRoot(node.dag.finalizedHead.blck).isZero:
    let newBlockMEV = await node.proposeBlockMEV(
      head, validator, slot, randao, validator_index)

    if newBlockMEV.isSome:
      # This might be equivalent to the `head` passed in, but it signals that
      # `submitBlindedBlock` ran, so don't do anything else. Otherwise, it is
      # fine to try again with the local EL.
      return newBlockMEV.get

  let newBlock = await makeBeaconBlockForHeadAndSlot(
    node, randao, validator_index, node.graffitiBytes, head, slot)

  if newBlock.isErr():
    return head # already logged elsewhere!

  let forkedBlck = newBlock.get()

  withBlck(forkedBlck):
    let
      blockRoot = hash_tree_root(blck)
      signingRoot = compute_block_signing_root(
        fork, genesis_validators_root, slot, blockRoot)

      notSlashable = node.attachedValidators
        .slashingProtection
        .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

    if notSlashable.isErr:
      warn "Slashing protection activated for block proposal",
        blockRoot = shortLog(blockRoot), blck = shortLog(blck),
        signingRoot = shortLog(signingRoot),
        validator = validator.pubkey,
        slot = slot,
        existingProposal = notSlashable.error
      return head

    let
      signature =
        block:
          let res = await validator.getBlockSignature(
            fork, genesis_validators_root, slot, blockRoot, forkedBlck)
          if res.isErr():
            warn "Unable to sign block",
              validator = shortLog(validator), error_msg = res.error()
            return head
          res.get()
      signedBlock =
        when blck is phase0.BeaconBlock:
          phase0.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is altair.BeaconBlock:
          altair.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is bellatrix.BeaconBlock:
          bellatrix.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        else:
          static: doAssert "Unknown SignedBeaconBlock type"
      newBlockRef =
        (await node.router.routeSignedBeaconBlock(signedBlock)).valueOr:
          return head # Errors logged in router

    if newBlockRef.isNone():
      return head # Validation errors logged in router

    notice "Block proposed",
      blockRoot = shortLog(blockRoot), blck = shortLog(blck),
      signature = shortLog(signature), validator = shortLog(validator)

    beacon_blocks_proposed.inc()

    return newBlockRef.get()

proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
  ## Perform all attestations that the validators attached to this node should
  ## perform during the given slot
  if slot + SLOTS_PER_EPOCH < head.slot:
    # The latest block we know about is a lot newer than the slot we're being
    # asked to attest to - this makes it unlikely that it will be included
    # at all.
    # TODO the oldest attestations allowed are those that are older than the
    #      finalized epoch.. also, it seems that posting very old attestations
    #      is risky from a slashing perspective. More work is needed here.
    warn "Skipping attestation, head is too recent",
      head = shortLog(head),
      slot = shortLog(slot)
    return

  if slot < node.dag.finalizedHead.slot:
    # During checkpoint sync, we implicitly finalize the given slot even if the
    # state transition does not yet consider it final - this is a sanity check
    # mostly to ensure the `atSlot` below works as expected
    warn "Skipping attestation - slot already finalized",
      head = shortLog(head),
      slot = shortLog(slot),
      finalized = shortLog(node.dag.finalizedHead)
    return

  let attestationHead = head.atSlot(slot)
  if head != attestationHead.blck:
    # In rare cases, such as when we're busy syncing or just slow, we'll be
    # attesting to a past state - we must then recreate the world as it looked
    # like back then
    notice "Attesting to a state in the past, falling behind?",
      attestationHead = shortLog(attestationHead),
      head = shortLog(head)

  trace "Checking attestations",
    attestationHead = shortLog(attestationHead),
    head = shortLog(head)

  # We need to run attestations exactly for the slot that we're attesting to.
  # In case blocks went missing, this means advancing past the latest block
  # using empty slots as fillers.
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#validator-assignments
  let
    epochRef = node.dag.getEpochRef(
      attestationHead.blck, slot.epoch, false).valueOr:
        warn "Cannot construct EpochRef for attestation head, report bug",
          attestationHead = shortLog(attestationHead), slot
        return
    committees_per_slot = get_committee_count_per_slot(epochRef.shufflingRef)
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root

  for committee_index in get_committee_indices(committees_per_slot):
    let committee = get_beacon_committee(
      epochRef.shufflingRef, slot, committee_index)

    for index_in_committee, validator_index in committee:
      let validator = node.getAttachedValidator(validator_index)
      if validator == nil:
        continue

      let
        data = makeAttestationData(epochRef, attestationHead, committee_index)
        # TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
        signingRoot = compute_attestation_signing_root(
          fork, genesis_validators_root, data)
        registered = node.attachedValidators
          .slashingProtection
          .registerAttestation(
            validator_index,
            validator.pubkey,
            data.source.epoch,
            data.target.epoch,
            signingRoot)
      if registered.isOk():
        let subnet_id = compute_subnet_for_attestation(
          committees_per_slot, data.slot, committee_index)
        asyncSpawn createAndSendAttestation(
          node, fork, genesis_validators_root, validator, data,
          committee.len(), index_in_committee, subnet_id)
      else:
        warn "Slashing protection activated for attestation",
          attestationData = shortLog(data),
          signingRoot = shortLog(signingRoot),
          validator_index,
          validator = shortLog(validator),
          badVoteDetails = $registered.error()

proc createAndSendSyncCommitteeMessage(node: BeaconNode,
                                       validator: AttachedValidator,
                                       slot: Slot,
                                       subcommitteeIdx: SyncSubcommitteeIndex,
                                       head: BlockRef) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      msg =
        block:
          let res = await validator.getSyncCommitteeMessage(
            fork, genesis_validators_root, slot, head.root)
          if res.isErr():
            warn "Unable to sign committee message",
              validator = shortLog(validator), slot = slot,
              block_root = shortLog(head.root)
            return
          res.get()

    # Logged in the router
    let res = await node.router.routeSyncCommitteeMessage(
      msg, subcommitteeIdx, checkSignature = false)

    if not res.isOk():
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, msg, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    notice "Error sending sync committee message", err = exc.msg
|
|
|
|
|
|
|
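# Fan out sync committee message production to every attached validator that
# participates in the sync committee of the next slot.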
proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
  # TODO Use a view type to avoid the copy
  let
    syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

  for subcommitteeIdx in SyncSubcommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
      let validator = node.getAttachedValidator(valIdx)
      if isNil(validator) or validator.index.isNone():
        continue
      asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
                                                   subcommitteeIdx, head)

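# Produce and broadcast a signed sync committee contribution, but only when
# the selection proof marks this validator as a subcommittee aggregator.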
proc signAndSendContribution(node: BeaconNode,
                             validator: AttachedValidator,
                             subcommitteeIdx: SyncSubcommitteeIndex,
                             head: BlockRef,
                             slot: Slot) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      selectionProof = block:
        let res = await validator.getSyncCommitteeSelectionProof(
          fork, genesis_validators_root, slot, subcommitteeIdx)
        if res.isErr():
          warn "Unable to generate committee selection proof",
            validator = shortLog(validator), slot,
            subnet_id = subcommitteeIdx, error = res.error()
          return
        res.get()

    if not is_sync_committee_aggregator(selectionProof):
      return

    var
      msg = SignedContributionAndProof(
        message: ContributionAndProof(
          aggregator_index: uint64 validator.index.get,
          selection_proof: selectionProof))

    if not node.syncCommitteeMsgPool[].produceContribution(
        slot,
        head.root,
        subcommitteeIdx,
        msg.message.contribution):
      return

    msg.signature = block:
      let res = await validator.getContributionAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign sync committee contribution",
          validator = shortLog(validator), message = shortLog(msg.message),
          error_msg = res.error()
        return
      res.get()

    # Logged in the router
    discard await node.router.routeSignedContributionAndProof(msg, false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending sync committee contribution", err = exc.msg

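# Spawn contribution signing for each attached validator in the sync committee
# of the next slot, one task per subcommittee membership.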
proc handleSyncCommitteeContributions(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

  for subcommitteeIdx in SyncSubCommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
      let validator = node.getAttachedValidator(valIdx)
      if validator == nil:
        continue

      asyncSpawn signAndSendContribution(
        node, validator, subcommitteeIdx, head, slot)

proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
    Future[BlockRef] {.async.} =
  ## Perform the proposal for the given slot, iff we have a validator attached
  ## that is supposed to do so, given the shuffling at that slot for the given
  ## head - to compute the proposer, we need to advance a state to the given
  ## slot
  let proposer = node.dag.getProposer(head, slot)
  if proposer.isNone():
    return head

  let
    proposerKey = node.dag.validatorKey(proposer.get).get().toPubKey
    validator = node.attachedValidators[].getValidator(proposerKey)

  return
    if validator == nil:
      debug "Expecting block proposal",
        headRoot = shortLog(head.root),
        slot = shortLog(slot),
        proposer_index = proposer.get(),
        proposer = shortLog(proposerKey)

      head
    else:
      await proposeBlock(node, validator, proposer.get(), head, slot)

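# Produce and broadcast a signed aggregate-and-proof for the given committee,
# but only when the slot signature marks this validator as an aggregator.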
proc signAndSendAggregate(
    node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,
    slot: Slot, committee_index: CommitteeIndex) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      validator_index = validator.index.get()
      selectionProof = block:
        let res = await validator.getSlotSignature(
          fork, genesis_validators_root, slot)
        if res.isErr():
          warn "Unable to create slot signature",
            validator = shortLog(validator),
            slot, error = res.error()
          return
        res.get()

    # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/validator.md#aggregation-selection
    if not is_aggregator(
        shufflingRef, slot, committee_index, selectionProof):
      return

    # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/validator.md#construct-aggregate
    # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.1/specs/phase0/validator.md#aggregateandproof
    var
      msg = SignedAggregateAndProof(
        message: AggregateAndProof(
          aggregator_index: uint64 validator_index,
          selection_proof: selectionProof))

    msg.message.aggregate = node.attestationPool[].getAggregatedAttestation(
        slot, committee_index).valueOr:
      return

    msg.signature = block:
      let res = await validator.getAggregateAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign aggregate",
          validator = shortLog(validator), error_msg = res.error()
        return
      res.get()

    # Logged in the router
    discard await node.router.routeSignedAggregateAndProof(
      msg, checkSignature = false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending aggregate", err = exc.msg

proc sendAggregatedAttestations(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  # Aggregated attestations must be sent by members of the beacon committees
  # for the given slot, for which `is_aggregator` returns `true`.

  let
    shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
      warn "Cannot construct EpochRef for head, report bug",
        head = shortLog(head), slot
      return
    committees_per_slot = get_committee_count_per_slot(shufflingRef)

  for committee_index in get_committee_indices(committees_per_slot):
    for _, validator_index in
        get_beacon_committee(shufflingRef, slot, committee_index):
      let validator = node.getAttachedValidator(validator_index)
      if validator != nil:
        asyncSpawn signAndSendAggregate(
          node, validator, shufflingRef, slot, committee_index)

proc updateValidatorMetrics*(node: BeaconNode) =
  # Technically, this only needs to be done on epoch transitions and if there's
  # a reorg that spans an epoch transition, but it's easier to implement this
  # way for now.

  # We'll limit labelled metrics to the first 64, so that we don't overload
  # Prometheus.

  var total: Gwei
  var i = 0
  for _, v in node.attachedValidators[].validators:
    let balance =
      if v.index.isNone():
        0.Gwei
      elif v.index.get().uint64 >=
          getStateField(node.dag.headState, balances).lenu64:
        debug "Cannot get validator balance, index out of bounds",
          pubkey = shortLog(v.pubkey), index = v.index.get(),
          balances = getStateField(node.dag.headState, balances).len,
          stateRoot = getStateRoot(node.dag.headState)
        0.Gwei
      else:
        getStateField(node.dag.headState, balances).item(v.index.get())

    if i < 64:
      attached_validator_balance.set(
        balance.toGaugeValue, labelValues = [shortLog(v.pubkey)])

    inc i
    total += balance

  node.attachedValidatorBalanceTotal = total
  attached_validator_balance_total.set(total.toGaugeValue)

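# Builder API (MEV) support: the procs below produce and submit
# SignedValidatorRegistrationV1 messages so that an external payload builder
# knows the fee recipient and gas limit to use for the attached validators.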
from std/times import epochTime

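# Build and sign a SignedValidatorRegistrationV1 for a single attached
# validator, using the configured fee recipient and a stand-in gas limit.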
proc getValidatorRegistration(
    node: BeaconNode, validator: AttachedValidator, epoch: Epoch):
    Future[Result[SignedValidatorRegistrationV1, string]] {.async.} =
  # Stand-in, reasonable default
  const gasLimit = 30000000

  let validatorIdx = validator.index.valueOr:
    # The validator index will be missing when the validator was not
    # activated for duties yet. We can safely skip the registration then.
    return

  let feeRecipient = node.getFeeRecipient(validator.pubkey, validatorIdx, epoch)
  var validatorRegistration = SignedValidatorRegistrationV1(
    message: ValidatorRegistrationV1(
      fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
      gas_limit: gasLimit,
      timestamp: epochTime().uint64,
      pubkey: validator.pubkey))

  let signature = await validator.getBuilderSignature(
    node.dag.cfg.genesisFork, validatorRegistration.message)

  debug "getValidatorRegistration: registering",
    validatorRegistration

  if signature.isErr:
    return err signature.error

  validatorRegistration.signature = signature.get

  return ok validatorRegistration

from std/sequtils import toSeq

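# Register attached (and validator-client-provided) validators with the
# external payload builder; a no-op when the builder is not configured or
# Bellatrix is not yet active.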
proc registerValidators(node: BeaconNode, epoch: Epoch) {.async.} =
  try:
    if (not node.config.payloadBuilderEnable) or
        node.currentSlot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH:
      return
    elif node.config.payloadBuilderEnable and
        node.payloadBuilderRestClient.isNil:
      warn "registerValidators: node.config.payloadBuilderEnable and node.payloadBuilderRestClient.isNil"
      return

    const HttpOk = 200

    let restBuilderStatus = awaitWithTimeout(node.payloadBuilderRestClient.checkBuilderStatus(),
                                             BUILDER_STATUS_DELAY_TOLERANCE):
      debug "Timeout when obtaining builder status"
      return

    if restBuilderStatus.status != HttpOk:
      warn "registerValidators: specified builder or relay not available",
        builderUrl = node.config.payloadBuilderUrl,
        builderStatus = restBuilderStatus
      return

    # The async aspect of signing the registrations can cause the attached
    # validators to change during the loop.
    let attachedValidatorPubkeys =
      toSeq(node.attachedValidators[].validators.keys)

    # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#validator-registration
    var validatorRegistrations: seq[SignedValidatorRegistrationV1]

    # First, check for VC-added keys; cheaper because provided pre-signed
    var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
    if node.externalBuilderRegistrations.len > 0:
      withState(node.dag.headState):
        let currentEpoch = node.currentSlot().epoch
        for i in 0 ..< forkyState.data.validators.len:
          # https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml
          # "requests containing currently inactive or unknown validator
          # pubkeys will be accepted, as they may become active at a later
          # epoch" which means filtering is needed here, because including
          # any validators not pending or active may cause the request, as
          # a whole, to fail.
          let pubkey = forkyState.data.validators.item(i).pubkey
          if pubkey in node.externalBuilderRegistrations and
              forkyState.data.validators.item(i).exit_epoch > currentEpoch:
            let signedValidatorRegistration =
              node.externalBuilderRegistrations[pubkey]
            nonExitedVcPubkeys.incl signedValidatorRegistration.message.pubkey
            validatorRegistrations.add signedValidatorRegistration

    for key in attachedValidatorPubkeys:
      # Already included from VC
      if key in nonExitedVcPubkeys:
        warn "registerValidators: same validator registered by beacon node and validator client",
          pubkey = shortLog(key)
        continue

      # Time passed during awaits; REST keymanager API might have removed it
      if key notin node.attachedValidators[].validators:
        continue

      let validator = node.attachedValidators[].validators[key]

      if validator.index.isNone:
        continue

      # https://ethereum.github.io/builder-specs/#/Builder/registerValidator
      # Builders should verify that `pubkey` corresponds to an active or
      # pending validator
      withState(node.dag.headState):
        if distinctBase(validator.index.get) >=
            forkyState.data.validators.lenu64:
          continue

        if node.currentSlot().epoch >=
            forkyState.data.validators.item(validator.index.get).exit_epoch:
          continue

      if validator.externalBuilderRegistration.isSome:
        validatorRegistrations.add validator.externalBuilderRegistration.get
      else:
        let validatorRegistration =
          await node.getValidatorRegistration(validator, epoch)
        if validatorRegistration.isErr:
          error "registerValidators: validatorRegistration failed",
            validatorRegistration
          continue

        # Time passed during await; REST keymanager API might have removed it
        if key notin node.attachedValidators[].validators:
          continue

        node.attachedValidators[].validators[key].externalBuilderRegistration =
          Opt.some validatorRegistration.get
        validatorRegistrations.add validatorRegistration.get

    let registerValidatorResult =
      awaitWithTimeout(node.payloadBuilderRestClient.registerValidator(validatorRegistrations),
                       BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE):
        error "Timeout when registering validator with builder"
        return

    if HttpOk != registerValidatorResult.status:
      warn "registerValidators: Couldn't register validator with MEV builder",
        registerValidatorResult
  except CatchableError as exc:
    warn "registerValidators: exception",
      error = exc.msg

proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
  ## Perform validator duties - create blocks, vote and aggregate existing votes
  if node.attachedValidators[].count == 0:
    # Nothing to do because we have no validator attached
    return

  # The dag head might be updated by sync while we're working due to the
  # await calls, thus we use a local variable to keep the logic straight here
  var head = node.dag.head
  if not node.isSynced(head):
    info "Syncing in progress; skipping validator duties for now",
      slot, headSlot = head.slot

    # Rewards will be growing though, as we sync..
    updateValidatorMetrics(node)

    return

  var curSlot = lastSlot + 1

  # If broadcastStartEpoch is 0, it hasn't had time to initialize yet, which
  # means that it'd be okay not to continue, but it won't gossip regardless.
  let doppelgangerDetection = node.processor[].doppelgangerDetection
  if curSlot.epoch < doppelgangerDetection.broadcastStartEpoch and
      doppelgangerDetection.nodeLaunchSlot > GENESIS_SLOT and
      node.config.doppelgangerDetection:
    let
      nextAttestationSlot =
        node.consensusManager[].actionTracker.getNextAttestationSlot(slot - 1)
      nextProposalSlot =
        node.consensusManager[].actionTracker.getNextProposalSlot(slot - 1)

    if slot in [nextAttestationSlot, nextProposalSlot]:
      notice "Doppelganger detection active - skipping validator duties while observing activity on the network",
        slot, epoch = slot.epoch,
        broadcastStartEpoch = doppelgangerDetection.broadcastStartEpoch

    return

  # Start by checking if there's work we should have done in the past that we
  # can still meaningfully do
  while curSlot < slot:
    notice "Catching up on validator duties",
      curSlot = shortLog(curSlot),
      lastSlot = shortLog(lastSlot),
      slot = shortLog(slot)

    # For every slot we're catching up, we'll propose then send
    # attestations - head should normally be advancing along the same branch
    # in this case
    head = await handleProposal(node, head, curSlot)

    # For each slot we missed, we need to send out attestations - if we were
    # proposing during this time, we'll use the newly proposed head, else just
    # keep reusing the same - the attestation that goes out will actually
    # rewind the state to what it looked like at the time of that slot
    handleAttestations(node, head, curSlot)

    curSlot += 1

  # https://github.com/ethereum/builder-specs/blob/v0.2.0/specs/validator.md#registration-dissemination
  # This specification suggests validators re-submit to builder software every
  # `EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION` epochs.
  if slot.is_epoch and
      slot.epoch mod EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION == 0:
    asyncSpawn node.registerValidators(slot.epoch)

  let
    newHead = await handleProposal(node, head, slot)
    didSubmitBlock = (newHead != head)
  head = newHead

  let
    # The latest point in time when we'll be sending out attestations
    attestationCutoff = node.beaconClock.fromNow(slot.attestation_deadline())

  if attestationCutoff.inFuture:
    debug "Waiting to send attestations",
      head = shortLog(head),
      attestationCutoff = shortLog(attestationCutoff.offset)

    # Wait either for the block or the attestation cutoff time to arrive
    if await node.consensusManager[].expectBlock(slot)
        .withTimeout(attestationCutoff.offset):
      # The expected block arrived (or expectBlock was called again which
      # shouldn't happen as this is the only place we use it) - in our async
      # loop however, we might have been doing other processing that caused delays
      # here so we'll cap the waiting to the time when we would have sent out
      # attestations had the block not arrived.
      # An opposite case is that we received (or produced) a block that has
      # not yet reached our neighbours. To protect against our attestations
      # being dropped (because the others have not yet seen the block), we'll
      # impose a minimum delay of 2000ms. The delay is enforced only when we're
      # not hitting the "normal" cutoff time for sending out attestations.
      # An earlier delay of 250ms has proven to be not enough, increasing the
      # risk of losing attestations, and with growing block sizes, 1000ms
      # started to be risky as well.
      # Regardless, because we "just" received the block, we'll impose the
      # delay.

      # Take into consideration chains with a different slot time
      const afterBlockDelay = nanos(attestationSlotOffset.nanoseconds div 2)
      let
        afterBlockTime = node.beaconClock.now() + afterBlockDelay
        afterBlockCutoff = node.beaconClock.fromNow(
          min(afterBlockTime, slot.attestation_deadline() + afterBlockDelay))

      if afterBlockCutoff.inFuture:
        debug "Got block, waiting to send attestations",
          head = shortLog(head),
          afterBlockCutoff = shortLog(afterBlockCutoff.offset)

        await sleepAsync(afterBlockCutoff.offset)

    # Time passed - we might need to select a new head in that case
    node.consensusManager[].updateHead(slot)
    head = node.dag.head

  static: doAssert attestationSlotOffset == syncCommitteeMessageSlotOffset

  handleAttestations(node, head, slot)
  handleSyncCommitteeMessages(node, head, slot)

  updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers

  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/validator.md#broadcast-aggregate
  # https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/altair/validator.md#broadcast-sync-committee-contribution
  # Wait 2 / 3 of the slot time to allow messages to propagate, then collect
  # the result in aggregates
  static:
    doAssert aggregateSlotOffset == syncContributionSlotOffset, "Timing change?"
  let
    aggregateCutoff = node.beaconClock.fromNow(slot.aggregate_deadline())
  if aggregateCutoff.inFuture:
    debug "Waiting to send aggregate attestations",
      aggregateCutoff = shortLog(aggregateCutoff.offset)
    await sleepAsync(aggregateCutoff.offset)

  let sendAggregatedAttestationsFut =
    sendAggregatedAttestations(node, head, slot)

  let handleSyncCommitteeContributionsFut =
    handleSyncCommitteeContributions(node, head, slot)

  await handleSyncCommitteeContributionsFut
  await sendAggregatedAttestationsFut

proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
  ## Register upcoming duties of attached validators with the duty tracker

  if node.attachedValidators[].count() == 0 or
      not node.isSynced(node.dag.head):
    # Nothing to do because we have no validator attached
    return

  let
    genesis_validators_root = node.dag.genesis_validators_root
    head = node.dag.head

  # Getting the slot signature is expensive but cached - in "normal" cases we'll
  # be getting the duties one slot at a time
  for slot in wallSlot ..< wallSlot + SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS:
    let
      shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
        warn "Cannot construct EpochRef for duties - report bug",
          head = shortLog(head), slot
        return
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      committees_per_slot = get_committee_count_per_slot(shufflingRef)

    for committee_index in get_committee_indices(committees_per_slot):
      let committee = get_beacon_committee(shufflingRef, slot, committee_index)

      for index_in_committee, validator_index in committee:
        let validator = node.getAttachedValidator(validator_index)
        if validator != nil:
          let
            subnet_id = compute_subnet_for_attestation(
              committees_per_slot, slot, committee_index)
          let slotSigRes = await validator.getSlotSignature(
            fork, genesis_validators_root, slot)
          if slotSigRes.isErr():
            error "Unable to create slot signature",
              validator = shortLog(validator),
              error_msg = slotSigRes.error()
            continue
          let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())

          node.consensusManager[].actionTracker.registerDuty(
            slot, subnet_id, validator_index, isAggregator)