2020-05-06 13:23:45 +00:00
|
|
|
# beacon_chain
|
2023-01-06 21:01:10 +00:00
|
|
|
# Copyright (c) 2018-2023 Status Research & Development GmbH
|
2020-05-06 13:23:45 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2023-01-20 14:14:37 +00:00
|
|
|
{.push raises: [].}
|
2021-03-26 06:52:01 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
# This module is responsible for handling beacon node validators, ie those that
|
|
|
|
# that are running directly in the beacon node and not in a separate validator
|
|
|
|
# client process
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
import
|
|
|
|
# Standard library
|
2022-12-09 16:05:55 +00:00
|
|
|
std/[os, tables, sequtils],
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# Nimble packages
|
2023-02-21 13:21:38 +00:00
|
|
|
stew/[assign2, byteutils],
|
2021-03-26 14:11:06 +00:00
|
|
|
chronos, metrics,
|
2021-08-28 22:27:51 +00:00
|
|
|
chronicles, chronicles/timings,
|
2022-06-21 19:01:45 +00:00
|
|
|
json_serialization/std/[options, sets, net],
|
2020-05-14 11:19:10 +00:00
|
|
|
eth/db/kvstore,
|
2021-05-12 12:31:02 +00:00
|
|
|
eth/keys, eth/p2p/discoveryv5/[protocol, enr],
|
2021-12-17 12:23:32 +00:00
|
|
|
web3/ethtypes,
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# Local modules
|
2022-01-18 13:36:52 +00:00
|
|
|
../spec/datatypes/[phase0, altair, bellatrix],
|
2021-08-18 18:57:58 +00:00
|
|
|
../spec/[
|
2021-10-18 09:11:44 +00:00
|
|
|
eth2_merkleization, forks, helpers, network, signatures, state_transition,
|
|
|
|
validator],
|
2021-03-04 09:13:44 +00:00
|
|
|
../consensus_object_pools/[
|
2021-08-28 22:27:51 +00:00
|
|
|
spec_cache, blockchain_dag, block_clearance, attestation_pool, exit_pool,
|
2022-07-13 14:13:54 +00:00
|
|
|
sync_committee_msg_pool, consensus_manager],
|
2021-03-03 06:23:05 +00:00
|
|
|
../eth1/eth1_monitor,
|
2021-03-05 13:12:00 +00:00
|
|
|
../networking/eth2_network,
|
2021-08-18 18:57:58 +00:00
|
|
|
../sszdump, ../sync/sync_manager,
|
2022-07-13 14:13:54 +00:00
|
|
|
../gossip_processing/block_processor,
|
2022-06-21 19:01:45 +00:00
|
|
|
".."/[conf, beacon_clock, beacon_node],
|
2022-08-01 06:41:47 +00:00
|
|
|
"."/[slashing_protection, validator_pool, keystore_management],
|
2023-02-06 18:07:30 +00:00
|
|
|
".."/spec/mev/[rest_bellatrix_mev_calls, rest_capella_mev_calls]
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2022-04-05 08:40:59 +00:00
|
|
|
from eth/async_utils import awaitWithTimeout
|
|
|
|
|
2022-08-19 21:51:30 +00:00
|
|
|
const
  # Histogram bucket boundaries, in seconds, for delay metrics; negative
  # buckets capture events that happened ahead of the nominal deadline.
  delayBuckets = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
                  0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, Inf]

  # Maximum time to wait for the external block builder's status endpoint.
  BUILDER_STATUS_DELAY_TOLERANCE = 3.seconds

  # Maximum time to wait for the builder to accept validator registrations.
  BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE = 6.seconds
|
2020-11-11 12:14:09 +00:00
|
|
|
|
2022-08-19 21:51:30 +00:00
|
|
|
# Metrics for tracking attestation and beacon block loss
declareCounter beacon_light_client_finality_updates_sent,
  "Number of LC finality updates sent by this peer"

declareCounter beacon_light_client_optimistic_updates_sent,
  "Number of LC optimistic updates sent by this peer"

declareCounter beacon_blocks_proposed,
  "Number of beacon chain blocks sent by this peer"

declareCounter beacon_block_production_errors,
  "Number of times we failed to produce a block"

declareCounter beacon_block_payload_errors,
  "Number of times execution client failed to produce block payload"

declareCounter beacon_blobs_sidecar_payload_errors,
  "Number of times execution client failed to produce blobs sidecar"

# Metrics for tracking external block builder usage
declareCounter beacon_block_builder_missed_with_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed with fallback"

declareCounter beacon_block_builder_missed_without_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed without possible fallback"

declareGauge(attached_validator_balance,
  "Validator balance at slot end of the first 64 validators, in Gwei",
  labels = ["pubkey"])

# Public gauge: exposed beyond this module's metric namespace.
declarePublicGauge(attached_validator_balance_total,
  "Validator balance of all attached validators, in Gwei")
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
logScope: topics = "beacval"

type
  # Result of local (engine-API) block production: the forked block plus
  # the block's value in Wei, or an error description.
  ForkedBlockResult =
    Result[tuple[blck: ForkedBeaconBlock, blockValue: Wei], string]

  # Result of builder-API block production; SBBB is the fork-specific
  # blinded block part (presumably a signed blinded beacon block type --
  # confirm at instantiation sites).
  BlindedBlockResult[SBBB] =
    Result[tuple[blindedBlckPart: SBBB, blockValue: UInt256], string]

  # Node sync classification used to gate validator duties.
  SyncStatus* {.pure.} = enum
    synced
    unsynced
    optimistic
|
|
|
|
|
2022-11-20 13:55:43 +00:00
|
|
|
proc getValidator*(validators: auto,
                   pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
  ## Look up `pubkey` in `validators`, returning both the registry index
  ## and the validator record when found, `none` otherwise.
  let position = validators.findIt(it.pubkey == pubkey)
  if position != -1:
    Opt.some ValidatorAndIndex(index: ValidatorIndex(position),
                               validator: validators[position])
  else:
    # A key may legitimately be absent from the state registry: we allow
    # attaching a validator whose deposit has not been processed yet.
    Opt.none ValidatorAndIndex
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-11-30 01:20:21 +00:00
|
|
|
proc addValidators*(node: BeaconNode) =
  ## Load every loadable keystore from the configured validators directory
  ## and attach it to the node, resolving each key against the head state
  ## to recover its validator index (when the deposit has been processed).
  info "Loading validators", validatorsDir = node.config.validatorsDir(),
        keystore_cache_available = not(isNil(node.keystoreCache))
  let
    epoch = node.currentSlot().epoch

  for keystore in listLoadableKeystores(node.config, node.keystoreCache):
    let
      # Find the validator in the head state, if it is known there yet.
      data = withState(node.dag.headState):
        getValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
      index =
        if data.isSome():
          Opt.some(data.get().index)
        else:
          Opt.none(ValidatorIndex)
      # Per-validator proposal configuration from the consensus manager.
      feeRecipient = node.consensusManager[].getFeeRecipient(
        keystore.pubkey, index, epoch)
      gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey)

      v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit)
    # Propagate the state-registry information onto the attached validator.
    v.updateValidator(data)
|
2020-08-10 13:21:31 +00:00
|
|
|
|
2023-02-20 11:28:56 +00:00
|
|
|
proc getValidator*(node: BeaconNode, idx: ValidatorIndex): Opt[AttachedValidator] =
  ## Translate a state validator index into the locally attached validator
  ## for that key, if one is attached.
  let cooked = node.dag.validatorKey(idx)
  if cooked.isNone():
    return Opt.none(AttachedValidator)
  node.attachedValidators[].getValidator(cooked.get().toPubKey())
|
|
|
|
|
2022-12-09 16:05:55 +00:00
|
|
|
proc getValidatorForDuties*(
    node: BeaconNode, idx: ValidatorIndex, slot: Slot,
    slashingSafe = false): Opt[AttachedValidator] =
  ## Resolve `idx` to an attached validator eligible to perform duties at
  ## `slot`. Delegates the eligibility decision to the validator pool;
  ## `slashingSafe` presumably marks duties that cannot cause slashing --
  ## confirm semantics in `validator_pool.getValidatorForDuties`.
  let key = ? node.dag.validatorKey(idx)

  node.attachedValidators[].getValidatorForDuties(
    key.toPubKey(), slot, slashingSafe)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2022-10-27 17:22:32 +00:00
|
|
|
proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
  ## Heuristic sync classification: compare `head` against the wall-clock
  ## slot and report synced, unsynced or (execution-layer) optimistic.
  ##
  ## TODO Slot time keeps advancing even when no blocks are produced, so
  ##      this cannot distinguish validators genuinely going missing from
  ##      the node being poorly connected (network split, internet
  ##      outage). Running on as if we were the only legit node left would
  ##      generally be correct, but with enough empty slots the validator
  ##      pool empties out, producing empty committees and lots of empty
  ##      slot processing that is thrown away once we are synced again.
  let
    # The slot we should be at, according to the clock
    wallTime = node.beaconClock.now()
    wallSlot = wallTime.toSlot()

  # TODO if everyone follows this logic, the network will not recover from
  # a halt: nobody will be producing blocks because everyone expects
  # someone else to do it
  if wallSlot.afterGenesis and
      head.slot + node.config.syncHorizon < wallSlot.slot:
    SyncStatus.unsynced
  elif node.dag.is_optimistic(head.root):
    SyncStatus.optimistic
  else:
    SyncStatus.synced
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
proc handleLightClientUpdates*(node: BeaconNode, slot: Slot) {.async.} =
  ## Broadcast light client (LC) finality and optimistic updates for `slot`,
  ## sleeping until the scheduled intra-slot send time before gossiping.
  # Both update kinds share one send time; assert that at compile time.
  static: doAssert lightClientFinalityUpdateSlotOffset ==
    lightClientOptimisticUpdateSlotOffset
  let sendTime = node.beaconClock.fromNow(
    slot.light_client_finality_update_time())
  if sendTime.inFuture:
    debug "Waiting to send LC updates", slot, delay = shortLog(sendTime.offset)
    await sleepAsync(sendTime.offset)

  withForkyFinalityUpdate(node.dag.lcDataStore.cache.latest):
    when lcDataFork > LightClientDataFork.None:
      # Only forward data that was signed in this very slot.
      let signature_slot = forkyFinalityUpdate.signature_slot
      if slot != signature_slot:
        return

      let num_active_participants =
        forkyFinalityUpdate.sync_aggregate.num_active_participants
      if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
        return

      # Finality update: send only if it advances the last forwarded slot.
      let finalized_slot = forkyFinalityUpdate.finalized_header.beacon.slot
      if finalized_slot > node.lightClientPool[].latestForwardedFinalitySlot:
        template msg(): auto = forkyFinalityUpdate
        let sendResult =
          await node.network.broadcastLightClientFinalityUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        node.lightClientPool[].latestForwardedFinalitySlot = finalized_slot

        if sendResult.isOk:
          beacon_light_client_finality_updates_sent.inc()
          notice "LC finality update sent", message = shortLog(msg)
        else:
          warn "LC finality update failed to send",
            error = sendResult.error()

      # Optimistic update: derived from the same finality update.
      let attested_slot = forkyFinalityUpdate.attested_header.beacon.slot
      if attested_slot > node.lightClientPool[].latestForwardedOptimisticSlot:
        let msg = forkyFinalityUpdate.toOptimistic
        let sendResult =
          await node.network.broadcastLightClientOptimisticUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        node.lightClientPool[].latestForwardedOptimisticSlot = attested_slot

        if sendResult.isOk:
          beacon_light_client_optimistic_updates_sent.inc()
          notice "LC optimistic update sent", message = shortLog(msg)
        else:
          warn "LC optimistic update failed to send",
            error = sendResult.error()
|
2022-05-23 12:02:54 +00:00
|
|
|
|
2020-06-05 09:57:40 +00:00
|
|
|
proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              validator: AttachedValidator,
                              data: AttestationData,
                              committeeLen: int,
                              indexInCommittee: int,
                              subnet_id: SubnetId) {.async.} =
  ## Sign attestation `data` with `validator` and route it to `subnet_id`.
  ## All failures are logged rather than raised: this runs as an
  ## `asyncSpawn` task, so exceptions must not escape.
  try:
    let
      signature = block:
        let res = await validator.getAttestationSignature(
          fork, genesis_validators_root, data)
        if res.isErr():
          warn "Unable to sign attestation", validator = shortLog(validator),
                attestationData = shortLog(data), error_msg = res.error()
          return
        res.get()
      attestation =
        Attestation.init(
          [uint64 indexInCommittee], committeeLen, data, signature).expect(
            "valid data")

    # Record signing activity for this epoch on the validator.
    validator.doppelgangerActivity(attestation.data.slot.epoch)

    # Logged in the router
    let res = await node.router.routeAttestation(
      attestation, subnet_id, checkSignature = false)
    if not res.isOk():
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, attestation.data, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending attestation", err = exc.msg
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-11-19 17:19:03 +00:00
|
|
|
proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: ForkedHashedBeaconState):
                               BlockProposalEth1Data =
  ## Collect the eth1 vote and deposit data that a block proposed on top of
  ## `state` should carry, anchored at the finalized checkpoint's eth1 data.
  let finalizedEpochRef = node.dag.getFinalizedEpochRef()
  node.elManager.getBlockProposalData(
    state, finalizedEpochRef.eth1_data,
    finalizedEpochRef.eth1_deposit_index)
|
2022-04-14 20:15:34 +00:00
|
|
|
|
2022-08-19 21:51:30 +00:00
|
|
|
proc getFeeRecipient(node: BeaconNode, pubkey: ValidatorPubKey,
                     validatorIdx: ValidatorIndex, epoch: Epoch): Eth1Address =
  ## Thin wrapper around the consensus manager's fee-recipient lookup for a
  ## validator whose index is already known.
  node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch)
|
2022-08-19 21:51:30 +00:00
|
|
|
|
2023-02-15 15:10:31 +00:00
|
|
|
proc getGasLimit(node: BeaconNode, pubkey: ValidatorPubKey): uint64 =
  ## Thin wrapper around the consensus manager's per-validator gas limit.
  node.consensusManager[].getGasLimit(pubkey)
|
|
|
|
|
2022-08-01 06:41:47 +00:00
|
|
|
from web3/engine_api_types import PayloadExecutionStatus
|
2022-12-04 07:42:03 +00:00
|
|
|
from ../spec/datatypes/capella import BeaconBlock, ExecutionPayload
|
2023-03-05 01:40:21 +00:00
|
|
|
from ../spec/datatypes/deneb import BeaconBlock, ExecutionPayload, shortLog
|
|
|
|
from ../spec/beaconstate import get_expected_withdrawals
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
proc getExecutionPayload(
    PayloadType: type ForkyExecutionPayloadForSigning,
    node: BeaconNode, proposalState: ref ForkedHashedBeaconState,
    epoch: Epoch, validator_index: ValidatorIndex): Future[Opt[PayloadType]] {.async.} =
  ## Request an execution payload of `PayloadType` from the execution layer
  ## for the proposal represented by `proposalState`. Returns `none` on any
  ## failure (logged; errors are also counted in metrics).
  # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload

  let feeRecipient = block:
    let pubkey = node.dag.validatorKey(validator_index)
    if pubkey.isNone():
      # Should not happen for a proposer index we ourselves derived.
      error "Cannot get proposer pubkey, bug?", validator_index
      default(Eth1Address)
    else:
      node.getFeeRecipient(pubkey.get().toPubKey(), validator_index, epoch)

  try:
    let
      beaconHead = node.attestationPool[].getBeaconHead(node.dag.head)
      # Parent execution block to build on: the latest payload header in
      # the proposal state (zero digest before the merge/Bellatrix).
      executionHead = withState(proposalState[]):
        when consensusFork >= ConsensusFork.Bellatrix:
          forkyState.data.latest_execution_payload_header.block_hash
        else:
          (static(default(Eth2Digest)))
      latestSafe = beaconHead.safeExecutionPayloadHash
      latestFinalized = beaconHead.finalizedExecutionPayloadHash
      timestamp = withState(proposalState[]):
        compute_timestamp_at_slot(forkyState.data, forkyState.data.slot)
      random = withState(proposalState[]):
        get_randao_mix(forkyState.data, get_current_epoch(forkyState.data))
      # Withdrawals are only part of payloads from Capella onwards.
      withdrawals = withState(proposalState[]):
        when consensusFork >= ConsensusFork.Capella:
          get_expected_withdrawals(forkyState.data)
        else:
          @[]
      payload = await node.elManager.getPayload(
        PayloadType, executionHead, latestSafe, latestFinalized,
        timestamp, random, feeRecipient, withdrawals)

    if payload.isNone:
      error "Failed to obtain execution payload from EL",
        executionHeadBlock = executionHead
      return Opt.none(PayloadType)

    return Opt.some payload.get
  except CatchableError as err:
    beacon_block_payload_errors.inc()
    error "Error creating non-empty execution payload",
      msg = err.msg
    return Opt.none PayloadType
|
2022-04-14 20:15:34 +00:00
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
proc makeBeaconBlockForHeadAndSlot*(
    PayloadType: type ForkyExecutionPayloadForSigning,
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot,

    # These parameters are for the builder API
    execution_payload: Opt[PayloadType],
    transactions_root: Opt[Eth2Digest],
    execution_payload_root: Opt[Eth2Digest],
    withdrawals_root: Opt[Eth2Digest]):
    Future[ForkedBlockResult] {.async.} =
  ## Assemble a full beacon block proposal on top of `head` for `slot`:
  ## obtain (or accept, for the builder API) an execution payload, gather
  ## attestations/exits/sync aggregate, and run block production. Returns
  ## the forked block plus its payload value, or an error string.
  # Advance state to the slot that we're proposing for
  var cache = StateCache()

  let
    # The clearance state already typically sits at the right slot per
    # `advanceClearanceState`

    # TODO can use `valueOr:`/`return err($error)` if/when
    # https://github.com/status-im/nim-stew/issues/161 is addressed
    maybeState = node.dag.getProposalState(head, slot, cache)

  if maybeState.isErr:
    beacon_block_production_errors.inc()
    return err($maybeState.error)

  let
    state = maybeState.get
    payloadFut =
      if execution_payload.isSome:
        # Builder API

        # In Capella, only get withdrawals root from relay.
        # The execution payload will be small enough to be safe to copy because
        # it won't have transactions (it's blinded)
        var modified_execution_payload = execution_payload
        withState(state[]):
          when  consensusFork >= ConsensusFork.Capella and
                PayloadType.toFork >= ConsensusFork.Capella:
            let withdrawals = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD](
              get_expected_withdrawals(forkyState.data))
            if withdrawals_root.isNone or
                hash_tree_root(withdrawals) != withdrawals_root.get:
              # If engine API returned a block, will use that
              return err("Builder relay provided incorrect withdrawals root")
            # Otherwise, the state transition function notices that there are
            # too few withdrawals.
            assign(modified_execution_payload.get.executionPayload.withdrawals,
                   withdrawals)

        let fut = newFuture[Opt[PayloadType]]("given-payload")
        fut.complete(modified_execution_payload)
        fut
      elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or
           not state[].is_merge_transition_complete:
        # Pre-merge: blocks carry an (empty) default payload.
        let fut = newFuture[Opt[PayloadType]]("empty-payload")
        fut.complete(Opt.some(default(PayloadType)))
        fut
      else:
        # Create execution payload while packing attestations
        getExecutionPayload(PayloadType, node, state, slot.epoch, validator_index)

    eth1Proposal = node.getBlockProposalEth1Data(state[])

  if eth1Proposal.hasMissingDeposits:
    beacon_block_production_errors.inc()
    warn "Eth1 deposits not available. Skipping block proposal", slot
    return err("Eth1 deposits not available")

  let
    attestations =
      node.attestationPool[].getAttestationsForBlock(state[], cache)
    exits = withState(state[]):
      node.validatorChangePool[].getBeaconBlockValidatorChanges(
        node.dag.cfg, forkyState.data)
    syncAggregate =
      if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
        SyncAggregate.init()
      else:
        node.syncCommitteeMsgPool[].produceSyncAggregate(head.root)
    payload = (await payloadFut).valueOr:
      beacon_block_production_errors.inc()
      warn "Unable to get execution payload. Skipping block proposal",
        slot, validator_index
      return err("Unable to get execution payload")

  let blck = makeBeaconBlock(
      node.dag.cfg,
      state[],
      validator_index,
      randao_reveal,
      eth1Proposal.vote,
      graffiti,
      attestations,
      eth1Proposal.deposits,
      exits,
      syncAggregate,
      payload,
      noRollback, # Temporary state - no need for rollback
      cache,
      verificationFlags = {},
      transactions_root = transactions_root,
      execution_payload_root = execution_payload_root).mapErr do (error: cstring) -> string:
    # This is almost certainly a bug, but it's complex enough that there's a
    # small risk it might happen even when most proposals succeed - thus we
    # log instead of asserting
    beacon_block_production_errors.inc()
    error "Cannot create block for proposal",
      slot, head = shortLog(head), error
    $error

  return ok((blck.get, payload.blockValue))
|
|
|
|
|
2022-12-02 07:39:01 +00:00
|
|
|
# workaround for https://github.com/nim-lang/Nim/issues/20900 to avoid default
# parameters
proc makeBeaconBlockForHeadAndSlot*(
    PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot):
    Future[ForkedBlockResult] {.async.} =
  ## Overload without builder-API arguments: forwards to the full variant
  ## with all builder-related options set to `Opt.none`.
  return await makeBeaconBlockForHeadAndSlot(
    PayloadType, node, randao_reveal, validator_index, graffiti, head, slot,
    execution_payload = Opt.none(PayloadType),
    transactions_root = Opt.none(Eth2Digest),
    execution_payload_root = Opt.none(Eth2Digest),
    withdrawals_root = Opt.none(Eth2Digest))
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2023-02-21 13:21:38 +00:00
|
|
|
proc getBlindedExecutionPayload[
    EPH: bellatrix.ExecutionPayloadHeader | capella.ExecutionPayloadHeader](
    node: BeaconNode, slot: Slot, executionBlockRoot: Eth2Digest,
    pubkey: ValidatorPubKey): Future[BlindedBlockResult[EPH]] {.async.} =
  ## Fetch a blinded execution payload header (a builder bid) for
  ## slot/parent/proposer from the external block builder, verifying the
  ## builder's signature on the bid before returning header and value.
  if node.payloadBuilderRestClient.isNil:
    return err "getBlindedExecutionPayload: nil REST client"

  # The builder API call is selected at compile time based on EPH's fork.
  when EPH is capella.ExecutionPayloadHeader:
    let blindedHeader = awaitWithTimeout(
        node.payloadBuilderRestClient.getHeaderCapella(
          slot, executionBlockRoot, pubkey),
        BUILDER_PROPOSAL_DELAY_TOLERANCE):
      return err "Timeout when obtaining Capella blinded header from builder"
  elif EPH is bellatrix.ExecutionPayloadHeader:
    let blindedHeader = awaitWithTimeout(
        node.payloadBuilderRestClient.getHeaderBellatrix(
          slot, executionBlockRoot, pubkey),
        BUILDER_PROPOSAL_DELAY_TOLERANCE):
      return err "Timeout when obtaining Bellatrix blinded header from builder"
  else:
    static: doAssert false

  const httpOk = 200
  if blindedHeader.status != httpOk:
    return err "getBlindedExecutionPayload: non-200 HTTP response"
  else:
    # Reject bids whose builder signature does not verify.
    if not verify_builder_signature(
        node.dag.cfg.genesisFork, blindedHeader.data.data.message,
        blindedHeader.data.data.message.pubkey,
        blindedHeader.data.data.signature):
      return err "getBlindedExecutionPayload: signature verification failed"

    return ok((
      blindedBlckPart: blindedHeader.data.data.message.header,
      blockValue: blindedHeader.data.data.message.value))
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2022-11-08 18:08:43 +00:00
|
|
|
from ./message_router_mev import
|
|
|
|
copyFields, getFieldNames, unblindAndRouteBlockMEV
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2022-10-31 17:39:03 +00:00
|
|
|
func constructSignableBlindedBlock[T](
    blck: bellatrix.BeaconBlock | capella.BeaconBlock,
    executionPayloadHeader: bellatrix.ExecutionPayloadHeader |
                            capella.ExecutionPayloadHeader): T =
  ## Build a signable blinded block of type `T` from `blck`: copy all shared
  ## fields of the block and its body, then substitute the execution payload
  ## with its (blinded) header.
  const
    blckFields = getFieldNames(typeof(blck))
    blckBodyFields = getFieldNames(typeof(blck.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock.message, blck, blckFields)
  copyFields(blindedBlock.message.body, blck.body, blckBodyFields)
  assign(
    blindedBlock.message.body.execution_payload_header, executionPayloadHeader)

  blindedBlock
|
|
|
|
|
2023-02-21 13:21:38 +00:00
|
|
|
func constructPlainBlindedBlock[
    T: bellatrix_mev.BlindedBeaconBlock | capella_mev.BlindedBeaconBlock,
    EPH: bellatrix.ExecutionPayloadHeader | capella.ExecutionPayloadHeader](
    blck: ForkyBeaconBlock, executionPayloadHeader: EPH): T =
  ## Like `constructSignableBlindedBlock`, but produces an unsigned blinded
  ## block: fields are copied directly onto the block rather than onto a
  ## `.message` wrapper.
  const
    blckFields = getFieldNames(typeof(blck))
    blckBodyFields = getFieldNames(typeof(blck.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock, blck, blckFields)
  copyFields(blindedBlock.body, blck.body, blckBodyFields)
  assign(blindedBlock.body.execution_payload_header, executionPayloadHeader)

  blindedBlock
|
|
|
|
|
|
|
|
proc blindedBlockCheckSlashingAndSign[T](
    node: BeaconNode, slot: Slot, validator: AttachedValidator,
    validator_index: ValidatorIndex, nonsignedBlindedBlock: T):
    Future[Result[T, string]] {.async.} =
  ## Register the blinded proposal with slashing protection and, only if it
  ## is not slashable, sign it with the attached validator's key.
  # Check with slashing protection before submitBlindedBlock
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    blockRoot = hash_tree_root(nonsignedBlindedBlock.message)
    signingRoot = compute_block_signing_root(
      fork, genesis_validators_root, slot, blockRoot)
    notSlashable = node.attachedValidators
      .slashingProtection
      .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

  if notSlashable.isErr:
    warn "Slashing protection activated for MEV block",
      blockRoot = shortLog(blockRoot), blck = shortLog(nonsignedBlindedBlock),
      signingRoot = shortLog(signingRoot),
      validator = validator.pubkey,
      slot = slot,
      existingProposal = notSlashable.error
    return err("MEV proposal would be slashable: " & $notSlashable.error)

  var blindedBlock = nonsignedBlindedBlock
  blindedBlock.signature =
    block:
      let res = await validator.getBlockSignature(
        fork, genesis_validators_root, slot, blockRoot, blindedBlock.message)
      if res.isErr():
        return err("Unable to sign block: " & res.error())
      res.get()

  return ok blindedBlock
|
|
|
|
|
2023-02-21 13:21:38 +00:00
|
|
|
proc getBlindedBeaconBlock[
|
|
|
|
T: bellatrix_mev.SignedBlindedBeaconBlock |
|
|
|
|
capella_mev.SignedBlindedBeaconBlock](
|
2022-10-31 17:39:03 +00:00
|
|
|
node: BeaconNode, slot: Slot, validator: AttachedValidator,
|
|
|
|
validator_index: ValidatorIndex, forkedBlock: ForkedBeaconBlock,
|
2023-02-21 13:21:38 +00:00
|
|
|
executionPayloadHeader: bellatrix.ExecutionPayloadHeader |
|
|
|
|
capella.ExecutionPayloadHeader):
|
2022-10-31 17:39:03 +00:00
|
|
|
Future[Result[T, string]] {.async.} =
|
2023-02-21 13:21:38 +00:00
|
|
|
withBlck(forkedBlock):
|
2023-03-11 00:35:52 +00:00
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
2023-02-23 10:37:45 +00:00
|
|
|
debugRaiseAssert $denebImplementationMissing & ": getBlindedBeaconBlock"
|
2023-02-21 13:21:38 +00:00
|
|
|
return err("getBlindedBeaconBlock: Deneb blinded block creation not implemented")
|
2023-03-11 00:35:52 +00:00
|
|
|
elif consensusFork >= ConsensusFork.Bellatrix:
|
2023-02-21 13:21:38 +00:00
|
|
|
when not (
|
|
|
|
(T is bellatrix_mev.SignedBlindedBeaconBlock and
|
2023-03-11 00:35:52 +00:00
|
|
|
consensusFork == ConsensusFork.Bellatrix) or
|
2023-02-21 13:21:38 +00:00
|
|
|
(T is capella_mev.SignedBlindedBeaconBlock and
|
2023-03-11 00:35:52 +00:00
|
|
|
consensusFork == ConsensusFork.Capella)):
|
2023-02-21 13:21:38 +00:00
|
|
|
return err("getBlindedBeaconBlock: mismatched block/payload types")
|
|
|
|
else:
|
|
|
|
return await blindedBlockCheckSlashingAndSign(
|
|
|
|
node, slot, validator, validator_index,
|
|
|
|
constructSignableBlindedBlock[T](blck, executionPayloadHeader))
|
|
|
|
else:
|
|
|
|
return err("getBlindedBeaconBlock: attempt to construct pre-Bellatrix blinded block")
|
2022-10-31 17:39:03 +00:00
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
proc getBlindedBlockParts[EPH: ForkyExecutionPayloadHeader](
|
2022-11-10 20:18:08 +00:00
|
|
|
node: BeaconNode, head: BlockRef, pubkey: ValidatorPubKey,
|
2022-12-12 13:02:06 +00:00
|
|
|
slot: Slot, randao: ValidatorSig, validator_index: ValidatorIndex,
|
2023-04-11 15:19:48 +00:00
|
|
|
graffiti: GraffitiBytes): Future[Result[(EPH, UInt256, ForkedBeaconBlock), string]]
|
2022-10-31 17:39:03 +00:00
|
|
|
{.async.} =
|
2022-08-01 06:41:47 +00:00
|
|
|
let
|
2023-04-11 16:56:29 +00:00
|
|
|
executionBlockRoot = node.dag.loadExecutionBlockHash(head)
|
2022-10-05 16:29:20 +00:00
|
|
|
executionPayloadHeader =
|
|
|
|
try:
|
|
|
|
awaitWithTimeout(
|
2023-02-21 13:21:38 +00:00
|
|
|
getBlindedExecutionPayload[EPH](
|
|
|
|
node, slot, executionBlockRoot, pubkey),
|
2022-10-05 16:29:20 +00:00
|
|
|
BUILDER_PROPOSAL_DELAY_TOLERANCE):
|
2023-04-11 15:19:48 +00:00
|
|
|
BlindedBlockResult[EPH].err("getBlindedExecutionPayload timed out")
|
2022-10-05 16:29:20 +00:00
|
|
|
except RestDecodingError as exc:
|
2023-04-11 15:19:48 +00:00
|
|
|
BlindedBlockResult[EPH].err(
|
|
|
|
"getBlindedExecutionPayload REST decoding error: " & exc.msg)
|
2022-10-05 16:29:20 +00:00
|
|
|
except CatchableError as exc:
|
2023-04-11 15:19:48 +00:00
|
|
|
BlindedBlockResult[EPH].err(
|
|
|
|
"getBlindedExecutionPayload error: " & exc.msg)
|
2022-08-01 06:41:47 +00:00
|
|
|
|
|
|
|
if executionPayloadHeader.isErr:
|
2023-02-21 13:21:38 +00:00
|
|
|
debug "getBlindedBlockParts: getBlindedExecutionPayload failed",
|
2022-10-05 16:29:20 +00:00
|
|
|
error = executionPayloadHeader.error, slot, validator_index,
|
|
|
|
head = shortLog(head)
|
2022-08-01 06:41:47 +00:00
|
|
|
# Haven't committed to the MEV block, so allow EL fallback.
|
2022-10-31 17:39:03 +00:00
|
|
|
return err(executionPayloadHeader.error)
|
2022-08-01 06:41:47 +00:00
|
|
|
|
|
|
|
# When creating this block, need to ensure it uses the MEV-provided execution
|
|
|
|
# payload, both to avoid repeated calls to network services and to ensure the
|
|
|
|
# consistency of this block (e.g., its state root being correct). Since block
|
|
|
|
# processing does not work directly using blinded blocks, fix up transactions
|
|
|
|
# root after running the state transition function on an otherwise equivalent
|
|
|
|
# non-blinded block without transactions.
|
2023-02-21 13:21:38 +00:00
|
|
|
when EPH is bellatrix.ExecutionPayloadHeader:
|
2023-03-05 01:40:21 +00:00
|
|
|
type PayloadType = bellatrix.ExecutionPayloadForSigning
|
2023-02-21 13:21:38 +00:00
|
|
|
let withdrawals_root = Opt.none Eth2Digest
|
|
|
|
elif EPH is capella.ExecutionPayloadHeader:
|
2023-03-05 01:40:21 +00:00
|
|
|
type PayloadType = capella.ExecutionPayloadForSigning
|
2023-04-11 15:19:48 +00:00
|
|
|
let withdrawals_root =
|
|
|
|
Opt.some executionPayloadHeader.get.blindedBlckPart.withdrawals_root
|
2023-03-05 01:40:21 +00:00
|
|
|
elif EPH is deneb.ExecutionPayloadHeader:
|
|
|
|
type PayloadType = deneb.ExecutionPayloadForSigning
|
2023-02-21 13:21:38 +00:00
|
|
|
let withdrawals_root = Opt.some executionPayloadHeader.get.withdrawals_root
|
|
|
|
else:
|
|
|
|
static: doAssert false
|
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
var shimExecutionPayload: PayloadType
|
2022-08-01 06:41:47 +00:00
|
|
|
copyFields(
|
2023-04-11 15:19:48 +00:00
|
|
|
shimExecutionPayload.executionPayload,
|
|
|
|
executionPayloadHeader.get.blindedBlckPart, getFieldNames(EPH))
|
2023-02-21 13:21:38 +00:00
|
|
|
# In Capella and later, this doesn't have withdrawals, which each node knows
|
|
|
|
# regardless of EL or builder API. makeBeaconBlockForHeadAndSlot fills it in
|
|
|
|
# when it detects builder API usage.
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2023-03-05 01:40:21 +00:00
|
|
|
let newBlock = await makeBeaconBlockForHeadAndSlot(
|
|
|
|
PayloadType, node, randao, validator_index, graffiti, head, slot,
|
2022-08-01 06:41:47 +00:00
|
|
|
execution_payload = Opt.some shimExecutionPayload,
|
2023-04-11 15:19:48 +00:00
|
|
|
transactions_root =
|
|
|
|
Opt.some executionPayloadHeader.get.blindedBlckPart.transactions_root,
|
2022-08-01 06:41:47 +00:00
|
|
|
execution_payload_root =
|
2023-02-21 13:21:38 +00:00
|
|
|
Opt.some hash_tree_root(executionPayloadHeader.get),
|
|
|
|
withdrawals_root = withdrawals_root)
|
2022-08-01 06:41:47 +00:00
|
|
|
|
|
|
|
if newBlock.isErr():
|
|
|
|
# Haven't committed to the MEV block, so allow EL fallback.
|
2022-10-31 17:39:03 +00:00
|
|
|
return err(newBlock.error) # already logged elsewhere!
|
2022-08-01 06:41:47 +00:00
|
|
|
|
|
|
|
let forkedBlck = newBlock.get()
|
|
|
|
|
2023-04-11 15:19:48 +00:00
|
|
|
return ok(
|
|
|
|
(executionPayloadHeader.get.blindedBlckPart,
|
|
|
|
executionPayloadHeader.get.blockValue,
|
|
|
|
forkedBlck.blck))
|
2022-10-31 17:39:03 +00:00
|
|
|
|
2023-04-05 13:35:32 +00:00
|
|
|
proc getBuilderBid[
|
2023-02-21 13:21:38 +00:00
|
|
|
SBBB: bellatrix_mev.SignedBlindedBeaconBlock |
|
|
|
|
capella_mev.SignedBlindedBeaconBlock](
|
2022-10-31 17:39:03 +00:00
|
|
|
node: BeaconNode, head: BlockRef, validator: AttachedValidator, slot: Slot,
|
|
|
|
randao: ValidatorSig, validator_index: ValidatorIndex):
|
2023-04-11 15:19:48 +00:00
|
|
|
Future[BlindedBlockResult[SBBB]] {.async.} =
|
2023-03-22 17:48:48 +00:00
|
|
|
# Used by the BN's own validators, but not the REST server
|
2023-02-21 13:21:38 +00:00
|
|
|
when SBBB is bellatrix_mev.SignedBlindedBeaconBlock:
|
|
|
|
type EPH = bellatrix.ExecutionPayloadHeader
|
|
|
|
elif SBBB is capella_mev.SignedBlindedBeaconBlock:
|
|
|
|
type EPH = capella.ExecutionPayloadHeader
|
|
|
|
else:
|
|
|
|
static: doAssert false
|
|
|
|
|
|
|
|
let blindedBlockParts = await getBlindedBlockParts[EPH](
|
2022-12-12 13:02:06 +00:00
|
|
|
node, head, validator.pubkey, slot, randao, validator_index,
|
|
|
|
node.graffitiBytes)
|
2022-10-31 17:39:03 +00:00
|
|
|
if blindedBlockParts.isErr:
|
|
|
|
# Not signed yet, fine to try to fall back on EL
|
|
|
|
beacon_block_builder_missed_with_fallback.inc()
|
2023-04-05 13:35:32 +00:00
|
|
|
return err blindedBlockParts.error()
|
2022-10-31 17:39:03 +00:00
|
|
|
|
|
|
|
# These, together, get combined into the blinded block for signing and
|
|
|
|
# proposal through the relay network.
|
2023-04-11 15:19:48 +00:00
|
|
|
let (executionPayloadHeader, bidValue, forkedBlck) = blindedBlockParts.get
|
2022-10-31 17:39:03 +00:00
|
|
|
|
2023-03-23 19:54:41 +00:00
|
|
|
# This is only substantively asynchronous with a remote key signer, whereas
|
|
|
|
# using local key signing, the await can't stall indefinitely any more than
|
|
|
|
# any other await. However, by imposing an arbitrary timeout, it risks that
|
|
|
|
# getBlindedBeaconBlock will check slashing conditions, register that block
|
|
|
|
# in the database to avoid future slashing, then take long enough to exceed
|
|
|
|
# any specific timeout provided. It's always better to at least try to send
|
|
|
|
# this proposal. Furthermore, because one attempt to propose on that slot's
|
|
|
|
# already been registered, the EL fallback will refuses to function, so the
|
|
|
|
# timeout ensures missing both by builder and engine APIs.
|
|
|
|
#
|
|
|
|
# When using web3signer or some other remote signer, this is to some extent
|
|
|
|
# difficult to avoid entirely, because some timeout should exist, so Nimbus
|
|
|
|
# can still fall back to EL block production in time. For local signing, it
|
|
|
|
# simply therefore uses `await` and avoids this potential race.
|
2023-03-23 14:51:55 +00:00
|
|
|
let blindedBlock =
|
|
|
|
case validator.kind
|
|
|
|
of ValidatorKind.Local:
|
|
|
|
await getBlindedBeaconBlock[SBBB](
|
2022-10-31 17:39:03 +00:00
|
|
|
node, slot, validator, validator_index, forkedBlck,
|
2023-03-23 14:51:55 +00:00
|
|
|
executionPayloadHeader)
|
|
|
|
of ValidatorKind.Remote:
|
|
|
|
awaitWithTimeout(
|
|
|
|
getBlindedBeaconBlock[SBBB](
|
|
|
|
node, slot, validator, validator_index, forkedBlck,
|
|
|
|
executionPayloadHeader),
|
|
|
|
1.seconds):
|
|
|
|
Result[SBBB, string].err("getBlindedBlock timed out")
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2022-11-08 18:08:43 +00:00
|
|
|
if blindedBlock.isErr:
|
2023-04-05 13:35:32 +00:00
|
|
|
return err blindedBlock.error()
|
2022-08-01 06:41:47 +00:00
|
|
|
|
2023-04-11 15:19:48 +00:00
|
|
|
return ok (blindedBlock.get, bidValue)
|
2023-04-05 13:35:32 +00:00
|
|
|
|
|
|
|
proc proposeBlockMEV(node: BeaconNode, blindedBlock: auto):
|
|
|
|
Future[Result[BlockRef, string]] {.async.} =
|
2023-04-11 15:19:48 +00:00
|
|
|
let unblindedBlockRef = await node.unblindAndRouteBlockMEV(blindedBlock)
|
2022-11-08 18:08:43 +00:00
|
|
|
return if unblindedBlockRef.isOk and unblindedBlockRef.get.isSome:
|
|
|
|
beacon_blocks_proposed.inc()
|
2023-04-05 13:35:32 +00:00
|
|
|
ok(unblindedBlockRef.get.get)
|
2022-11-08 18:08:43 +00:00
|
|
|
else:
|
|
|
|
# unblindedBlockRef.isOk and unblindedBlockRef.get.isNone indicates that
|
|
|
|
# the block failed to validate and integrate into the DAG, which for the
|
|
|
|
# purpose of this return value, is equivalent. It's used to drive Beacon
|
|
|
|
# REST API output.
|
2023-04-11 15:19:48 +00:00
|
|
|
#
|
|
|
|
# https://collective.flashbots.net/t/post-mortem-april-3rd-2023-mev-boost-relay-incident-and-related-timing-issue/1540
|
|
|
|
# has caused false positives, because
|
|
|
|
# "A potential mitigation to this attack is to introduce a cutoff timing
|
|
|
|
# into the proposer's slot whereafter this time (e.g. 3 seconds) the relay
|
|
|
|
# will no longer return a block to the proposer. Relays began to roll out
|
|
|
|
# this mitigation in the evening of April 3rd UTC time with a 2 second
|
|
|
|
# cutoff, and notified other relays to do the same. After receiving
|
|
|
|
# credible reports of honest validators missing their slots the suggested
|
|
|
|
# timing cutoff was increased to 3 seconds."
|
2022-11-30 12:08:48 +00:00
|
|
|
let errMsg =
|
|
|
|
if unblindedBlockRef.isErr:
|
|
|
|
unblindedBlockRef.error
|
|
|
|
else:
|
2023-04-11 15:19:48 +00:00
|
|
|
"Unblinded block not returned to proposer"
|
2023-04-05 13:35:32 +00:00
|
|
|
err errMsg
|
2022-11-08 18:08:43 +00:00
|
|
|
|
2023-02-21 13:21:38 +00:00
|
|
|
proc makeBlindedBeaconBlockForHeadAndSlot*[
|
|
|
|
BBB: bellatrix_mev.BlindedBeaconBlock | capella_mev.BlindedBeaconBlock](
|
2022-10-31 17:39:03 +00:00
|
|
|
node: BeaconNode, randao_reveal: ValidatorSig,
|
|
|
|
validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
|
2023-04-11 15:19:48 +00:00
|
|
|
slot: Slot): Future[BlindedBlockResult[BBB]] {.async.} =
|
2022-10-31 17:39:03 +00:00
|
|
|
## Requests a beacon node to produce a valid blinded block, which can then be
|
|
|
|
## signed by a validator. A blinded block is a block with only a transactions
|
|
|
|
## root, rather than a full transactions list.
|
2023-03-22 17:48:48 +00:00
|
|
|
##
|
|
|
|
## This function is used by the validator client, but not the beacon node for
|
|
|
|
## its own validators.
|
2023-02-21 13:21:38 +00:00
|
|
|
when BBB is bellatrix_mev.BlindedBeaconBlock:
|
|
|
|
type EPH = bellatrix.ExecutionPayloadHeader
|
|
|
|
elif BBB is capella_mev.BlindedBeaconBlock:
|
|
|
|
type EPH = capella.ExecutionPayloadHeader
|
|
|
|
else:
|
|
|
|
static: doAssert false
|
|
|
|
|
2022-10-31 17:39:03 +00:00
|
|
|
let
|
2022-11-10 20:18:08 +00:00
|
|
|
pubkey =
|
2022-10-31 17:39:03 +00:00
|
|
|
# Relevant state for knowledge of validators
|
|
|
|
withState(node.dag.headState):
|
2023-03-22 17:48:48 +00:00
|
|
|
if livenessFailsafeInEffect(
|
|
|
|
forkyState.data.block_roots.data, forkyState.data.slot):
|
|
|
|
# It's head block's slot which matters here, not proposal slot
|
|
|
|
return err("Builder API liveness failsafe in effect")
|
|
|
|
|
2022-10-31 17:39:03 +00:00
|
|
|
if distinctBase(validator_index) >= forkyState.data.validators.lenu64:
|
|
|
|
debug "makeBlindedBeaconBlockForHeadAndSlot: invalid validator index",
|
|
|
|
head = shortLog(head),
|
|
|
|
validator_index,
|
|
|
|
validators_len = forkyState.data.validators.len
|
|
|
|
return err("Invalid validator index")
|
|
|
|
|
2022-11-10 20:18:08 +00:00
|
|
|
forkyState.data.validators.item(validator_index).pubkey
|
2022-10-31 17:39:03 +00:00
|
|
|
|
2023-02-21 13:21:38 +00:00
|
|
|
blindedBlockParts = await getBlindedBlockParts[EPH](
|
2022-12-12 13:02:06 +00:00
|
|
|
node, head, pubkey, slot, randao_reveal, validator_index, graffiti)
|
2022-10-31 17:39:03 +00:00
|
|
|
if blindedBlockParts.isErr:
|
|
|
|
# Don't try EL fallback -- VC specifically requested a blinded block
|
|
|
|
return err("Unable to create blinded block")
|
|
|
|
|
2023-04-11 15:19:48 +00:00
|
|
|
let (executionPayloadHeader, bidValue, forkedBlck) = blindedBlockParts.get
|
2023-02-21 13:21:38 +00:00
|
|
|
withBlck(forkedBlck):
|
2023-03-11 00:35:52 +00:00
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
2023-02-23 10:37:45 +00:00
|
|
|
debugRaiseAssert $denebImplementationMissing & ": makeBlindedBeaconBlockForHeadAndSlot"
|
2023-03-11 00:35:52 +00:00
|
|
|
elif consensusFork >= ConsensusFork.Bellatrix:
|
|
|
|
when ((consensusFork == ConsensusFork.Bellatrix and
|
2023-02-21 13:21:38 +00:00
|
|
|
EPH is bellatrix.ExecutionPayloadHeader) or
|
2023-03-11 00:35:52 +00:00
|
|
|
(consensusFork == ConsensusFork.Capella and
|
2023-02-21 13:21:38 +00:00
|
|
|
EPH is capella.ExecutionPayloadHeader)):
|
2023-04-11 15:19:48 +00:00
|
|
|
return ok (constructPlainBlindedBlock[BBB, EPH](
|
|
|
|
blck, executionPayloadHeader), bidValue)
|
2023-02-21 13:21:38 +00:00
|
|
|
else:
|
|
|
|
return err("makeBlindedBeaconBlockForHeadAndSlot: mismatched block/payload types")
|
|
|
|
else:
|
|
|
|
return err("Attempt to create pre-Bellatrix blinded block")
|
2022-12-14 17:30:56 +00:00
|
|
|
|
2023-04-05 13:35:32 +00:00
|
|
|
proc proposeBlockAux(
|
|
|
|
SBBB: typedesc, EPS: typedesc, node: BeaconNode,
|
|
|
|
validator: AttachedValidator, validator_index: ValidatorIndex,
|
|
|
|
head: BlockRef, slot: Slot, randao: ValidatorSig, fork: Fork,
|
|
|
|
genesis_validators_root: Eth2Digest): Future[BlockRef] {.async.} =
|
|
|
|
# Collect bids
|
|
|
|
let usePayloadBuilder =
|
|
|
|
if node.config.payloadBuilderEnable:
|
2023-03-22 17:48:48 +00:00
|
|
|
withState(node.dag.headState):
|
|
|
|
# Head slot, not proposal slot, matters here
|
2023-04-05 13:35:32 +00:00
|
|
|
# TODO it might make some sense to allow use of builder API if local
|
|
|
|
# EL fails -- i.e. it would change priorities, so any block from the
|
|
|
|
# execution layer client would override builder API. But it seems an
|
|
|
|
# odd requirement to produce no block at all in those conditions.
|
|
|
|
not livenessFailsafeInEffect(
|
2023-03-22 17:48:48 +00:00
|
|
|
forkyState.data.block_roots.data, forkyState.data.slot)
|
2022-12-04 07:42:03 +00:00
|
|
|
else:
|
2023-04-05 13:35:32 +00:00
|
|
|
false
|
2021-05-04 13:17:28 +00:00
|
|
|
|
2023-04-05 13:35:32 +00:00
|
|
|
let
|
|
|
|
payloadBuilderBidFut =
|
|
|
|
if usePayloadBuilder:
|
|
|
|
getBuilderBid[SBBB](node, head, validator, slot, randao, validator_index)
|
|
|
|
else:
|
2023-04-11 15:19:48 +00:00
|
|
|
let fut = newFuture[BlindedBlockResult[SBBB]]("builder-bid")
|
|
|
|
fut.complete(BlindedBlockResult[SBBB].err(
|
2023-04-05 13:35:32 +00:00
|
|
|
"either payload builder disabled or liveness failsafe active"))
|
|
|
|
fut
|
|
|
|
engineBlockFut = makeBeaconBlockForHeadAndSlot(
|
|
|
|
EPS, node, randao, validator_index, node.graffitiBytes, head, slot)
|
|
|
|
|
|
|
|
# getBuilderBid times out after BUILDER_PROPOSAL_DELAY_TOLERANCE, with 1 more
|
|
|
|
# second for remote validators. makeBeaconBlockForHeadAndSlot times out after
|
|
|
|
# 1 second.
|
|
|
|
await allFutures(payloadBuilderBidFut, engineBlockFut)
|
|
|
|
doAssert payloadBuilderBidFut.finished and engineBlockFut.finished
|
|
|
|
|
|
|
|
let builderBidAvailable =
|
|
|
|
if payloadBuilderBidFut.completed:
|
|
|
|
if payloadBuilderBidFut.read().isOk:
|
|
|
|
true
|
|
|
|
elif usePayloadBuilder:
|
|
|
|
info "Payload builder error",
|
|
|
|
slot, head = shortLog(head), validator = shortLog(validator),
|
|
|
|
err = payloadBuilderBidFut.read().error()
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
# Effectively the same case, but without the log message
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
info "Payload builder bid future failed",
|
|
|
|
slot, head = shortLog(head), validator = shortLog(validator),
|
|
|
|
err = payloadBuilderBidFut.error.msg
|
|
|
|
false
|
|
|
|
|
|
|
|
let engineBidAvailable =
|
|
|
|
if engineBlockFut.completed:
|
2023-04-11 15:19:48 +00:00
|
|
|
if engineBlockFut.read.isOk:
|
2023-04-05 13:35:32 +00:00
|
|
|
true
|
|
|
|
else:
|
|
|
|
info "Engine block building error",
|
|
|
|
slot, head = shortLog(head), validator = shortLog(validator),
|
|
|
|
err = payloadBuilderBidFut.read().error()
|
|
|
|
false
|
|
|
|
else:
|
|
|
|
info "Engine block building failed",
|
|
|
|
slot, head = shortLog(head), validator = shortLog(validator),
|
|
|
|
err = engineBlockFut.error.msg
|
|
|
|
false
|
|
|
|
|
|
|
|
let useBuilderBlock =
|
|
|
|
if builderBidAvailable:
|
2023-04-11 15:19:48 +00:00
|
|
|
(not engineBidAvailable) or payloadBuilderBidFut.read.get().blockValue >
|
|
|
|
engineBlockFut.read.get().blockValue
|
2023-04-05 13:35:32 +00:00
|
|
|
else:
|
|
|
|
if not engineBidAvailable:
|
|
|
|
return head # errors logged in router
|
|
|
|
false
|
|
|
|
|
|
|
|
if useBuilderBlock:
|
|
|
|
let
|
2023-04-11 15:19:48 +00:00
|
|
|
blindedBlock = payloadBuilderBidFut.read
|
2023-04-05 13:35:32 +00:00
|
|
|
# Before proposeBlockMEV, can fall back to EL; after, cannot without
|
|
|
|
# risking slashing.
|
2023-04-11 15:19:48 +00:00
|
|
|
maybeUnblindedBlock = await proposeBlockMEV(
|
|
|
|
node, blindedBlock.get.blindedBlckPart)
|
2023-04-05 13:35:32 +00:00
|
|
|
|
|
|
|
return maybeUnblindedBlock.valueOr:
|
|
|
|
warn "Blinded block proposal incomplete",
|
|
|
|
head = shortLog(head), slot, validator_index,
|
|
|
|
validator = shortLog(validator),
|
|
|
|
err = maybeUnblindedBlock.error,
|
2023-04-11 15:19:48 +00:00
|
|
|
blindedBlck = shortLog(blindedBlock.get().blindedBlckPart)
|
2023-04-05 13:35:32 +00:00
|
|
|
beacon_block_builder_missed_without_fallback.inc()
|
|
|
|
return head
|
2021-05-04 13:17:28 +00:00
|
|
|
|
2023-04-11 15:19:48 +00:00
|
|
|
var forkedBlck = engineBlockFut.read.get().blck
|
2021-08-29 14:50:21 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
withBlck(forkedBlck):
|
2023-03-09 00:34:17 +00:00
|
|
|
var blobs_sidecar = deneb.BlobsSidecar(
|
2023-01-25 17:35:46 +00:00
|
|
|
beacon_block_slot: slot,
|
|
|
|
)
|
2023-03-09 00:34:17 +00:00
|
|
|
when blck is deneb.BeaconBlock:
|
2023-03-05 01:40:21 +00:00
|
|
|
# TODO: The blobs_sidecar variable is not currently used.
|
|
|
|
# It could be initialized in makeBeaconBlockForHeadAndSlot
|
|
|
|
# where the required information is available.
|
|
|
|
# blobs_sidecar.blobs = forkedBlck.blobs
|
|
|
|
# blobs_sidecar.kzg_aggregated_proof = kzg_aggregated_proof
|
|
|
|
discard
|
2023-01-21 23:13:21 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
let
|
|
|
|
blockRoot = hash_tree_root(blck)
|
2022-09-19 19:50:19 +00:00
|
|
|
signingRoot = compute_block_signing_root(
|
2021-12-03 13:58:12 +00:00
|
|
|
fork, genesis_validators_root, slot, blockRoot)
|
2021-08-29 14:50:21 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
notSlashable = node.attachedValidators
|
|
|
|
.slashingProtection
|
2022-09-19 19:50:19 +00:00
|
|
|
.registerBlock(validator_index, validator.pubkey, slot, signingRoot)
|
2021-08-29 14:50:21 +00:00
|
|
|
|
2023-02-01 14:14:50 +00:00
|
|
|
blobs_sidecar.beacon_block_root = blockRoot
|
2021-08-29 14:50:21 +00:00
|
|
|
if notSlashable.isErr:
|
2022-09-19 19:50:19 +00:00
|
|
|
warn "Slashing protection activated for block proposal",
|
|
|
|
blockRoot = shortLog(blockRoot), blck = shortLog(blck),
|
|
|
|
signingRoot = shortLog(signingRoot),
|
2021-08-29 14:50:21 +00:00
|
|
|
validator = validator.pubkey,
|
|
|
|
slot = slot,
|
|
|
|
existingProposal = notSlashable.error
|
|
|
|
return head
|
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
let
|
|
|
|
signature =
|
|
|
|
block:
|
2022-06-29 16:53:59 +00:00
|
|
|
let res = await validator.getBlockSignature(
|
2021-12-03 13:58:12 +00:00
|
|
|
fork, genesis_validators_root, slot, blockRoot, forkedBlck)
|
|
|
|
if res.isErr():
|
2022-06-29 16:53:59 +00:00
|
|
|
warn "Unable to sign block",
|
|
|
|
validator = shortLog(validator), error_msg = res.error()
|
2021-12-03 13:58:12 +00:00
|
|
|
return head
|
|
|
|
res.get()
|
|
|
|
signedBlock =
|
|
|
|
when blck is phase0.BeaconBlock:
|
|
|
|
phase0.SignedBeaconBlock(
|
|
|
|
message: blck, signature: signature, root: blockRoot)
|
|
|
|
elif blck is altair.BeaconBlock:
|
|
|
|
altair.SignedBeaconBlock(
|
|
|
|
message: blck, signature: signature, root: blockRoot)
|
2022-01-18 13:36:52 +00:00
|
|
|
elif blck is bellatrix.BeaconBlock:
|
|
|
|
bellatrix.SignedBeaconBlock(
|
2021-12-03 13:58:12 +00:00
|
|
|
message: blck, signature: signature, root: blockRoot)
|
2022-11-02 16:23:30 +00:00
|
|
|
elif blck is capella.BeaconBlock:
|
|
|
|
capella.SignedBeaconBlock(
|
|
|
|
message: blck, signature: signature, root: blockRoot)
|
2023-03-09 00:34:17 +00:00
|
|
|
elif blck is deneb.BeaconBlock:
|
2023-02-28 11:36:17 +00:00
|
|
|
# TODO: also route blobs
|
2023-03-05 01:40:21 +00:00
|
|
|
deneb.SignedBeaconBlock(message: blck, signature: signature, root: blockRoot)
|
2023-02-28 11:36:17 +00:00
|
|
|
else:
|
2022-02-13 15:21:55 +00:00
|
|
|
static: doAssert "Unknown SignedBeaconBlock type"
|
2022-07-06 16:11:44 +00:00
|
|
|
newBlockRef =
|
|
|
|
(await node.router.routeSignedBeaconBlock(signedBlock)).valueOr:
|
|
|
|
return head # Errors logged in router
|
2021-08-29 14:50:21 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
if newBlockRef.isNone():
|
|
|
|
return head # Validation errors logged in router
|
2021-09-27 14:22:58 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
notice "Block proposed",
|
|
|
|
blockRoot = shortLog(blockRoot), blck = shortLog(blck),
|
|
|
|
signature = shortLog(signature), validator = shortLog(validator)
|
2021-09-27 14:22:58 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
beacon_blocks_proposed.inc()
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-12-03 13:58:12 +00:00
|
|
|
return newBlockRef.get()
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2023-04-05 13:35:32 +00:00
|
|
|
proc proposeBlock(node: BeaconNode,
|
|
|
|
validator: AttachedValidator,
|
|
|
|
validator_index: ValidatorIndex,
|
|
|
|
head: BlockRef,
|
|
|
|
slot: Slot): Future[BlockRef] {.async.} =
|
|
|
|
if head.slot >= slot:
|
|
|
|
# We should normally not have a head newer than the slot we're proposing for
|
|
|
|
# but this can happen if block proposal is delayed
|
|
|
|
warn "Skipping proposal, have newer head already",
|
|
|
|
headSlot = shortLog(head.slot),
|
|
|
|
headBlockRoot = shortLog(head.root),
|
|
|
|
slot = shortLog(slot)
|
|
|
|
return head
|
|
|
|
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
randao = block:
|
|
|
|
let res = await validator.getEpochSignature(
|
|
|
|
fork, genesis_validators_root, slot.epoch)
|
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to generate randao reveal",
|
|
|
|
validator = shortLog(validator), error_msg = res.error()
|
|
|
|
return head
|
|
|
|
res.get()
|
|
|
|
|
|
|
|
template proposeBlockContinuation(type1, type2: untyped): auto =
|
|
|
|
await proposeBlockAux(
|
|
|
|
type1, type2, node, validator, validator_index, head, slot, randao, fork,
|
|
|
|
genesis_validators_root)
|
|
|
|
|
|
|
|
return
|
|
|
|
if slot.epoch >= node.dag.cfg.DENEB_FORK_EPOCH:
|
|
|
|
debugRaiseAssert $denebImplementationMissing & ": proposeBlock"
|
|
|
|
proposeBlockContinuation(
|
|
|
|
capella_mev.SignedBlindedBeaconBlock, deneb.ExecutionPayloadForSigning)
|
|
|
|
elif slot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH:
|
|
|
|
proposeBlockContinuation(
|
|
|
|
capella_mev.SignedBlindedBeaconBlock, capella.ExecutionPayloadForSigning)
|
|
|
|
else:
|
|
|
|
proposeBlockContinuation(
|
|
|
|
bellatrix_mev.SignedBlindedBeaconBlock, bellatrix.ExecutionPayloadForSigning)
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
|
|
|
|
## Perform all attestations that the validators attached to this node should
|
|
|
|
## perform during the given slot
|
|
|
|
if slot + SLOTS_PER_EPOCH < head.slot:
|
|
|
|
# The latest block we know about is a lot newer than the slot we're being
|
|
|
|
# asked to attest to - this makes it unlikely that it will be included
|
|
|
|
# at all.
|
|
|
|
# TODO the oldest attestations allowed are those that are older than the
|
|
|
|
# finalized epoch.. also, it seems that posting very old attestations
|
|
|
|
# is risky from a slashing perspective. More work is needed here.
|
2020-10-01 18:56:42 +00:00
|
|
|
warn "Skipping attestation, head is too recent",
|
2022-01-05 18:38:04 +00:00
|
|
|
head = shortLog(head),
|
2020-05-06 13:23:45 +00:00
|
|
|
slot = shortLog(slot)
|
|
|
|
return
|
|
|
|
|
2022-03-23 11:42:16 +00:00
|
|
|
if slot < node.dag.finalizedHead.slot:
|
|
|
|
# During checkpoint sync, we implicitly finalize the given slot even if the
|
|
|
|
# state transition does not yet consider it final - this is a sanity check
|
|
|
|
# mostly to ensure the `atSlot` below works as expected
|
|
|
|
warn "Skipping attestation - slot already finalized",
|
|
|
|
head = shortLog(head),
|
|
|
|
slot = shortLog(slot),
|
|
|
|
finalized = shortLog(node.dag.finalizedHead)
|
|
|
|
return
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
let attestationHead = head.atSlot(slot)
|
|
|
|
if head != attestationHead.blck:
|
|
|
|
# In rare cases, such as when we're busy syncing or just slow, we'll be
|
|
|
|
# attesting to a past state - we must then recreate the world as it looked
|
|
|
|
# like back then
|
|
|
|
notice "Attesting to a state in the past, falling behind?",
|
2022-01-05 18:38:04 +00:00
|
|
|
attestationHead = shortLog(attestationHead),
|
|
|
|
head = shortLog(head)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
trace "Checking attestations",
|
2022-01-05 18:38:04 +00:00
|
|
|
attestationHead = shortLog(attestationHead),
|
|
|
|
head = shortLog(head)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# We need to run attestations exactly for the slot that we're attesting to.
|
|
|
|
# In case blocks went missing, this means advancing past the latest block
|
|
|
|
# using empty slots as fillers.
|
2023-04-18 22:00:06 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.5/specs/phase0/validator.md#validator-assignments
|
2020-08-10 13:21:31 +00:00
|
|
|
let
|
2022-07-06 16:11:44 +00:00
|
|
|
epochRef = node.dag.getEpochRef(
|
|
|
|
attestationHead.blck, slot.epoch, false).valueOr:
|
2022-01-05 18:38:04 +00:00
|
|
|
warn "Cannot construct EpochRef for attestation head, report bug",
|
2022-09-27 16:56:08 +00:00
|
|
|
attestationHead = shortLog(attestationHead), slot, error
|
2022-01-05 18:38:04 +00:00
|
|
|
return
|
2022-08-18 18:07:01 +00:00
|
|
|
committees_per_slot = get_committee_count_per_slot(epochRef.shufflingRef)
|
2021-08-24 19:49:51 +00:00
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
2022-06-29 16:53:59 +00:00
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
2020-08-10 13:21:31 +00:00
|
|
|
|
2022-01-08 23:28:49 +00:00
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
2022-08-18 18:07:01 +00:00
|
|
|
let committee = get_beacon_committee(
|
|
|
|
epochRef.shufflingRef, slot, committee_index)
|
2020-08-10 13:21:31 +00:00
|
|
|
|
2021-05-04 13:17:28 +00:00
|
|
|
for index_in_committee, validator_index in committee:
|
2023-03-02 15:55:45 +00:00
|
|
|
let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
|
2021-05-10 07:13:36 +00:00
|
|
|
continue
|
|
|
|
|
|
|
|
let
|
|
|
|
data = makeAttestationData(epochRef, attestationHead, committee_index)
|
|
|
|
# TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
|
2022-09-19 19:50:19 +00:00
|
|
|
signingRoot = compute_attestation_signing_root(
|
2021-05-10 07:13:36 +00:00
|
|
|
fork, genesis_validators_root, data)
|
|
|
|
registered = node.attachedValidators
|
|
|
|
.slashingProtection
|
|
|
|
.registerAttestation(
|
|
|
|
validator_index,
|
2021-07-13 11:15:07 +00:00
|
|
|
validator.pubkey,
|
2021-05-10 07:13:36 +00:00
|
|
|
data.source.epoch,
|
|
|
|
data.target.epoch,
|
2022-09-19 19:50:19 +00:00
|
|
|
signingRoot)
|
2021-05-10 07:13:36 +00:00
|
|
|
if registered.isOk():
|
2021-10-20 09:16:48 +00:00
|
|
|
let subnet_id = compute_subnet_for_attestation(
|
2022-01-08 23:28:49 +00:00
|
|
|
committees_per_slot, data.slot, committee_index)
|
2021-05-10 07:13:36 +00:00
|
|
|
asyncSpawn createAndSendAttestation(
|
|
|
|
node, fork, genesis_validators_root, validator, data,
|
2021-10-20 09:16:48 +00:00
|
|
|
committee.len(), index_in_committee, subnet_id)
|
2021-05-10 07:13:36 +00:00
|
|
|
else:
|
|
|
|
warn "Slashing protection activated for attestation",
|
2022-09-19 19:50:19 +00:00
|
|
|
attestationData = shortLog(data),
|
|
|
|
signingRoot = shortLog(signingRoot),
|
|
|
|
validator_index,
|
|
|
|
validator = shortLog(validator),
|
2021-05-10 07:13:36 +00:00
|
|
|
badVoteDetails = $registered.error()
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-08-30 00:58:30 +00:00
|
|
|
proc createAndSendSyncCommitteeMessage(node: BeaconNode,
                                       validator: AttachedValidator,
                                       slot: Slot,
                                       subcommitteeIdx: SyncSubcommitteeIndex,
                                       head: BlockRef) {.async.} =
  ## Sign a sync committee message for `head` at `slot` with `validator` and
  ## hand it to the message router for broadcast. Intended to be run as an
  ## `asyncSpawn` task, so all failures are logged rather than propagated.
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      msg =
        block:
          # Signing may be remote (e.g. external signer) and can fail; bail
          # out with a log line rather than raising
          let res = await validator.getSyncCommitteeMessage(
            fork, genesis_validators_root, slot, head.root)
          if res.isErr():
            warn "Unable to sign committee message",
              validator = shortLog(validator), slot = slot,
              block_root = shortLog(head.root)
            return
          res.get()

    # Logged in the router
    let res = await node.router.routeSyncCommitteeMessage(
      msg, subcommitteeIdx, checkSignature = false)

    if not res.isOk():
      return

    # Optionally dump the outgoing message to disk for debugging
    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, msg, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    notice "Error sending sync committee message", err = exc.msg
proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
  ## For each attached validator that participates in the sync committee
  ## active at `slot + 1`, spawn a task that signs and routes its message.
  # TODO Use a view type to avoid the copy
  let participants = node.dag.syncCommitteeParticipants(slot + 1)

  for subcommitteeIdx in SyncSubcommitteeIndex:
    for memberIdx in syncSubcommittee(participants, subcommitteeIdx):
      # Only validators we manage and that are safe to sign for right now
      let attached = node.getValidatorForDuties(
          memberIdx, slot, slashingSafe = true).valueOr:
        continue
      asyncSpawn createAndSendSyncCommitteeMessage(
        node, attached, slot, subcommitteeIdx, head)
proc signAndSendContribution(node: BeaconNode,
                             validator: AttachedValidator,
                             subcommitteeIdx: SyncSubcommitteeIndex,
                             head: BlockRef,
                             slot: Slot) {.async.} =
  ## If `validator` is selected as a sync committee aggregator for
  ## `subcommitteeIdx` at `slot`, produce a contribution from the message
  ## pool, sign it, and route it. Run as an `asyncSpawn` task - errors are
  ## logged, never raised.
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      selectionProof = block:
        let res = await validator.getSyncCommitteeSelectionProof(
          fork, genesis_validators_root, slot, subcommitteeIdx)
        if res.isErr():
          warn "Unable to generate committee selection proof",
            validator = shortLog(validator), slot,
            subnet_id = subcommitteeIdx, error = res.error()
          return
        res.get()

    # Only a subset of sync committee members aggregate for each slot
    if not is_sync_committee_aggregator(selectionProof):
      return

    var
      msg = SignedContributionAndProof(
        message: ContributionAndProof(
          aggregator_index: uint64 validator.index.get,
          selection_proof: selectionProof))

    # Fill msg.message.contribution from the pool; nothing to send when no
    # messages were collected for this (slot, head, subcommittee)
    if not node.syncCommitteeMsgPool[].produceContribution(
        slot,
        head.root,
        subcommitteeIdx,
        msg.message.contribution):
      return

    msg.signature = block:
      let res = await validator.getContributionAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign sync committee contribution",
          validator = shortLog(validator), message = shortLog(msg.message),
          error_msg = res.error()
        return
      res.get()

    # Logged in the router
    discard await node.router.routeSignedContributionAndProof(msg, false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending sync committee contribution", err = exc.msg
proc handleSyncCommitteeContributions(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  ## For each attached validator in the sync committee active at `slot + 1`,
  ## spawn a task that checks aggregation duty and, when selected, signs and
  ## routes a sync committee contribution.
  # `fork` / `genesis_validators_root` used to be computed here but were
  # never used - `signAndSendContribution` derives them itself - so the
  # unused locals were removed.
  let syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

  # `SyncSubcommitteeIndex` spelled consistently with
  # `handleSyncCommitteeMessages` (Nim compares identifiers
  # case-insensitively past the first character, but the mixed spelling
  # `SyncSubCommitteeIndex` trips --styleCheck)
  for subcommitteeIdx in SyncSubcommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
      let validator = node.getValidatorForDuties(
          valIdx, slot, slashingSafe = true).valueOr:
        continue

      asyncSpawn signAndSendContribution(
        node, validator, subcommitteeIdx, head, slot)
proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
    Future[BlockRef] {.async.} =
  ## Perform the proposal for the given slot, iff we have a validator attached
  ## that is supposed to do so, given the shuffling at that slot for the given
  ## head - to compute the proposer, we need to advance a state to the given
  ## slot
  ## Returns the (possibly new) head after the proposal attempt; when we are
  ## not the proposer, the current `head` is returned unchanged.
  let
    proposer = node.dag.getProposer(head, slot).valueOr:
      return head
    # The proposer index came from the dag, so its key is known - `get()` is
    # expected to succeed here
    proposerKey = node.dag.validatorKey(proposer).get().toPubKey
    validator = node.getValidatorForDuties(proposer, slot).valueOr:
      # The proposer is not one of our attached validators - just log that a
      # proposal is expected from someone else
      debug "Expecting block proposal", headRoot = shortLog(head.root),
        slot = shortLog(slot),
        proposer_index = proposer,
        proposer = shortLog(proposerKey)
      return head

  return await proposeBlock(node, validator, proposer, head, slot)
proc signAndSendAggregate(
    node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,
    slot: Slot, committee_index: CommitteeIndex) {.async.} =
  ## If `validator` is selected as an aggregator for `committee_index` at
  ## `slot`, build a `SignedAggregateAndProof` from the attestation pool and
  ## route it. Run as an `asyncSpawn` task - errors are logged, never raised.
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      validator_index = validator.index.get()
      selectionProof = block:
        let res = await validator.getSlotSignature(
          fork, genesis_validators_root, slot)
        if res.isErr():
          warn "Unable to create slot signature",
            validator = shortLog(validator),
            slot, error = res.error()
          return
        res.get()

    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.5/specs/phase0/validator.md#aggregation-selection
    if not is_aggregator(
        shufflingRef, slot, committee_index, selectionProof):
      return

    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.4/specs/phase0/validator.md#construct-aggregate
    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.4/specs/phase0/validator.md#aggregateandproof
    var
      msg = SignedAggregateAndProof(
        message: AggregateAndProof(
          aggregator_index: uint64 validator_index,
          selection_proof: selectionProof))

    # Nothing to aggregate if the pool has no matching attestations
    msg.message.aggregate = node.attestationPool[].getAggregatedAttestation(
        slot, committee_index).valueOr:
      return

    msg.signature = block:
      let res = await validator.getAggregateAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign aggregate",
          validator = shortLog(validator), error_msg = res.error()
        return
      res.get()

    # Record activity for doppelganger detection before broadcasting
    validator.doppelgangerActivity(msg.message.aggregate.data.slot.epoch)

    # Logged in the router
    discard await node.router.routeSignedAggregateAndProof(
      msg, checkSignature = false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending aggregate", err = exc.msg
proc sendAggregatedAttestations(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  ## Spawn aggregation tasks for every attached validator sitting in a beacon
  ## committee at `slot` - the per-validator aggregator selection happens
  ## inside `signAndSendAggregate`.
  # Aggregated attestations must be sent by members of the beacon committees for
  # the given slot, for which `is_aggregator` returns `true`.

  let
    shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
      warn "Cannot construct EpochRef for head, report bug",
        head = shortLog(head), slot
      return
    committees_per_slot = get_committee_count_per_slot(shufflingRef)

  for committee_index in get_committee_indices(committees_per_slot):
    for _, validator_index in
        get_beacon_committee(shufflingRef, slot, committee_index):
      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
        continue
      asyncSpawn signAndSendAggregate(node, validator, shufflingRef, slot,
                                      committee_index)
proc updateValidatorMetrics*(node: BeaconNode) =
|
2021-08-28 22:27:51 +00:00
|
|
|
# Technically, this only needs to be done on epoch transitions and if there's
|
|
|
|
# a reorg that spans an epoch transition, but it's easier to implement this
|
|
|
|
# way for now.
|
|
|
|
|
|
|
|
# We'll limit labelled metrics to the first 64, so that we don't overload
|
|
|
|
# Prometheus.
|
|
|
|
|
|
|
|
var total: Gwei
|
|
|
|
var i = 0
|
|
|
|
for _, v in node.attachedValidators[].validators:
|
|
|
|
let balance =
|
|
|
|
if v.index.isNone():
|
|
|
|
0.Gwei
|
|
|
|
elif v.index.get().uint64 >=
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(node.dag.headState, balances).lenu64:
|
2021-08-28 22:27:51 +00:00
|
|
|
debug "Cannot get validator balance, index out of bounds",
|
|
|
|
pubkey = shortLog(v.pubkey), index = v.index.get(),
|
2022-03-16 07:20:40 +00:00
|
|
|
balances = getStateField(node.dag.headState, balances).len,
|
|
|
|
stateRoot = getStateRoot(node.dag.headState)
|
2021-08-28 22:27:51 +00:00
|
|
|
0.Gwei
|
|
|
|
else:
|
2022-05-30 13:30:42 +00:00
|
|
|
getStateField(node.dag.headState, balances).item(v.index.get())
|
2021-08-28 22:27:51 +00:00
|
|
|
|
|
|
|
if i < 64:
|
|
|
|
attached_validator_balance.set(
|
|
|
|
balance.toGaugeValue, labelValues = [shortLog(v.pubkey)])
|
|
|
|
|
|
|
|
inc i
|
|
|
|
total += balance
|
|
|
|
|
|
|
|
node.attachedValidatorBalanceTotal = total
|
|
|
|
attached_validator_balance_total.set(total.toGaugeValue)
|
2020-11-27 23:34:25 +00:00
|
|
|
|
2022-08-01 06:41:47 +00:00
|
|
|
from std/times import epochTime
|
|
|
|
|
|
|
|
proc getValidatorRegistration(
    node: BeaconNode, validator: AttachedValidator, epoch: Epoch):
    Future[Result[SignedValidatorRegistrationV1, string]] {.async.} =
  ## Build and sign a builder-API validator registration (fee recipient, gas
  ## limit, timestamp, pubkey) for `validator` at `epoch`.
  let validatorIdx = validator.index.valueOr:
    # The validator index will be missing when the validator was not
    # activated for duties yet. We can safely skip the registration then.
    # NOTE(review): this bare `return` yields the default-initialized
    # Result - presumably treated as an error by callers; verify.
    return

  let feeRecipient = node.getFeeRecipient(validator.pubkey, validatorIdx, epoch)
  let gasLimit = node.getGasLimit(validator.pubkey)
  var validatorRegistration = SignedValidatorRegistrationV1(
    message: ValidatorRegistrationV1(
      fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
      gas_limit: gasLimit,
      timestamp: epochTime().uint64,
      pubkey: validator.pubkey))

  # Registrations are signed with the genesis fork per the builder spec
  let signature = await validator.getBuilderSignature(
    node.dag.cfg.genesisFork, validatorRegistration.message)

  debug "getValidatorRegistration: registering",
    validatorRegistration

  if signature.isErr:
    return err signature.error

  validatorRegistration.signature = signature.get

  return ok validatorRegistration
2022-08-31 00:29:03 +00:00
|
|
|
from std/sequtils import toSeq
|
|
|
|
|
2022-11-26 18:50:42 +00:00
|
|
|
proc registerValidators*(node: BeaconNode, epoch: Epoch) {.async.} =
  ## Register all eligible attached (and VC-provided) validators with the
  ## configured external payload builder, in chunked requests. No-op unless
  ## the builder is enabled and reachable.
  try:
    if (not node.config.payloadBuilderEnable) or
        node.currentSlot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH:
      return
    elif node.config.payloadBuilderEnable and
        node.payloadBuilderRestClient.isNil:
      warn "registerValidators: node.config.payloadBuilderEnable and node.payloadBuilderRestClient.isNil"
      return

    const HttpOk = 200

    # Probe the builder first; a builder that does not answer its status
    # endpoint in time is skipped entirely this round
    let restBuilderStatus = awaitWithTimeout(node.payloadBuilderRestClient.checkBuilderStatus(),
                                             BUILDER_STATUS_DELAY_TOLERANCE):
      debug "Timeout when obtaining builder status"
      return

    if restBuilderStatus.status != HttpOk:
      warn "registerValidators: specified builder or relay not available",
        builderUrl = node.config.payloadBuilderUrl,
        builderStatus = restBuilderStatus
      return

    # The async aspect of signing the registrations can cause the attached
    # validators to change during the loop.
    let attachedValidatorPubkeys =
      toSeq(node.attachedValidators[].validators.keys)

    const emptyNestedSeq = @[newSeq[SignedValidatorRegistrationV1](0)]
    # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#validator-registration
    # Seed with single empty inner list to avoid special cases
    var validatorRegistrations = emptyNestedSeq

    # Some relay networks disallow large request bodies, so split requests
    template addValidatorRegistration(
        validatorRegistration: SignedValidatorRegistrationV1) =
      const registrationValidatorChunkSize = 1000

      if validatorRegistrations[^1].len < registrationValidatorChunkSize:
        validatorRegistrations[^1].add validatorRegistration
      else:
        validatorRegistrations.add @[validatorRegistration]

    # First, check for VC-added keys; cheaper because provided pre-signed
    var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
    if node.externalBuilderRegistrations.len > 0:
      withState(node.dag.headState):
        let currentEpoch = node.currentSlot().epoch
        for i in 0 ..< forkyState.data.validators.len:
          # https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml
          # "requests containing currently inactive or unknown validator
          # pubkeys will be accepted, as they may become active at a later
          # epoch" which means filtering is needed here, because including
          # any validators not pending or active may cause the request, as
          # a whole, to fail.
          let pubkey = forkyState.data.validators.item(i).pubkey
          if pubkey in node.externalBuilderRegistrations and
              forkyState.data.validators.item(i).exit_epoch > currentEpoch:
            let signedValidatorRegistration =
              node.externalBuilderRegistrations[pubkey]
            nonExitedVcPubkeys.incl signedValidatorRegistration.message.pubkey
            addValidatorRegistration signedValidatorRegistration

    for key in attachedValidatorPubkeys:
      # Already included from VC
      if key in nonExitedVcPubkeys:
        warn "registerValidators: same validator registered by beacon node and validator client",
          pubkey = shortLog(key)
        continue

      # Time passed during awaits; REST keymanager API might have removed it
      if key notin node.attachedValidators[].validators:
        continue

      let validator = node.attachedValidators[].validators[key]

      # Can't register without a validator index
      if validator.index.isNone:
        continue

      # https://github.com/ethereum/builder-specs/blob/v0.3.0/apis/builder/validators.yaml
      # Builders should verify that `pubkey` corresponds to an active or
      # pending validator
      withState(node.dag.headState):
        if distinctBase(validator.index.get) >=
            forkyState.data.validators.lenu64:
          continue

        if node.currentSlot().epoch >=
            forkyState.data.validators.item(validator.index.get).exit_epoch:
          continue

      if validator.externalBuilderRegistration.isSome:
        # Reuse the cached, already-signed registration
        addValidatorRegistration validator.externalBuilderRegistration.get
      else:
        let validatorRegistration =
          await node.getValidatorRegistration(validator, epoch)
        if validatorRegistration.isErr:
          error "registerValidators: validatorRegistration failed",
            validatorRegistration
          continue

        # Time passed during await; REST keymanager API might have removed it
        if key notin node.attachedValidators[].validators:
          continue

        node.attachedValidators[].validators[key].externalBuilderRegistration =
          Opt.some validatorRegistration.get
        addValidatorRegistration validatorRegistration.get

    # Nothing collected - skip the HTTP round trips
    if validatorRegistrations == emptyNestedSeq:
      return

    # TODO if there are too many chunks, could trigger DoS protections, so
    # might randomize order to accumulate cumulative coverage
    for chunkIdx in 0 ..< validatorRegistrations.len:
      let registerValidatorResult =
        awaitWithTimeout(
            node.payloadBuilderRestClient.registerValidator(
              validatorRegistrations[chunkIdx]),
            BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE):
          error "Timeout when registering validator with builder"
          continue # Try next batch regardless
      if HttpOk != registerValidatorResult.status:
        warn "registerValidators: Couldn't register validator with MEV builder",
          registerValidatorResult
  except CatchableError as exc:
    warn "registerValidators: exception",
      error = exc.msg
2023-02-07 14:53:36 +00:00
|
|
|
proc updateValidators(
    node: BeaconNode, validators: openArray[Validator]) =
  ## Assign state indices to newly-appearing attached validators and push
  ## fresh state data to validators whose activation status may have changed.
  # Since validator indices are stable, we only check the "updated" range -
  # checking all validators would significantly slow down this loop when there
  # are many inactive keys
  for i in node.dutyValidatorCount..validators.high:
    let
      v = node.attachedValidators[].getValidator(validators[i].pubkey).valueOr:
        continue
    v.index = Opt.some ValidatorIndex(i)

  # Remember how far we scanned so the next call starts from here
  node.dutyValidatorCount = validators.len

  for validator in node.attachedValidators[]:
    # Check if any validators have been activated
    if validator.needsUpdate and validator.index.isSome():
      # Activation epoch can change after index is assigned..
      let index = validator.index.get()
      if index < validators.lenu64:
        validator.updateValidator(
          Opt.some(ValidatorAndIndex(
            index: index, validator: validators[int index]
          )))
2020-10-28 07:55:36 +00:00
|
|
|
proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
|
2020-07-22 08:04:21 +00:00
|
|
|
## Perform validator duties - create blocks, vote and aggregate existing votes
|
2021-02-22 16:17:48 +00:00
|
|
|
if node.attachedValidators[].count == 0:
|
2020-05-06 13:23:45 +00:00
|
|
|
# Nothing to do because we have no validator attached
|
2020-06-10 06:58:12 +00:00
|
|
|
return
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-06-01 11:13:40 +00:00
|
|
|
# The dag head might be updated by sync while we're working due to the
|
2020-10-28 07:55:36 +00:00
|
|
|
# await calls, thus we use a local variable to keep the logic straight here
|
2021-06-01 11:13:40 +00:00
|
|
|
var head = node.dag.head
|
2022-10-27 17:22:32 +00:00
|
|
|
case node.isSynced(head)
|
|
|
|
of SyncStatus.unsynced:
|
|
|
|
info "Beacon node not in sync; skipping validator duties for now",
|
|
|
|
slot, headSlot = head.slot
|
|
|
|
|
|
|
|
# Rewards will be growing though, as we sync..
|
|
|
|
updateValidatorMetrics(node)
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
of SyncStatus.optimistic:
|
|
|
|
info "Execution client not in sync; skipping validator duties for now",
|
2022-02-04 11:25:32 +00:00
|
|
|
slot, headSlot = head.slot
|
2020-12-16 13:03:04 +00:00
|
|
|
|
|
|
|
# Rewards will be growing though, as we sync..
|
|
|
|
updateValidatorMetrics(node)
|
|
|
|
|
2020-06-10 06:58:12 +00:00
|
|
|
return
|
2022-10-27 17:22:32 +00:00
|
|
|
of SyncStatus.synced:
|
|
|
|
discard # keep going
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2023-02-07 14:53:36 +00:00
|
|
|
withState(node.dag.headState):
|
|
|
|
node.updateValidators(forkyState.data.validators.asSeq())
|
2022-12-09 16:05:55 +00:00
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
var curSlot = lastSlot + 1
|
|
|
|
|
|
|
|
# Start by checking if there's work we should have done in the past that we
|
|
|
|
# can still meaningfully do
|
|
|
|
while curSlot < slot:
|
2020-10-01 18:56:42 +00:00
|
|
|
notice "Catching up on validator duties",
|
2020-05-06 13:23:45 +00:00
|
|
|
curSlot = shortLog(curSlot),
|
|
|
|
lastSlot = shortLog(lastSlot),
|
2020-07-16 13:16:51 +00:00
|
|
|
slot = shortLog(slot)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
|
|
|
# For every slot we're catching up, we'll propose then send
|
|
|
|
# attestations - head should normally be advancing along the same branch
|
|
|
|
# in this case
|
|
|
|
head = await handleProposal(node, head, curSlot)
|
|
|
|
|
|
|
|
# For each slot we missed, we need to send out attestations - if we were
|
|
|
|
# proposing during this time, we'll use the newly proposed head, else just
|
|
|
|
# keep reusing the same - the attestation that goes out will actually
|
|
|
|
# rewind the state to what it looked like at the time of that slot
|
|
|
|
handleAttestations(node, head, curSlot)
|
|
|
|
|
|
|
|
curSlot += 1
|
|
|
|
|
2022-03-17 20:11:29 +00:00
|
|
|
let
|
|
|
|
newHead = await handleProposal(node, head, slot)
|
|
|
|
didSubmitBlock = (newHead != head)
|
|
|
|
head = newHead
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-03-01 16:36:06 +00:00
|
|
|
let
|
|
|
|
# The latest point in time when we'll be sending out attestations
|
2022-01-11 10:01:54 +00:00
|
|
|
attestationCutoff = node.beaconClock.fromNow(slot.attestation_deadline())
|
2021-03-01 16:36:06 +00:00
|
|
|
|
|
|
|
if attestationCutoff.inFuture:
|
|
|
|
debug "Waiting to send attestations",
|
|
|
|
head = shortLog(head),
|
|
|
|
attestationCutoff = shortLog(attestationCutoff.offset)
|
|
|
|
|
|
|
|
# Wait either for the block or the attestation cutoff time to arrive
|
2022-03-29 07:15:42 +00:00
|
|
|
if await node.consensusManager[].expectBlock(slot)
|
|
|
|
.withTimeout(attestationCutoff.offset):
|
2021-03-01 16:36:06 +00:00
|
|
|
# The expected block arrived (or expectBlock was called again which
|
2021-03-23 06:57:10 +00:00
|
|
|
# shouldn't happen as this is the only place we use it) - in our async
|
|
|
|
# loop however, we might have been doing other processing that caused delays
|
2021-03-01 16:36:06 +00:00
|
|
|
# here so we'll cap the waiting to the time when we would have sent out
|
|
|
|
# attestations had the block not arrived.
|
|
|
|
# An opposite case is that we received (or produced) a block that has
|
|
|
|
# not yet reached our neighbours. To protect against our attestations
|
|
|
|
# being dropped (because the others have not yet seen the block), we'll
|
2022-03-18 11:02:32 +00:00
|
|
|
# impose a minimum delay of 2000ms. The delay is enforced only when we're
|
2021-03-01 16:36:06 +00:00
|
|
|
# not hitting the "normal" cutoff time for sending out attestations.
|
2021-07-06 13:11:18 +00:00
|
|
|
# An earlier delay of 250ms has proven to be not enough, increasing the
|
2022-03-18 11:02:32 +00:00
|
|
|
# risk of losing attestations, and with growing block sizes, 1000ms
|
|
|
|
# started to be risky as well.
|
2021-07-06 13:11:18 +00:00
|
|
|
# Regardless, because we "just" received the block, we'll impose the
|
|
|
|
# delay.
|
2021-03-01 16:36:06 +00:00
|
|
|
|
2022-03-19 08:59:13 +00:00
|
|
|
# Take into consideration chains with a different slot time
|
|
|
|
const afterBlockDelay = nanos(attestationSlotOffset.nanoseconds div 2)
|
2021-03-01 16:36:06 +00:00
|
|
|
let
|
2022-03-18 11:02:32 +00:00
|
|
|
afterBlockTime = node.beaconClock.now() + afterBlockDelay
|
2021-03-01 16:36:06 +00:00
|
|
|
afterBlockCutoff = node.beaconClock.fromNow(
|
2022-03-18 11:02:32 +00:00
|
|
|
min(afterBlockTime, slot.attestation_deadline() + afterBlockDelay))
|
2021-03-01 16:36:06 +00:00
|
|
|
|
|
|
|
if afterBlockCutoff.inFuture:
|
|
|
|
debug "Got block, waiting to send attestations",
|
|
|
|
head = shortLog(head),
|
|
|
|
afterBlockCutoff = shortLog(afterBlockCutoff.offset)
|
|
|
|
|
|
|
|
await sleepAsync(afterBlockCutoff.offset)
|
|
|
|
|
|
|
|
# Time passed - we might need to select a new head in that case
|
2021-03-11 10:10:57 +00:00
|
|
|
node.consensusManager[].updateHead(slot)
|
2021-06-01 11:13:40 +00:00
|
|
|
head = node.dag.head
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2021-11-25 12:20:36 +00:00
|
|
|
static: doAssert attestationSlotOffset == syncCommitteeMessageSlotOffset
|
|
|
|
|
2020-05-06 13:23:45 +00:00
|
|
|
handleAttestations(node, head, slot)
|
2021-08-30 00:58:30 +00:00
|
|
|
handleSyncCommitteeMessages(node, head, slot)
|
2020-05-06 13:23:45 +00:00
|
|
|
|
2020-12-16 13:03:04 +00:00
|
|
|
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
|
2020-11-27 23:34:25 +00:00
|
|
|
|
2023-03-17 01:10:31 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.4/specs/phase0/validator.md#broadcast-aggregate
|
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.4/specs/altair/validator.md#broadcast-sync-committee-contribution
|
2022-07-06 16:11:44 +00:00
|
|
|
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect
|
|
|
|
# the result in aggregates
|
|
|
|
static:
|
|
|
|
doAssert aggregateSlotOffset == syncContributionSlotOffset, "Timing change?"
|
2021-08-23 10:41:48 +00:00
|
|
|
let
|
2022-07-06 16:11:44 +00:00
|
|
|
aggregateCutoff = node.beaconClock.fromNow(slot.aggregate_deadline())
|
|
|
|
if aggregateCutoff.inFuture:
|
|
|
|
debug "Waiting to send aggregate attestations",
|
|
|
|
aggregateCutoff = shortLog(aggregateCutoff.offset)
|
|
|
|
await sleepAsync(aggregateCutoff.offset)
|
limit by-root requests to non-finalized blocks (#3293)
* limit by-root requests to non-finalized blocks
Presently, we keep a mapping from block root to `BlockRef` in memory -
this has simplified reasoning about the dag, but is not sustainable with
the chain growing.
We can distinguish between two cases where by-root access is useful:
* unfinalized blocks - this is where the beacon chain is operating
generally, by validating incoming data as interesting for future fork
choice decisions - bounded by the length of the unfinalized period
* finalized blocks - historical access in the REST API etc - no bounds,
really
In this PR, we limit the by-root block index to the first use case:
finalized chain data can more efficiently be addressed by slot number.
Future work includes:
* limiting the `BlockRef` horizon in general - each instance is 40
bytes+overhead which adds up - this needs further refactoring to deal
with the tail vs state problem
* persisting the finalized slot-to-hash index - this one also keeps
growing unbounded (albeit slowly)
Anyway, this PR easily shaves ~128mb of memory usage at the time of
writing.
* No longer honor `BeaconBlocksByRoot` requests outside of the
non-finalized period - previously, Nimbus would generously return any
block through this libp2p request - per the spec, finalized blocks
should be fetched via `BeaconBlocksByRange` instead.
* return `Opt[BlockRef]` instead of `nil` when blocks can't be found -
this becomes a lot more common now and thus deserves more attention
* `dag.blocks` -> `dag.forkBlocks` - this index only carries unfinalized
blocks from now - `finalizedBlocks` covers the other `BlockRef`
instances
* in backfill, verify that the last backfilled block leads back to
genesis, or panic
* add backfill timings to log
* fix missing check that `BlockRef` block can be fetched with
`getForkedBlock` reliably
* shortcut doppelganger check when feature is not enabled
* in REST/JSON-RPC, fetch blocks without involving `BlockRef`
* fix dag.blocks ref
2022-01-21 11:33:16 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
let sendAggregatedAttestationsFut =
|
|
|
|
sendAggregatedAttestations(node, head, slot)
|
2021-10-20 09:16:48 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
let handleSyncCommitteeContributionsFut =
|
|
|
|
handleSyncCommitteeContributions(node, head, slot)
|
2021-12-03 13:58:12 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
await handleSyncCommitteeContributionsFut
|
|
|
|
await sendAggregatedAttestationsFut
|
2021-10-18 09:11:44 +00:00
|
|
|
|
|
|
|
proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
  ## Register the upcoming attestation duties of attached validators with the
  ## action tracker, covering the subnet subscription lead time ahead of
  ## `wallSlot`.
  if node.attachedValidators[].count() == 0 or
      node.isSynced(node.dag.head) != SyncStatus.synced:
    # No attached validators, or the node is not synced - nothing to register
    return

  let genesis_validators_root = node.dag.genesis_validators_root
  let head = node.dag.head

  # Slot signatures are expensive to compute, but cached - in the common case
  # we end up registering duties one slot at a time
  for slot in wallSlot ..< wallSlot + SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS:
    let shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
      warn "Cannot construct EpochRef for duties - report bug",
        head = shortLog(head), slot
      return

    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      committees_per_slot = get_committee_count_per_slot(shufflingRef)

    for committee_index in get_committee_indices(committees_per_slot):
      let committee = get_beacon_committee(shufflingRef, slot, committee_index)

      # The position within the committee is not needed here, only membership
      for _, validator_index in committee:
        let validator = node.getValidator(validator_index).valueOr:
          # Not one of ours - skip
          continue

        let
          subnet_id = compute_subnet_for_attestation(
            committees_per_slot, slot, committee_index)
          slotSigRes = await validator.getSlotSignature(
            fork, genesis_validators_root, slot)

        if slotSigRes.isErr():
          error "Unable to create slot signature",
            validator = shortLog(validator),
            error_msg = slotSigRes.error()
          continue

        # The slot signature determines whether this validator aggregates
        let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())

        node.consensusManager[].actionTracker.registerDuty(
          slot, subnet_id, validator_index, isAggregator)