# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

# This module is responsible for handling beacon node validators, ie those
# that are running directly in the beacon node and not in a separate
# validator client process

import
  # Standard library
  std/[os, tables, sequtils],

  # Nimble packages
  stew/byteutils,
  chronos, metrics,
  chronicles, chronicles/timings,
  json_serialization/std/[options, sets, net],
  eth/db/kvstore,
  eth/keys, eth/p2p/discoveryv5/[protocol, enr],
  web3/ethtypes,

  # Local modules
  ../spec/datatypes/[phase0, altair, bellatrix],
  ../spec/[
    eth2_merkleization, forks, helpers, network, signatures, state_transition,
    validator],
  ../consensus_object_pools/[
    spec_cache, blockchain_dag, block_clearance, attestation_pool, exit_pool,
    sync_committee_msg_pool, consensus_manager],
  ../eth1/eth1_monitor,
  ../networking/eth2_network,
  ../sszdump, ../sync/sync_manager,
  ../gossip_processing/block_processor,
  ".."/[conf, beacon_clock, beacon_node],
  "."/[slashing_protection, validator_pool, keystore_management],
  ".."/spec/mev/[rest_bellatrix_mev_calls, rest_capella_mev_calls]

from eth/async_utils import awaitWithTimeout

const
  delayBuckets = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
                  0.05, 0.1, 0.5, 1.0, 2.0, 4.0, 8.0, Inf]

  BUILDER_STATUS_DELAY_TOLERANCE = 3.seconds
  BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE = 6.seconds

# Metrics for tracking attestation and beacon block loss
declareCounter beacon_light_client_finality_updates_sent,
  "Number of LC finality updates sent by this peer"

declareCounter beacon_light_client_optimistic_updates_sent,
  "Number of LC optimistic updates sent by this peer"

declareCounter beacon_blocks_proposed,
  "Number of beacon chain blocks sent by this peer"

declareCounter beacon_block_production_errors,
  "Number of times we failed to produce a block"

declareCounter beacon_block_payload_errors,
  "Number of times execution client failed to produce block payload"

declareCounter beacon_blobs_sidecar_payload_errors,
  "Number of times execution client failed to produce blobs sidecar"

# Metrics for tracking external block builder usage
declareCounter beacon_block_builder_missed_with_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed with fallback"

declareCounter beacon_block_builder_missed_without_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed without possible fallback"

declareGauge(attached_validator_balance,
  "Validator balance at slot end of the first 64 validators, in Gwei",
  labels = ["pubkey"])

declarePublicGauge(attached_validator_balance_total,
  "Validator balance of all attached validators, in Gwei")

logScope: topics = "beacval"

type
  ForkedBlockResult* = Result[ForkedBeaconBlock, string]
  BlindedBlockResult* = Result[bellatrix_mev.BlindedBeaconBlock, string]

  SyncStatus* {.pure.} = enum
    synced
    unsynced
    optimistic

proc getValidator*(validators: auto,
                   pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
  let idx = validators.findIt(it.pubkey == pubkey)
  if idx == -1:
    # We allow adding a validator even if its key is not in the state registry:
    # it might be that the deposit for this validator has not yet been processed
    Opt.none ValidatorAndIndex
  else:
    Opt.some ValidatorAndIndex(index: ValidatorIndex(idx),
                               validator: validators[idx])
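
# A minimal usage sketch (kept as a comment so module behavior is unchanged):
# looking up a validator's registry entry in the head state. `somePubkey` is a
# hypothetical placeholder, not a symbol defined in this module.
#
#   withState(node.dag.headState):
#     let v = getValidator(forkyState.data.validators.asSeq(), somePubkey)
#     if v.isSome():
#       echo "registry index: ", v.get().index
#     else:
#       echo "not in registry yet - deposit may not have been processed"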

proc addValidators*(node: BeaconNode) =
  info "Loading validators", validatorsDir = node.config.validatorsDir(),
    keystore_cache_available = not(isNil(node.keystoreCache))
  let
    epoch = node.currentSlot().epoch
  for keystore in listLoadableKeystores(node.config, node.keystoreCache):
    let
      data = withState(node.dag.headState):
        getValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
      index =
        if data.isSome():
          Opt.some(data.get().index)
        else:
          Opt.none(ValidatorIndex)
      feeRecipient = node.consensusManager[].getFeeRecipient(
        keystore.pubkey, index, epoch)
      gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey)

      v = node.attachedValidators[].addValidator(keystore, feeRecipient, gasLimit)
    v.updateValidator(data)

proc getValidatorForDuties*(
    node: BeaconNode,
    idx: ValidatorIndex, slot: Slot): Opt[AttachedValidator] =
  let key = ? node.dag.validatorKey(idx)

  node.attachedValidators[].getValidatorForDuties(key.toPubKey(), slot)

proc isSynced*(node: BeaconNode, head: BlockRef): SyncStatus =
  ## TODO This function is here as a placeholder for some better heuristics to
  ## determine if we're in sync and should be producing blocks and
  ## attestations. Generally, the problem is that slot time keeps advancing
  ## even when there are no blocks being produced, so there's no way to
  ## distinguish validators genuinely going missing from the node not being
  ## well connected (during a network split or an internet outage for
  ## example). It would generally be correct to simply keep running as if
  ## we were the only legit node left alive, but then we run into issues:
  ## with enough empty slots, the validator pool is emptied, leading
  ## to empty committees and lots of empty slot processing that will be
  ## thrown away as soon as we're synced again.

  let
    # The slot we should be at, according to the clock
    beaconTime = node.beaconClock.now()
    wallSlot = beaconTime.toSlot()

  # TODO if everyone follows this logic, the network will not recover from a
  #      halt: nobody will be producing blocks because everyone expects someone
  #      else to do it
  if wallSlot.afterGenesis and
      head.slot + node.config.syncHorizon < wallSlot.slot:
    SyncStatus.unsynced
  else:
    if node.dag.is_optimistic(head.root):
      SyncStatus.optimistic
    else:
      SyncStatus.synced
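
# A minimal usage sketch (commented out): duty-producing callers would branch
# on the returned status roughly like this; `node` and `head` are as above.
#
#   case node.isSynced(head)
#   of SyncStatus.synced:
#     discard # in sync - produce blocks and attestations normally
#   of SyncStatus.optimistic:
#     discard # head known but not fully verified by the execution client
#   of SyncStatus.unsynced:
#     discard # too far behind the wall clock - skip duties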

proc handleLightClientUpdates*(node: BeaconNode, slot: Slot) {.async.} =
  static: doAssert lightClientFinalityUpdateSlotOffset ==
    lightClientOptimisticUpdateSlotOffset
  let sendTime = node.beaconClock.fromNow(
    slot.light_client_finality_update_time())
  if sendTime.inFuture:
    debug "Waiting to send LC updates", slot, delay = shortLog(sendTime.offset)
    await sleepAsync(sendTime.offset)

  withForkyFinalityUpdate(node.dag.lcDataStore.cache.latest):
    when lcDataFork > LightClientDataFork.None:
      let signature_slot = forkyFinalityUpdate.signature_slot
      if slot != signature_slot:
        return

      let num_active_participants =
        forkyFinalityUpdate.sync_aggregate.num_active_participants
      if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
        return

      let finalized_slot = forkyFinalityUpdate.finalized_header.beacon.slot
      if finalized_slot > node.lightClientPool[].latestForwardedFinalitySlot:
        template msg(): auto = forkyFinalityUpdate
        let sendResult =
          await node.network.broadcastLightClientFinalityUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        node.lightClientPool[].latestForwardedFinalitySlot = finalized_slot

        if sendResult.isOk:
          beacon_light_client_finality_updates_sent.inc()
          notice "LC finality update sent", message = shortLog(msg)
        else:
          warn "LC finality update failed to send",
            error = sendResult.error()

      let attested_slot = forkyFinalityUpdate.attested_header.beacon.slot
      if attested_slot > node.lightClientPool[].latestForwardedOptimisticSlot:
        let msg = forkyFinalityUpdate.toOptimistic
        let sendResult =
          await node.network.broadcastLightClientOptimisticUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        node.lightClientPool[].latestForwardedOptimisticSlot = attested_slot

        if sendResult.isOk:
          beacon_light_client_optimistic_updates_sent.inc()
          notice "LC optimistic update sent", message = shortLog(msg)
        else:
          warn "LC optimistic update failed to send",
            error = sendResult.error()

proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              validator: AttachedValidator,
                              data: AttestationData,
                              committeeLen: int,
                              indexInCommittee: int,
                              subnet_id: SubnetId) {.async.} =
  try:
    let
      signature = block:
        let res = await validator.getAttestationSignature(
          fork, genesis_validators_root, data)
        if res.isErr():
          warn "Unable to sign attestation", validator = shortLog(validator),
            attestationData = shortLog(data), error_msg = res.error()
          return
        res.get()
      attestation =
        Attestation.init(
          [uint64 indexInCommittee], committeeLen, data, signature).expect(
            "valid data")

    # Logged in the router
    let res = await node.router.routeAttestation(
      attestation, subnet_id, checkSignature = false)
    if not res.isOk():
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, attestation.data, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending attestation", err = exc.msg

proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: ForkedHashedBeaconState):
                               BlockProposalEth1Data =
  if node.eth1Monitor.isNil:
    let pendingDepositsCount =
      getStateField(state, eth1_data).deposit_count -
      getStateField(state, eth1_deposit_index)
    if pendingDepositsCount > 0:
      result.hasMissingDeposits = true
    else:
      result.vote = getStateField(state, eth1_data)
  else:
    let finalizedEpochRef = node.dag.getFinalizedEpochRef()
    result = node.eth1Monitor.getBlockProposalData(
      state, finalizedEpochRef.eth1_data,
      finalizedEpochRef.eth1_deposit_index)
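
# A minimal usage sketch (commented out), mirroring how
# `makeBeaconBlockForHeadAndSlot` below consumes the result: the vote is only
# meaningful when no deposits are missing.
#
#   let eth1Proposal = node.getBlockProposalEth1Data(state[])
#   if eth1Proposal.hasMissingDeposits:
#     warn "Eth1 deposits not available. Skipping block proposal", slot
#   else:
#     discard eth1Proposal.vote # the Eth1Data to embed in the proposed block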

from web3/engine_api import ForkchoiceUpdatedResponse

proc forkchoice_updated(
    head_block_hash: Eth2Digest, safe_block_hash: Eth2Digest,
    finalized_block_hash: Eth2Digest, timestamp: uint64, random: Eth2Digest,
    fee_recipient: ethtypes.Address, withdrawals: Opt[seq[Withdrawal]],
    execution_engine: Eth1Monitor):
    Future[Option[bellatrix.PayloadID]] {.async.} =
  logScope:
    head_block_hash
    finalized_block_hash

  let
    forkchoiceResponse =
      try:
        awaitWithTimeout(
          execution_engine.forkchoiceUpdated(
            head_block_hash, safe_block_hash, finalized_block_hash,
            timestamp, random.data, fee_recipient, withdrawals),
          FORKCHOICEUPDATED_TIMEOUT):
            error "Engine API fork-choice update timed out"
            default(ForkchoiceUpdatedResponse)
      except CatchableError as err:
        error "Engine API fork-choice update failed", err = err.msg
        default(ForkchoiceUpdatedResponse)

    payloadId = forkchoiceResponse.payloadId

  return if payloadId.isSome:
    some(bellatrix.PayloadID(payloadId.get))
  else:
    none(bellatrix.PayloadID)
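
# A minimal sketch (commented out) of the Engine API two-step this proc is one
# half of: forkchoiceUpdated starts payload construction and returns a
# PayloadID, which `get_execution_payload` below redeems via getPayload.
# `headHash`, `safeHash`, `finalizedHash`, `ts`, `randaoMix` and `recipient`
# are hypothetical placeholders.
#
#   let payload_id = await forkchoice_updated(
#     headHash, safeHash, finalizedHash, ts, randaoMix, recipient,
#     withdrawals, node.consensusManager.eth1Monitor)
#   if payload_id.isSome:
#     let payload = await get_execution_payload[bellatrix.ExecutionPayload](
#       payload_id, node.consensusManager.eth1Monitor)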

proc get_execution_payload[EP](
    payload_id: Option[bellatrix.PayloadID], execution_engine: Eth1Monitor):
    Future[Opt[EP]] {.async.} =
  return if payload_id.isNone():
    # Pre-merge, empty payload
    Opt.some default(EP)
  else:
    when EP is bellatrix.ExecutionPayload:
      Opt.some asConsensusExecutionPayload(
        await execution_engine.getPayloadV1(payload_id.get))
    elif EP is capella.ExecutionPayload:
      Opt.some asConsensusExecutionPayload(
        await execution_engine.getPayloadV2(payload_id.get))
    elif EP is eip4844.ExecutionPayload:
      Opt.some asConsensusExecutionPayload(
        await execution_engine.getPayloadV3(payload_id.get))
    else:
      static: doAssert "unknown execution payload type"

proc getFeeRecipient(node: BeaconNode,
                     pubkey: ValidatorPubKey,
                     validatorIdx: ValidatorIndex,
                     epoch: Epoch): Eth1Address =
  node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch)

proc getGasLimit(node: BeaconNode,
                 pubkey: ValidatorPubKey): uint64 =
  node.consensusManager[].getGasLimit(pubkey)

from web3/engine_api_types import PayloadExecutionStatus
from ../spec/datatypes/capella import BeaconBlock, ExecutionPayload
from ../spec/datatypes/eip4844 import BeaconBlock, ExecutionPayload

proc getExecutionPayload[T](
    node: BeaconNode, proposalState: ref ForkedHashedBeaconState,
    epoch: Epoch, validator_index: ValidatorIndex): Future[Opt[T]] {.async.} =
  # https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/bellatrix/validator.md#executionpayload

  let feeRecipient = block:
    let pubkey = node.dag.validatorKey(validator_index)
    if pubkey.isNone():
      error "Cannot get proposer pubkey, bug?", validator_index
      default(Eth1Address)
    else:
      node.getFeeRecipient(pubkey.get().toPubKey(), validator_index, epoch)

  template empty_execution_payload(): auto =
    # Callers should already ensure these match, but the type system doesn't
    # transmit this information through the Forked types, so this has to
    # be re-proven here.
    withState(proposalState[]):
      when stateFork >= ConsensusFork.Capella:
        # As of Capella, the EL state root changes in a way that is much more
        # difficult to compute from the CL, due to the incorporation of
        # withdrawals into EL state, so the fake-EL fallback cannot be used.
        # Unlike transactions, withdrawals are not optional, so one cannot
        # avoid this by not including any withdrawals.
        Opt.none T
      elif (stateFork == ConsensusFork.Bellatrix and
            T is bellatrix.ExecutionPayload):
        Opt.some build_empty_execution_payload(forkyState.data, feeRecipient)
      elif stateFork == ConsensusFork.Bellatrix:
        raiseAssert "getExecutionPayload: mismatched proposalState and ExecutionPayload fork"
      else:
        # Vacuously -- these are pre-Bellatrix and not used.
        Opt.some default(T)

  if node.eth1Monitor.isNil:
    beacon_block_payload_errors.inc()
    warn "getExecutionPayload: eth1Monitor not initialized; using empty execution payload"
    return empty_execution_payload

  try:
    # Minimize window for Eth1 monitor to shut down connection
    await node.consensusManager.eth1Monitor.ensureDataProvider()

    # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/paris.md#request-2
    # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/shanghai.md#request-2
    const GETPAYLOAD_TIMEOUT = 1.seconds

    let
      beaconHead = node.attestationPool[].getBeaconHead(node.dag.head)
      executionBlockRoot = node.dag.loadExecutionBlockRoot(beaconHead.blck)
      latestHead =
        if not executionBlockRoot.isZero:
          executionBlockRoot
        else:
          (static(default(Eth2Digest)))
      latestSafe = beaconHead.safeExecutionPayloadHash
      latestFinalized = beaconHead.finalizedExecutionPayloadHash
      lastFcU = node.consensusManager.forkchoiceUpdatedInfo
      timestamp = withState(proposalState[]):
        compute_timestamp_at_slot(forkyState.data, forkyState.data.slot)
      withdrawals = withState(proposalState[]):
        when stateFork >= ConsensusFork.Capella:
          Opt.some get_expected_withdrawals(forkyState.data)
        else:
          Opt.none(seq[Withdrawal])
      payload_id =
        if lastFcU.isSome and
            lastFcU.get.headBlockRoot == latestHead and
            lastFcU.get.safeBlockRoot == latestSafe and
            lastFcU.get.finalizedBlockRoot == latestFinalized and
            lastFcU.get.timestamp == timestamp and
            lastFcU.get.feeRecipient == feeRecipient and
            lastFcU.get.withdrawals == withdrawals:
          some bellatrix.PayloadID(lastFcU.get.payloadId)
        else:
          debug "getExecutionPayload: didn't find payloadId, re-querying",
            latestHead, latestSafe, latestFinalized,
            timestamp,
            feeRecipient,
            cachedForkchoiceUpdateInformation = lastFcU

          let random = withState(proposalState[]): get_randao_mix(
            forkyState.data, get_current_epoch(forkyState.data))
          (await forkchoice_updated(
            latestHead, latestSafe, latestFinalized, timestamp, random,
            feeRecipient, withdrawals, node.consensusManager.eth1Monitor))
      payload = try:
        awaitWithTimeout(
          get_execution_payload[T](payload_id, node.consensusManager.eth1Monitor),
          GETPAYLOAD_TIMEOUT):
            beacon_block_payload_errors.inc()
            warn "Getting execution payload from Engine API timed out", payload_id
            empty_execution_payload
      except CatchableError as err:
        beacon_block_payload_errors.inc()
        warn "Getting execution payload from Engine API failed",
          payload_id, err = err.msg
        empty_execution_payload

    return payload
  except CatchableError as err:
    beacon_block_payload_errors.inc()
    error "Error creating non-empty execution payload; using empty execution payload",
      msg = err.msg
    return empty_execution_payload

proc getBlobsBundle(
    node: BeaconNode, epoch: Epoch, validator_index: ValidatorIndex,
    payload_id: PayloadID): Future[BlobsBundleV1] {.async.} =
  # https://github.com/ethereum/consensus-specs/blob/dev/specs/eip4844/validator.md#get_blobs_and_kzg_commitments

  # Minimize window for Eth1 monitor to shut down connection
  await node.consensusManager.eth1Monitor.ensureDataProvider()

  # https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.2/src/engine/experimental/blob-extension.md#engine_getblobsbundlev1
  const GETBLOBS_TIMEOUT = 1.seconds

  let payload = try:
    awaitWithTimeout(
      node.consensusManager.eth1Monitor.getBlobsBundleV1(payload_id),
      GETBLOBS_TIMEOUT):
        beacon_block_payload_errors.inc()
        warn "Getting blobs sidecar from Engine API timed out", payload_id
        default(BlobsBundleV1)
  except CatchableError as err:
    beacon_block_payload_errors.inc()
    warn "Getting blobs sidecar from Engine API failed",
      payload_id, err = err.msg
    default(BlobsBundleV1)

  return payload
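
# A minimal sketch (commented out) of how a proposer consumes the bundle, as
# `proposeBlock` below does: KZG commitments go into the block body while the
# blobs themselves go into the sidecar that accompanies the signed block.
#
#   let bundle = await getBlobsBundle(node, slot.epoch, validator_index, payload_id)
#   blck.body.blob_kzg_commitments =
#     List[eip4844.KZGCommitment, Limit MAX_BLOBS_PER_BLOCK].init(
#       mapIt(bundle.kzgs, eip4844.KzgCommitment(it)))
#   blobs_sidecar.blobs = List[eip4844.Blob, Limit MAX_BLOBS_PER_BLOCK].init(
#     mapIt(bundle.blobs, eip4844.Blob(it)))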

proc makeBeaconBlockForHeadAndSlot*[EP](
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot,
    execution_payload: Opt[EP],
    transactions_root: Opt[Eth2Digest],
    execution_payload_root: Opt[Eth2Digest]):
    Future[ForkedBlockResult] {.async.} =
  # Advance state to the slot that we're proposing for
  var cache = StateCache()

  let
    # The clearance state already typically sits at the right slot per
    # `advanceClearanceState`

    # TODO can use `valueOr:`/`return err($error)` if/when
    # https://github.com/status-im/nim-stew/issues/161 is addressed
    maybeState = node.dag.getProposalState(head, slot, cache)

  if maybeState.isErr:
    beacon_block_production_errors.inc()
    return err($maybeState.error)

  let
    state = maybeState.get
    payloadFut =
      if execution_payload.isSome:
        let fut = newFuture[Opt[EP]]("given-payload")
        fut.complete(execution_payload)
        fut
      elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or not (
          state[].is_merge_transition_complete or
          slot.epoch >= node.mergeAtEpoch):
        let fut = newFuture[Opt[EP]]("empty-payload")
        # https://github.com/nim-lang/Nim/issues/19802
        fut.complete(Opt.some(default(EP)))
        fut
      else:
        # Create execution payload while packing attestations
        getExecutionPayload[EP](
          node, state, slot.epoch, validator_index)

    eth1Proposal = node.getBlockProposalEth1Data(state[])

  if eth1Proposal.hasMissingDeposits:
    beacon_block_production_errors.inc()
    warn "Eth1 deposits not available. Skipping block proposal", slot
    return err("Eth1 deposits not available")

  let
    attestations =
      node.attestationPool[].getAttestationsForBlock(state[], cache)
    exits = withState(state[]):
      node.validatorChangePool[].getBeaconBlockValidatorChanges(
        node.dag.cfg, forkyState.data)
    syncAggregate =
      if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
        SyncAggregate.init()
      else:
        node.syncCommitteeMsgPool[].produceSyncAggregate(head.root)
    payload = (await payloadFut).valueOr:
      beacon_block_production_errors.inc()
      warn "Unable to get execution payload. Skipping block proposal",
        slot, validator_index
      return err("Unable to get execution payload")

  return makeBeaconBlock(
      node.dag.cfg,
      state[],
      validator_index,
      randao_reveal,
      eth1Proposal.vote,
      graffiti,
      attestations,
      eth1Proposal.deposits,
      exits,
      syncAggregate,
      payload,
      (static(default(KZGCommitmentList))),
      noRollback, # Temporary state - no need for rollback
      cache,
      verificationFlags = {},
      transactions_root = transactions_root,
      execution_payload_root = execution_payload_root).mapErr do (error: cstring) -> string:
    # This is almost certainly a bug, but it's complex enough that there's a
    # small risk it might happen even when most proposals succeed - thus we
    # log instead of asserting
    beacon_block_production_errors.inc()
    error "Cannot create block for proposal",
      slot, head = shortLog(head), error
    $error

# workaround for https://github.com/nim-lang/Nim/issues/20900 to avoid default
# parameters
proc makeBeaconBlockForHeadAndSlot*[EP](
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot):
    Future[ForkedBlockResult] =
  return makeBeaconBlockForHeadAndSlot[EP](
    node, randao_reveal, validator_index, graffiti, head, slot,
    execution_payload = Opt.none(EP),
    transactions_root = Opt.none(Eth2Digest),
    execution_payload_root = Opt.none(Eth2Digest))
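
# A minimal usage sketch (commented out): the concrete payload type parameter
# selects which Engine API getPayload variant is used; `proposeBlock` below
# performs the same dispatch based on the slot's fork epoch. `randao` stands
# for an epoch signature the caller has already obtained.
#
#   let res = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload](
#     node, randao, validator_index, node.graffitiBytes, head, slot)
#   if res.isOk:
#     let forkedBlck = res.get() # ForkedBeaconBlock, ready for signing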

proc getBlindedExecutionPayload(
    node: BeaconNode, slot: Slot, executionBlockRoot: Eth2Digest,
    pubkey: ValidatorPubKey):
    Future[Result[bellatrix.ExecutionPayloadHeader, string]] {.async.} =
  if node.payloadBuilderRestClient.isNil:
    return err "getBlindedExecutionPayload: nil REST client"

  let blindedHeader = awaitWithTimeout(
      node.payloadBuilderRestClient.getHeader(slot, executionBlockRoot, pubkey),
      BUILDER_PROPOSAL_DELAY_TOLERANCE):
    return err "Timeout when obtaining blinded header from builder"

  const httpOk = 200
  if blindedHeader.status != httpOk:
    return err "getBlindedExecutionPayload: non-200 HTTP response"
  else:
    if not verify_builder_signature(
        node.dag.cfg.genesisFork, blindedHeader.data.data.message,
        blindedHeader.data.data.message.pubkey,
        blindedHeader.data.data.signature):
      return err "getBlindedExecutionPayload: signature verification failed"

    return ok blindedHeader.data.data.message.header

from ./message_router_mev import
  copyFields, getFieldNames, unblindAndRouteBlockMEV

func constructSignableBlindedBlock[T](
    forkedBlock: ForkedBeaconBlock,
    executionPayloadHeader: bellatrix.ExecutionPayloadHeader): T =
  const
    blckFields = getFieldNames(typeof(forkedBlock.bellatrixData))
    blckBodyFields = getFieldNames(typeof(forkedBlock.bellatrixData.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock.message, forkedBlock.bellatrixData, blckFields)
  copyFields(
    blindedBlock.message.body, forkedBlock.bellatrixData.body, blckBodyFields)
  blindedBlock.message.body.execution_payload_header = executionPayloadHeader

  blindedBlock

func constructPlainBlindedBlock[T](
    forkedBlock: ForkedBeaconBlock,
    executionPayloadHeader: bellatrix.ExecutionPayloadHeader): T =
  const
    blckFields = getFieldNames(typeof(forkedBlock.bellatrixData))
    blckBodyFields = getFieldNames(typeof(forkedBlock.bellatrixData.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock, forkedBlock.bellatrixData, blckFields)
  copyFields(blindedBlock.body, forkedBlock.bellatrixData.body, blckBodyFields)
  blindedBlock.body.execution_payload_header = executionPayloadHeader

  blindedBlock
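
# A minimal sketch (commented out): blinding swaps the full execution payload
# for its header, so the builder keeps transactions private until the signed
# block is returned for unblinding. `forkedBlck` and `payloadHeader` are
# hypothetical placeholders for values produced by `getBlindedBlockParts`.
#
#   let blinded = constructSignableBlindedBlock[
#     bellatrix_mev.SignedBlindedBeaconBlock](forkedBlck, payloadHeader)
#   # blinded.message.body.execution_payload_header now stands in for the
#   # payload; all other fields match the unblinded block field-for-field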

proc blindedBlockCheckSlashingAndSign[T](
    node: BeaconNode, slot: Slot, validator: AttachedValidator,
    validator_index: ValidatorIndex, nonsignedBlindedBlock: T):
    Future[Result[T, string]] {.async.} =
  # Check with slashing protection before submitBlindedBlock
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    blockRoot = hash_tree_root(nonsignedBlindedBlock.message)
    signingRoot = compute_block_signing_root(
      fork, genesis_validators_root, slot, blockRoot)
    notSlashable = node.attachedValidators
      .slashingProtection
      .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

  if notSlashable.isErr:
    warn "Slashing protection activated for MEV block",
      blockRoot = shortLog(blockRoot), blck = shortLog(nonsignedBlindedBlock),
      signingRoot = shortLog(signingRoot),
      validator = validator.pubkey,
      slot = slot,
      existingProposal = notSlashable.error
    return err("MEV proposal would be slashable: " & $notSlashable.error)

  var blindedBlock = nonsignedBlindedBlock
  blindedBlock.signature =
    block:
      let res = await validator.getBlockSignature(
        fork, genesis_validators_root, slot, blockRoot, blindedBlock.message)
      if res.isErr():
        return err("Unable to sign block: " & res.error())
      res.get()

  return ok blindedBlock

proc getBlindedBeaconBlock[T](
    node: BeaconNode, slot: Slot, validator: AttachedValidator,
    validator_index: ValidatorIndex, forkedBlock: ForkedBeaconBlock,
    executionPayloadHeader: bellatrix.ExecutionPayloadHeader):
    Future[Result[T, string]] {.async.} =
  return await blindedBlockCheckSlashingAndSign(
    node, slot, validator, validator_index, constructSignableBlindedBlock[T](
      forkedBlock, executionPayloadHeader))

proc getBlindedBlockParts(
    node: BeaconNode, head: BlockRef, pubkey: ValidatorPubKey,
    slot: Slot, randao: ValidatorSig, validator_index: ValidatorIndex,
    graffiti: GraffitiBytes):
    Future[Result[(bellatrix.ExecutionPayloadHeader, ForkedBeaconBlock), string]]
    {.async.} =
  let
    executionBlockRoot = node.dag.loadExecutionBlockRoot(head)
    executionPayloadHeader =
      try:
        awaitWithTimeout(
            node.getBlindedExecutionPayload(
              slot, executionBlockRoot, pubkey),
            BUILDER_PROPOSAL_DELAY_TOLERANCE):
          Result[bellatrix.ExecutionPayloadHeader, string].err(
            "getBlindedExecutionPayload timed out")
      except RestDecodingError as exc:
        Result[bellatrix.ExecutionPayloadHeader, string].err(
          "getBlindedExecutionPayload REST decoding error")
      except CatchableError as exc:
        Result[bellatrix.ExecutionPayloadHeader, string].err(
          "getBlindedExecutionPayload error")

  if executionPayloadHeader.isErr:
    debug "proposeBlockMEV: getBlindedExecutionPayload failed",
      error = executionPayloadHeader.error, slot, validator_index,
      head = shortLog(head)
    # Haven't committed to the MEV block, so allow EL fallback.
    return err(executionPayloadHeader.error)

  # When creating this block, need to ensure it uses the MEV-provided execution
  # payload, both to avoid repeated calls to network services and to ensure the
  # consistency of this block (e.g., its state root being correct). Since block
  # processing does not work directly using blinded blocks, fix up transactions
  # root after running the state transition function on an otherwise equivalent
  # non-blinded block without transactions.
  var shimExecutionPayload: bellatrix.ExecutionPayload
  copyFields(
    shimExecutionPayload, executionPayloadHeader.get,
    getFieldNames(bellatrix.ExecutionPayloadHeader))

  let newBlock = await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload](
    node, randao, validator_index, graffiti, head, slot,
    execution_payload = Opt.some shimExecutionPayload,
    transactions_root = Opt.some executionPayloadHeader.get.transactions_root,
    execution_payload_root =
      Opt.some hash_tree_root(executionPayloadHeader.get))

  if newBlock.isErr():
    # Haven't committed to the MEV block, so allow EL fallback.
    return err(newBlock.error) # already logged elsewhere!

  let forkedBlck = newBlock.get()

  return ok((executionPayloadHeader.get, forkedBlck))

proc proposeBlockMEV(
    node: BeaconNode, head: BlockRef, validator: AttachedValidator, slot: Slot,
    randao: ValidatorSig, validator_index: ValidatorIndex):
    Future[Opt[BlockRef]] {.async.} =
  let blindedBlockParts = await getBlindedBlockParts(
    node, head, validator.pubkey, slot, randao, validator_index,
    node.graffitiBytes)
  if blindedBlockParts.isErr:
    # Not signed yet, fine to try to fall back on EL
    beacon_block_builder_missed_with_fallback.inc()
    return Opt.none BlockRef

  # These, together, get combined into the blinded block for signing and
  # proposal through the relay network.
  let (executionPayloadHeader, forkedBlck) = blindedBlockParts.get

  # This is only substantively asynchronous with a remote key signer
  let blindedBlock = awaitWithTimeout(
      getBlindedBeaconBlock[bellatrix_mev.SignedBlindedBeaconBlock](
        node, slot, validator, validator_index, forkedBlck,
        executionPayloadHeader),
      500.milliseconds):
    Result[bellatrix_mev.SignedBlindedBeaconBlock, string].err(
      "getBlindedBlock timed out")

  if blindedBlock.isErr:
    info "proposeBlockMEV: getBlindedBeaconBlock failed",
      slot, head = shortLog(head), validator_index, blindedBlock,
      error = blindedBlock.error
    return Opt.none BlockRef

  # Before unblindAndRouteBlockMEV, can fall back to EL; after, cannot
  let unblindedBlockRef = await node.unblindAndRouteBlockMEV(
    blindedBlock.get)
  return if unblindedBlockRef.isOk and unblindedBlockRef.get.isSome:
    beacon_blocks_proposed.inc()
    unblindedBlockRef.get
  else:
    # Signal to the caller that a signed, blinded beacon block was sent to the
    # builder API server, at which point no local EL fallback can occur. Using
    # non-`none` opt with the same head indicates this to proposeBlock(), with
    # any non-`none` return value indicating this in general.
    #
    # unblindedBlockRef.isOk and unblindedBlockRef.get.isNone indicates that
    # the block failed to validate and integrate into the DAG, which for the
    # purpose of this return value, is equivalent. It's used to drive Beacon
    # REST API output.
    let errMsg =
      if unblindedBlockRef.isErr:
        unblindedBlockRef.error
      else:
        "Unblinded block failed either to validate or integrate into validated store"
    warn "proposeBlockMEV: blinded block not successfully unblinded and proposed",
      head = shortLog(head), slot, validator_index,
      validator = shortLog(validator),
      err = errMsg, blindedBlck = shortLog(blindedBlock.get)
    Opt.some head

proc makeBlindedBeaconBlockForHeadAndSlot*(
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot): Future[BlindedBlockResult] {.async.} =
  ## Requests a beacon node to produce a valid blinded block, which can then be
  ## signed by a validator. A blinded block is a block with only a transactions
  ## root, rather than a full transactions list.
  let
    pubkey =
      # Relevant state for knowledge of validators
      withState(node.dag.headState):
        if distinctBase(validator_index) >= forkyState.data.validators.lenu64:
          debug "makeBlindedBeaconBlockForHeadAndSlot: invalid validator index",
            head = shortLog(head),
            validator_index,
            validators_len = forkyState.data.validators.len
          return err("Invalid validator index")

        forkyState.data.validators.item(validator_index).pubkey

    blindedBlockParts = await getBlindedBlockParts(
      node, head, pubkey, slot, randao_reveal, validator_index, graffiti)
  if blindedBlockParts.isErr:
    # Don't try EL fallback -- VC specifically requested a blinded block
    return err("Unable to create blinded block")

  let (executionPayloadHeader, forkedBlck) = blindedBlockParts.get
  return ok constructPlainBlindedBlock[bellatrix_mev.BlindedBeaconBlock](
    forkedBlck, executionPayloadHeader)

from ../spec/datatypes/eip4844 import shortLog

proc proposeBlock(node: BeaconNode,
                  validator: AttachedValidator,
                  validator_index: ValidatorIndex,
                  head: BlockRef,
                  slot: Slot): Future[BlockRef] {.async.} =
  if head.slot >= slot:
    # We should normally not have a head newer than the slot we're proposing for
    # but this can happen if block proposal is delayed
    warn "Skipping proposal, have newer head already",
      headSlot = shortLog(head.slot),
      headBlockRoot = shortLog(head.root),
      slot = shortLog(slot)
    return head

  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    randao =
      block:
        let res = await validator.getEpochSignature(
          fork, genesis_validators_root, slot.epoch)
        if res.isErr():
          warn "Unable to generate randao reveal",
            validator = shortLog(validator), error_msg = res.error()
          return head
        res.get()

  if node.config.payloadBuilderEnable:
    let newBlockMEV = await node.proposeBlockMEV(
      head, validator, slot, randao, validator_index)

    if newBlockMEV.isSome:
      # This might be equivalent to the `head` passed in, but it signals that
      # `submitBlindedBlock` ran, so don't do anything else. Otherwise, it is
      # fine to try again with the local EL.
      if newBlockMEV.get == head:
        # Returning same block as head indicates failure to generate new block
        beacon_block_builder_missed_without_fallback.inc()
      return newBlockMEV.get

  let newBlock =
    if slot.epoch >= node.dag.cfg.DENEB_FORK_EPOCH:
      await makeBeaconBlockForHeadAndSlot[eip4844.ExecutionPayload](
        node, randao, validator_index, node.graffitiBytes, head, slot)
    elif slot.epoch >= node.dag.cfg.CAPELLA_FORK_EPOCH:
      await makeBeaconBlockForHeadAndSlot[capella.ExecutionPayload](
        node, randao, validator_index, node.graffitiBytes, head, slot)
    else:
      await makeBeaconBlockForHeadAndSlot[bellatrix.ExecutionPayload](
        node, randao, validator_index, node.graffitiBytes, head, slot)

  if newBlock.isErr():
    return head # already logged elsewhere!

  var forkedBlck = newBlock.get()

  withBlck(forkedBlck):
    var blobs_sidecar = eip4844.BlobsSidecar(
      beacon_block_slot: slot,
    )
    when blck is eip4844.BeaconBlock and const_preset != "minimal":
      # TODO when lastFcU is none, getExecutionPayload re-queries the EE.
      # We don't do that here, which could lead us to propose invalid blocks
      # (with a payload but no blobs).
      if not (node.eth1Monitor.isNil) and
          node.consensusManager.forkchoiceUpdatedInfo.isSome():

        let
          lastFcU = node.consensusManager.forkchoiceUpdatedInfo
          payload_id = bellatrix.PayloadID(lastFcU.get.payloadId)
          bundle = await getBlobsBundle(node, slot.epoch, validator_index, payload_id)

          # todo: actually compute proof over blobs using nim-kzg-4844
          kzg_aggregated_proof = default(KZGProof)

        blck.body.blob_kzg_commitments =
          List[eip4844.KZGCommitment, Limit MAX_BLOBS_PER_BLOCK].init(
            mapIt(bundle.kzgs, eip4844.KzgCommitment(it)))

        blobs_sidecar.blobs = List[eip4844.Blob, Limit MAX_BLOBS_PER_BLOCK].init(
          mapIt(bundle.blobs, eip4844.Blob(it)))
        blobs_sidecar.kzg_aggregated_proof = kzg_aggregated_proof

    let
      blockRoot = hash_tree_root(blck)
      signingRoot = compute_block_signing_root(
        fork, genesis_validators_root, slot, blockRoot)

      notSlashable = node.attachedValidators
        .slashingProtection
        .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

    blobs_sidecar.beacon_block_root = blockRoot
    if notSlashable.isErr:
      warn "Slashing protection activated for block proposal",
        blockRoot = shortLog(blockRoot), blck = shortLog(blck),
        signingRoot = shortLog(signingRoot),
        validator = validator.pubkey,
        slot = slot,
        existingProposal = notSlashable.error
      return head

    let
      signature =
        block:
          let res = await validator.getBlockSignature(
            fork, genesis_validators_root, slot, blockRoot, forkedBlck)
          if res.isErr():
            warn "Unable to sign block",
              validator = shortLog(validator), error_msg = res.error()
            return head
          res.get()
      signedBlock =
        when blck is phase0.BeaconBlock:
          phase0.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is altair.BeaconBlock:
          altair.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is bellatrix.BeaconBlock:
          bellatrix.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is capella.BeaconBlock:
          capella.SignedBeaconBlock(
            message: blck, signature: signature, root: blockRoot)
        elif blck is eip4844.BeaconBlock:
          eip4844.SignedBeaconBlockAndBlobsSidecar(
            beacon_block: eip4844.SignedBeaconBlock(
              message: blck, signature: signature, root: blockRoot),
            blobs_sidecar: blobs_sidecar)
        else:
          static: doAssert "Unknown SignedBeaconBlock type"
      newBlockRef =
        (await node.router.routeSignedBeaconBlock(signedBlock)).valueOr:
          return head # Errors logged in router

    if newBlockRef.isNone():
      return head # Validation errors logged in router

    notice "Block proposed",
      blockRoot = shortLog(blockRoot), blck = shortLog(blck),
      signature = shortLog(signature), validator = shortLog(validator)

    beacon_blocks_proposed.inc()

    return newBlockRef.get()

proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
  ## Perform all attestations that the validators attached to this node should
  ## perform during the given slot
  if slot + SLOTS_PER_EPOCH < head.slot:
    # The latest block we know about is a lot newer than the slot we're being
    # asked to attest to - this makes it unlikely that it will be included
    # at all.
    # TODO the oldest attestations allowed are those that are older than the
    # finalized epoch.. also, it seems that posting very old attestations
    # is risky from a slashing perspective. More work is needed here.
    warn "Skipping attestation, head is too recent",
      head = shortLog(head),
      slot = shortLog(slot)
    return

  if slot < node.dag.finalizedHead.slot:
    # During checkpoint sync, we implicitly finalize the given slot even if the
    # state transition does not yet consider it final - this is a sanity check
    # mostly to ensure the `atSlot` below works as expected
    warn "Skipping attestation - slot already finalized",
      head = shortLog(head),
      slot = shortLog(slot),
      finalized = shortLog(node.dag.finalizedHead)
    return

  let attestationHead = head.atSlot(slot)
  if head != attestationHead.blck:
    # In rare cases, such as when we're busy syncing or just slow, we'll be
    # attesting to a past state - we must then recreate the world as it looked
    # like back then
    notice "Attesting to a state in the past, falling behind?",
      attestationHead = shortLog(attestationHead),
      head = shortLog(head)

  trace "Checking attestations",
    attestationHead = shortLog(attestationHead),
    head = shortLog(head)

  # We need to run attestations exactly for the slot that we're attesting to.
  # In case blocks went missing, this means advancing past the latest block
  # using empty slots as fillers.
  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#validator-assignments
  let
    epochRef = node.dag.getEpochRef(
        attestationHead.blck, slot.epoch, false).valueOr:
      warn "Cannot construct EpochRef for attestation head, report bug",
        attestationHead = shortLog(attestationHead), slot, error
      return
    committees_per_slot = get_committee_count_per_slot(epochRef.shufflingRef)
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root

  for committee_index in get_committee_indices(committees_per_slot):
    let committee = get_beacon_committee(
      epochRef.shufflingRef, slot, committee_index)

    for index_in_committee, validator_index in committee:
      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
        continue

      let
        data = makeAttestationData(epochRef, attestationHead, committee_index)
        # TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
        signingRoot = compute_attestation_signing_root(
          fork, genesis_validators_root, data)
        registered = node.attachedValidators
          .slashingProtection
          .registerAttestation(
            validator_index,
            validator.pubkey,
            data.source.epoch,
            data.target.epoch,
            signingRoot)

      if registered.isOk():
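        # For reference, compute_subnet_for_attestation in the phase0 spec
        # boils down to roughly:
        #   committees_since_epoch_start =
        #     committees_per_slot * (slot mod SLOTS_PER_EPOCH)
        #   (committees_since_epoch_start + committee_index) mod
        #     ATTESTATION_SUBNET_COUNT
        # e.g. with 4 committees per slot, slot 3 of the epoch and committee 2,
        # the attestation goes out on subnet (4 * 3 + 2) mod 64 = 14.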
        let subnet_id = compute_subnet_for_attestation(
          committees_per_slot, data.slot, committee_index)
        asyncSpawn createAndSendAttestation(
          node, fork, genesis_validators_root, validator, data,
          committee.len(), index_in_committee, subnet_id)
      else:
        warn "Slashing protection activated for attestation",
          attestationData = shortLog(data),
          signingRoot = shortLog(signingRoot),
          validator_index,
          validator = shortLog(validator),
          badVoteDetails = $registered.error()

proc createAndSendSyncCommitteeMessage(node: BeaconNode,
                                       validator: AttachedValidator,
                                       slot: Slot,
                                       subcommitteeIdx: SyncSubcommitteeIndex,
                                       head: BlockRef) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      msg =
        block:
          let res = await validator.getSyncCommitteeMessage(
            fork, genesis_validators_root, slot, head.root)
          if res.isErr():
            warn "Unable to sign committee message",
              validator = shortLog(validator), slot = slot,
              block_root = shortLog(head.root)
            return
          res.get()

    # Logged in the router
    let res = await node.router.routeSyncCommitteeMessage(
      msg, subcommitteeIdx, checkSignature = false)

    if not res.isOk():
      return

    if node.config.dumpEnabled:
      dump(node.config.dumpDirOutgoing, msg, validator.pubkey)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    notice "Error sending sync committee message", err = exc.msg

proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
  # TODO Use a view type to avoid the copy
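  # Note (hedged): the participant set is looked up for `slot + 1` because the
  # message broadcast during `slot` is aggregated into the block expected at
  # `slot + 1` - at a sync committee period boundary, it's the next slot's
  # committee that counts.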
  let
    syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

  for subcommitteeIdx in SyncSubcommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
        continue
      asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
        subcommitteeIdx, head)

proc signAndSendContribution(node: BeaconNode,
                             validator: AttachedValidator,
                             subcommitteeIdx: SyncSubcommitteeIndex,
                             head: BlockRef,
                             slot: Slot) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      selectionProof = block:
        let res = await validator.getSyncCommitteeSelectionProof(
          fork, genesis_validators_root, slot, subcommitteeIdx)
        if res.isErr():
          warn "Unable to generate committee selection proof",
            validator = shortLog(validator), slot,
            subnet_id = subcommitteeIdx, error = res.error()
          return
        res.get()
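
    # For reference, per the altair validator spec (roughly):
    # is_sync_committee_aggregator hashes the selection proof and picks on
    # average TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE (16) aggregators per
    # subcommittee:
    #   modulo = max(1, SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT div
    #     TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
    #   bytes_to_uint64(hash(signature)[0 ..< 8]) mod modulo == 0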
    if not is_sync_committee_aggregator(selectionProof):
      return

    var
      msg = SignedContributionAndProof(
        message: ContributionAndProof(
          aggregator_index: uint64 validator.index.get,
          selection_proof: selectionProof))

    if not node.syncCommitteeMsgPool[].produceContribution(
        slot,
        head.root,
        subcommitteeIdx,
        msg.message.contribution):
      return

    msg.signature = block:
      let res = await validator.getContributionAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign sync committee contribution",
          validator = shortLog(validator), message = shortLog(msg.message),
          error_msg = res.error()
        return
      res.get()

    # Logged in the router
    discard await node.router.routeSignedContributionAndProof(msg, false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending sync committee contribution", err = exc.msg

proc handleSyncCommitteeContributions(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
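    # `slot + 1` for the same reason as in handleSyncCommitteeMessages above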
    syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)

  for subcommitteeIdx in SyncSubcommitteeIndex:
    for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
      let validator = node.getValidatorForDuties(valIdx, slot).valueOr:
        continue

      asyncSpawn signAndSendContribution(
        node, validator, subcommitteeIdx, head, slot)

proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
    Future[BlockRef] {.async.} =
  ## Perform the proposal for the given slot, iff we have a validator attached
  ## that is supposed to do so, given the shuffling at that slot for the given
  ## head - to compute the proposer, we need to advance a state to the given
  ## slot
  let
    proposer = node.dag.getProposer(head, slot).valueOr:
      return head
    proposerKey = node.dag.validatorKey(proposer).get().toPubKey
    validator = node.getValidatorForDuties(proposer, slot).valueOr:
      debug "Expecting block proposal", headRoot = shortLog(head.root),
            slot = shortLog(slot),
            proposer_index = proposer,
            proposer = shortLog(proposerKey)
      return head

  return await proposeBlock(node, validator, proposer, head, slot)

proc signAndSendAggregate(
    node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,
    slot: Slot, committee_index: CommitteeIndex) {.async.} =
  try:
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      genesis_validators_root = node.dag.genesis_validators_root
      validator_index = validator.index.get()
      selectionProof = block:
        let res = await validator.getSlotSignature(
          fork, genesis_validators_root, slot)
        if res.isErr():
          warn "Unable to create slot signature",
            validator = shortLog(validator),
            slot, error = res.error()
          return
        res.get()

    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#aggregation-selection
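    # For reference, per the phase0 spec this selects on average
    # TARGET_AGGREGATORS_PER_COMMITTEE (16) aggregators per committee:
    #   modulo = max(1, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
    #   bytes_to_uint64(hash(slot_signature)[0 ..< 8]) mod modulo == 0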
    if not is_aggregator(
        shufflingRef, slot, committee_index, selectionProof):
      return

    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#construct-aggregate
    # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#aggregateandproof
    var
      msg = SignedAggregateAndProof(
        message: AggregateAndProof(
          aggregator_index: uint64 validator_index,
          selection_proof: selectionProof))

    msg.message.aggregate = node.attestationPool[].getAggregatedAttestation(
        slot, committee_index).valueOr:
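      # No aggregate available for this slot/committee - nothing to send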
      return

    msg.signature = block:
      let res = await validator.getAggregateAndProofSignature(
        fork, genesis_validators_root, msg.message)

      if res.isErr():
        warn "Unable to sign aggregate",
          validator = shortLog(validator), error_msg = res.error()
        return
      res.get()

    # Logged in the router
    discard await node.router.routeSignedAggregateAndProof(
      msg, checkSignature = false)
  except CatchableError as exc:
    # An error could happen here when the signature task fails - we must
    # not leak the exception because this is an asyncSpawn task
    warn "Error sending aggregate", err = exc.msg

proc sendAggregatedAttestations(
    node: BeaconNode, head: BlockRef, slot: Slot) {.async.} =
  # Aggregated attestations must be sent by members of the beacon committees
  # for the given slot, for which `is_aggregator` returns `true`.
  let
    shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
      warn "Cannot construct EpochRef for head, report bug",
        head = shortLog(head), slot
      return
    committees_per_slot = get_committee_count_per_slot(shufflingRef)

  for committee_index in get_committee_indices(committees_per_slot):
    for _, validator_index in
        get_beacon_committee(shufflingRef, slot, committee_index):
      let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
        continue
      asyncSpawn signAndSendAggregate(node, validator, shufflingRef, slot,
        committee_index)

proc updateValidatorMetrics*(node: BeaconNode) =
  # Technically, this only needs to be done on epoch transitions and if there's
  # a reorg that spans an epoch transition, but it's easier to implement this
  # way for now.

  # We'll limit labelled metrics to the first 64, so that we don't overload
  # Prometheus.

  var total: Gwei
  var i = 0
  for _, v in node.attachedValidators[].validators:
    let balance =
      if v.index.isNone():
        0.Gwei
      elif v.index.get().uint64 >=
          getStateField(node.dag.headState, balances).lenu64:
        debug "Cannot get validator balance, index out of bounds",
          pubkey = shortLog(v.pubkey), index = v.index.get(),
          balances = getStateField(node.dag.headState, balances).len,
          stateRoot = getStateRoot(node.dag.headState)
        0.Gwei
      else:
        getStateField(node.dag.headState, balances).item(v.index.get())

    if i < 64:
      attached_validator_balance.set(
        balance.toGaugeValue, labelValues = [shortLog(v.pubkey)])

    inc i
    total += balance

  node.attachedValidatorBalanceTotal = total
  attached_validator_balance_total.set(total.toGaugeValue)

from std/times import epochTime

proc getValidatorRegistration(
    node: BeaconNode, validator: AttachedValidator, epoch: Epoch):
    Future[Result[SignedValidatorRegistrationV1, string]] {.async.} =
  let validatorIdx = validator.index.valueOr:
    # The validator index will be missing when the validator was not
    # activated for duties yet. We can safely skip the registration then.
    return

  let feeRecipient = node.getFeeRecipient(validator.pubkey, validatorIdx, epoch)
  let gasLimit = node.getGasLimit(validator.pubkey)
  var validatorRegistration = SignedValidatorRegistrationV1(
    message: ValidatorRegistrationV1(
      fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
      gas_limit: gasLimit,
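      # std/times.epochTime is wall-clock UNIX time in fractional seconds,
      # truncated to whole seconds here - relays generally expect a recent
      # timestamp on registrations.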
      timestamp: epochTime().uint64,
      pubkey: validator.pubkey))

  let signature = await validator.getBuilderSignature(
    node.dag.cfg.genesisFork, validatorRegistration.message)

  debug "getValidatorRegistration: registering",
    validatorRegistration

  if signature.isErr:
    return err signature.error

  validatorRegistration.signature = signature.get

  return ok validatorRegistration

from std/sequtils import toSeq

proc registerValidators*(node: BeaconNode, epoch: Epoch) {.async.} =
  try:
    if (not node.config.payloadBuilderEnable) or
        node.currentSlot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH:
      return
    elif node.config.payloadBuilderEnable and
        node.payloadBuilderRestClient.isNil:
      warn "registerValidators: payload builder enabled, but REST client not initialized"
      return

    const HttpOk = 200

    let restBuilderStatus = awaitWithTimeout(
        node.payloadBuilderRestClient.checkBuilderStatus(),
        BUILDER_STATUS_DELAY_TOLERANCE):
      debug "Timeout when obtaining builder status"
      return

    if restBuilderStatus.status != HttpOk:
      warn "registerValidators: specified builder or relay not available",
        builderUrl = node.config.payloadBuilderUrl,
        builderStatus = restBuilderStatus
      return

    # The async aspect of signing the registrations can cause the attached
    # validators to change during the loop.
    let attachedValidatorPubkeys =
      toSeq(node.attachedValidators[].validators.keys)

    const emptyNestedSeq = @[newSeq[SignedValidatorRegistrationV1](0)]
    # https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#validator-registration
    # Seed with single empty inner list to avoid special cases
    var validatorRegistrations = emptyNestedSeq

    # Some relay networks disallow large request bodies, so split requests
    template addValidatorRegistration(
        validatorRegistration: SignedValidatorRegistrationV1) =
      const registrationValidatorChunkSize = 1000

      if validatorRegistrations[^1].len < registrationValidatorChunkSize:
        validatorRegistrations[^1].add validatorRegistration
      else:
        validatorRegistrations.add @[validatorRegistration]
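
    # E.g. with a chunk size of 1000, 2500 registrations are sent as three
    # requests of 1000, 1000 and 500 registrations respectively.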

    # First, check for VC-added keys; cheaper because provided pre-signed
    var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
    if node.externalBuilderRegistrations.len > 0:
      withState(node.dag.headState):
        let currentEpoch = node.currentSlot().epoch
        for i in 0 ..< forkyState.data.validators.len:
          # https://github.com/ethereum/beacon-APIs/blob/v2.3.0/apis/validator/register_validator.yaml
          # "requests containing currently inactive or unknown validator
          # pubkeys will be accepted, as they may become active at a later
          # epoch" which means filtering is needed here, because including
          # any validators not pending or active may cause the request, as
          # a whole, to fail.
          let pubkey = forkyState.data.validators.item(i).pubkey
          if pubkey in node.externalBuilderRegistrations and
              forkyState.data.validators.item(i).exit_epoch > currentEpoch:
            let signedValidatorRegistration =
              node.externalBuilderRegistrations[pubkey]
            nonExitedVcPubkeys.incl signedValidatorRegistration.message.pubkey
            addValidatorRegistration signedValidatorRegistration

    for key in attachedValidatorPubkeys:
      # Already included from VC
      if key in nonExitedVcPubkeys:
        warn "registerValidators: same validator registered by beacon node and validator client",
          pubkey = shortLog(key)
        continue

      # Time passed during awaits; REST keymanager API might have removed it
      if key notin node.attachedValidators[].validators:
        continue

      let validator = node.attachedValidators[].validators[key]

      if validator.index.isNone:
        continue

      # https://github.com/ethereum/builder-specs/blob/v0.3.0/apis/builder/validators.yaml
      # Builders should verify that `pubkey` corresponds to an active or
      # pending validator
      withState(node.dag.headState):
        if distinctBase(validator.index.get) >=
            forkyState.data.validators.lenu64:
          continue

        if node.currentSlot().epoch >=
            forkyState.data.validators.item(validator.index.get).exit_epoch:
          continue

      if validator.externalBuilderRegistration.isSome:
        addValidatorRegistration validator.externalBuilderRegistration.get
      else:
        let validatorRegistration =
          await node.getValidatorRegistration(validator, epoch)
        if validatorRegistration.isErr:
          error "registerValidators: validatorRegistration failed",
            validatorRegistration
          continue

        # Time passed during await; REST keymanager API might have removed it
        if key notin node.attachedValidators[].validators:
          continue

        node.attachedValidators[].validators[key].externalBuilderRegistration =
          Opt.some validatorRegistration.get
        addValidatorRegistration validatorRegistration.get

    if validatorRegistrations == emptyNestedSeq:
      return

    # TODO if there are too many chunks, could trigger DoS protections, so
    # might randomize order to accumulate cumulative coverage
    for chunkIdx in 0 ..< validatorRegistrations.len:
      let registerValidatorResult =
        awaitWithTimeout(
            node.payloadBuilderRestClient.registerValidator(
              validatorRegistrations[chunkIdx]),
            BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE):
          error "Timeout when registering validator with builder"
          continue # Try next batch regardless
      if HttpOk != registerValidatorResult.status:
        warn "registerValidators: Couldn't register validator with MEV builder",
          registerValidatorResult
  except CatchableError as exc:
    warn "registerValidators: exception",
      error = exc.msg

proc updateValidators(
    node: BeaconNode, validators: openArray[Validator]) =
  # Since validator indices are stable, we only check the "updated" range -
  # checking all validators would significantly slow down this loop when there
  # are many inactive keys
  for i in node.dutyValidatorCount..validators.high:
    let v = node.attachedValidators[].getValidator(validators[i].pubkey)
    if v != nil:
      v.index = Opt.some ValidatorIndex(i)

  node.dutyValidatorCount = validators.len

  for validator in node.attachedValidators[]:
    # Check if any validators have been activated
    if validator.needsUpdate and validator.index.isSome():
      # Activation epoch can change after index is assigned..
      let index = validator.index.get()
      if index < validators.lenu64:
        validator.updateValidator(
          Opt.some(ValidatorAndIndex(
            index: index, validator: validators[int index]
          )))

proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
  ## Perform validator duties - create blocks, vote and aggregate existing votes
  if node.attachedValidators[].count == 0:
    # Nothing to do because we have no validator attached
    return

  # The dag head might be updated by sync while we're working due to the
  # await calls, thus we use a local variable to keep the logic straight here
  var head = node.dag.head
  case node.isSynced(head)
  of SyncStatus.unsynced:
    info "Beacon node not in sync; skipping validator duties for now",
      slot, headSlot = head.slot

    # Rewards will be growing though, as we sync..
    updateValidatorMetrics(node)

    return
  of SyncStatus.optimistic:
    info "Execution client not in sync; skipping validator duties for now",
      slot, headSlot = head.slot

    # Rewards will be growing though, as we sync..
    updateValidatorMetrics(node)

    return
  of SyncStatus.synced:
    discard # keep going

  withState(node.dag.headState):
    node.updateValidators(forkyState.data.validators.asSeq())

  var curSlot = lastSlot + 1

  # Start by checking if there's work we should have done in the past that we
  # can still meaningfully do
  while curSlot < slot:
    notice "Catching up on validator duties",
      curSlot = shortLog(curSlot),
      lastSlot = shortLog(lastSlot),
      slot = shortLog(slot)

    # For every slot we're catching up, we'll propose then send
    # attestations - head should normally be advancing along the same branch
    # in this case
    head = await handleProposal(node, head, curSlot)

    # For each slot we missed, we need to send out attestations - if we were
    # proposing during this time, we'll use the newly proposed head, else just
    # keep reusing the same - the attestation that goes out will actually
    # rewind the state to what it looked like at the time of that slot
    handleAttestations(node, head, curSlot)

    curSlot += 1

  let
    newHead = await handleProposal(node, head, slot)
    didSubmitBlock = (newHead != head)
  head = newHead

  let
    # The latest point in time when we'll be sending out attestations
    attestationCutoff = node.beaconClock.fromNow(slot.attestation_deadline())

  if attestationCutoff.inFuture:
    debug "Waiting to send attestations",
      head = shortLog(head),
      attestationCutoff = shortLog(attestationCutoff.offset)

    # Wait either for the block or the attestation cutoff time to arrive
    if await node.consensusManager[].expectBlock(slot)
        .withTimeout(attestationCutoff.offset):
      # The expected block arrived (or expectBlock was called again which
      # shouldn't happen as this is the only place we use it) - in our async
      # loop however, we might have been doing other processing that caused
      # delays here so we'll cap the waiting to the time when we would have
      # sent out attestations had the block not arrived.
      # An opposite case is that we received (or produced) a block that has
      # not yet reached our neighbours. To protect against our attestations
      # being dropped (because the others have not yet seen the block), we'll
      # impose a minimum delay of 2000ms. The delay is enforced only when we're
      # not hitting the "normal" cutoff time for sending out attestations.
      # An earlier delay of 250ms has proven to be not enough, increasing the
      # risk of losing attestations, and with growing block sizes, 1000ms
      # started to be risky as well.
      # Regardless, because we "just" received the block, we'll impose the
      # delay.

      # Take into consideration chains with a different slot time
      const afterBlockDelay = nanos(attestationSlotOffset.nanoseconds div 2)
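      # With mainnet timing, where attestationSlotOffset is a third of the
      # 12s slot, this works out to the 2000ms minimum delay described above;
      # chains with a shorter slot time get a proportionally shorter delay.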
      let
        afterBlockTime = node.beaconClock.now() + afterBlockDelay
        afterBlockCutoff = node.beaconClock.fromNow(
          min(afterBlockTime, slot.attestation_deadline() + afterBlockDelay))

      if afterBlockCutoff.inFuture:
        debug "Got block, waiting to send attestations",
          head = shortLog(head),
          afterBlockCutoff = shortLog(afterBlockCutoff.offset)

        await sleepAsync(afterBlockCutoff.offset)

    # Time passed - we might need to select a new head in that case
    node.consensusManager[].updateHead(slot)
    head = node.dag.head

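  # Attestations and sync committee messages are sent together just below, so
  # their deadlines must coincide - hence this compile-time check.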
  static: doAssert attestationSlotOffset == syncCommitteeMessageSlotOffset

  handleAttestations(node, head, slot)
  handleSyncCommitteeMessages(node, head, slot)

  updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers

  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/phase0/validator.md#broadcast-aggregate
  # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.2/specs/altair/validator.md#broadcast-sync-committee-contribution
  # Wait 2 / 3 of the slot time to allow messages to propagate, then collect
  # the result in aggregates
  static:
    doAssert aggregateSlotOffset == syncContributionSlotOffset, "Timing change?"
  let
    aggregateCutoff = node.beaconClock.fromNow(slot.aggregate_deadline())
  if aggregateCutoff.inFuture:
    debug "Waiting to send aggregate attestations",
      aggregateCutoff = shortLog(aggregateCutoff.offset)
    await sleepAsync(aggregateCutoff.offset)

  let sendAggregatedAttestationsFut =
    sendAggregatedAttestations(node, head, slot)

  let handleSyncCommitteeContributionsFut =
    handleSyncCommitteeContributions(node, head, slot)

  await handleSyncCommitteeContributionsFut
  await sendAggregatedAttestationsFut

proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
  ## Register upcoming duties of attached validators with the duty tracker

  if node.attachedValidators[].count() == 0 or
      node.isSynced(node.dag.head) != SyncStatus.synced:
    # Nothing to do - no validators attached, or the node is not in sync yet
    return

  let
    genesis_validators_root = node.dag.genesis_validators_root
    head = node.dag.head

  # Getting the slot signature is expensive but cached - in "normal" cases we'll
  # be getting the duties one slot at a time
  for slot in wallSlot ..< wallSlot + SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS:
    let
      shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
        warn "Cannot construct EpochRef for duties - report bug",
          head = shortLog(head), slot
        return
    let
      fork = node.dag.forkAtEpoch(slot.epoch)
      committees_per_slot = get_committee_count_per_slot(shufflingRef)

    for committee_index in get_committee_indices(committees_per_slot):
      let committee = get_beacon_committee(shufflingRef, slot, committee_index)

      for index_in_committee, validator_index in committee:
        let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
          continue
        let
          subnet_id = compute_subnet_for_attestation(
            committees_per_slot, slot, committee_index)
          slotSigRes = await validator.getSlotSignature(
            fork, genesis_validators_root, slot)
        if slotSigRes.isErr():
          error "Unable to create slot signature",
            validator = shortLog(validator),
            error_msg = slotSigRes.error()
          continue
        let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())

        node.consensusManager[].actionTracker.registerDuty(
          slot, subnet_id, validator_index, isAggregator)