# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

# This module is responsible for handling beacon node validators, i.e. those
# that are running directly in the beacon node and not in a separate validator
# client process - we name these "beacon validators" while those running
# outside are "client validators".
# This module also contains implementation logic for the REST validator API.

import
  # Standard library
  std/[os, tables],

  # Nimble packages
  stew/[assign2, byteutils],
  chronos, metrics,
  chronicles, chronicles/timings,
  json_serialization/std/[options, sets, net],
  eth/db/kvstore,
  web3/primitives,
  kzg4844,

  # Local modules
  ../spec/datatypes/[phase0, altair, bellatrix],
  ../spec/[
    eth2_merkleization, forks, helpers, network, signatures, state_transition,
    validator],
  ../consensus_object_pools/[
    spec_cache, blockchain_dag, block_clearance, attestation_pool,
    sync_committee_msg_pool, validator_change_pool, consensus_manager,
    common_tools],
  ../el/el_manager,
  ../networking/eth2_network,
  ../sszdump, ../sync/sync_manager,
  ../gossip_processing/block_processor,
  ".."/[conf, beacon_clock, beacon_node],
  "."/[
    keystore_management, slashing_protection, validator_duties, validator_pool],
  ".."/spec/mev/rest_deneb_mev_calls

from std/sequtils import countIt, foldl, mapIt
from eth/async_utils import awaitWithTimeout

# Metrics for tracking attestation and beacon block loss
declareCounter beacon_light_client_finality_updates_sent,
  "Number of LC finality updates sent by this peer"

declareCounter beacon_light_client_optimistic_updates_sent,
  "Number of LC optimistic updates sent by this peer"

declareCounter beacon_blocks_proposed,
  "Number of beacon chain blocks sent by this peer"

declareCounter beacon_block_production_errors,
  "Number of times we failed to produce a block"

# Metrics for tracking external block builder usage
declareCounter beacon_block_builder_missed_with_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed with fallback"

declareCounter beacon_block_builder_missed_without_fallback,
  "Number of beacon chain blocks where an attempt to use an external block builder failed without possible fallback"

declareGauge(attached_validator_balance,
  "Validator balance at slot end of the first 64 validators, in Gwei",
  labels = ["pubkey"])

declarePublicGauge(attached_validator_balance_total,
  "Validator balance of all attached validators, in Gwei")

logScope: topics = "beacval"

type
  EngineBid* = object
    blck*: ForkedBeaconBlock
    executionPayloadValue*: Wei
    consensusBlockValue*: UInt256
    blobsBundleOpt*: Opt[BlobsBundle]

  BuilderBid[SBBB] = object
    blindedBlckPart*: SBBB
    executionPayloadValue*: UInt256
    consensusBlockValue*: UInt256

  ForkedBlockResult =
    Result[EngineBid, string]
  BlindedBlockResult[SBBB] =
    Result[BuilderBid[SBBB], string]

  Bids[SBBB] = object
    engineBid: Opt[EngineBid]
    builderBid: Opt[BuilderBid[SBBB]]

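# Note (added for clarity): `EngineBid` and `BuilderBid` deliberately mirror
# each other so that block proposal can weigh a locally built block against a
# builder bid; `builderBetterBid` below compares the `executionPayloadValue`
# fields, while `consensusBlockValue` carries the expected consensus-layer
# rewards alongside for reporting.
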
proc getValidator*(validators: auto,
                   pubkey: ValidatorPubKey): Opt[ValidatorAndIndex] =
  let idx = validators.findIt(it.pubkey == pubkey)
  if idx == -1:
    # We allow adding a validator even if its key is not in the state registry:
    # it might be that the deposit for this validator has not yet been processed
    Opt.none ValidatorAndIndex
  else:
    Opt.some ValidatorAndIndex(index: ValidatorIndex(idx),
                               validator: validators[idx])

func blockConsensusValue(r: BlockRewards): UInt256 {.noinit.} =
  # Returns value of `block-consensus-value` in Wei units.
  u256(r.attestations + r.sync_aggregate +
    r.proposer_slashings + r.attester_slashings) * u256(1000000000)

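# Note (added for clarity): the reward components summed above are expressed in
# Gwei, so the result is scaled by 10^9 to obtain Wei - e.g. a combined reward
# of 12_345 Gwei becomes 12_345_000_000_000 Wei.
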
proc addValidatorsFromWeb3Signer(
    node: BeaconNode, web3signerUrl: Web3SignerUrl, epoch: Epoch)
    {.async: (raises: [CancelledError]).} =
  let dynamicStores =
    # Error is already reported via log warning.
    (await queryValidatorsSource(web3signerUrl)).valueOr(
      default(seq[KeystoreData]))

  for keystore in dynamicStores:
    let
      data =
        withState(node.dag.headState):
          getValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
      index =
        if data.isSome():
          Opt.some(data.get().index)
        else:
          Opt.none(ValidatorIndex)
      feeRecipient =
        node.consensusManager[].getFeeRecipient(keystore.pubkey, index, epoch)
      gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey)
      v = node.attachedValidators[].addValidator(keystore, feeRecipient,
                                                 gasLimit)
    v.updateValidator(data)

proc addValidators*(node: BeaconNode) {.async: (raises: [CancelledError]).} =
  info "Loading validators", validatorsDir = node.config.validatorsDir(),
    keystore_cache_available = not(isNil(node.keystoreCache))
  let epoch = node.currentSlot().epoch

  for keystore in listLoadableKeystores(node.config, node.keystoreCache):
    let
      data = withState(node.dag.headState):
        getValidator(forkyState.data.validators.asSeq(), keystore.pubkey)
      index =
        if data.isSome():
          Opt.some(data.get().index)
        else:
          Opt.none(ValidatorIndex)
      feeRecipient = node.consensusManager[].getFeeRecipient(
        keystore.pubkey, index, epoch)
      gasLimit = node.consensusManager[].getGasLimit(keystore.pubkey)

      v = node.attachedValidators[].addValidator(keystore, feeRecipient,
                                                 gasLimit)
    v.updateValidator(data)

  # We use `allFutures` because all failures are already reported as
  # user-visible warnings in `queryValidatorsSource`.
  # We don't consider them fatal because the Web3Signer may be experiencing
  # a temporary hiccup that will be resolved later.
  # TODO mapIt version fails at type deduction - figure out..
  var futs: seq[Future[void].Raising([CancelledError])]
  for it in node.config.web3SignerUrls:
    futs.add node.addValidatorsFromWeb3Signer(it, epoch)
  await allFutures(futs)

proc pollForDynamicValidators*(node: BeaconNode,
                               web3signerUrl: Web3SignerUrl,
                               intervalInSeconds: int)
                              {.async: (raises: [CancelledError]).} =
  if intervalInSeconds == 0:
    return

  proc addValidatorProc(keystore: KeystoreData) =
    let
      epoch = node.currentSlot().epoch
      index = Opt.none(ValidatorIndex)
      feeRecipient =
        node.consensusManager[].getFeeRecipient(keystore.pubkey, index, epoch)
      gasLimit =
        node.consensusManager[].getGasLimit(keystore.pubkey)
    discard node.attachedValidators[].addValidator(keystore, feeRecipient,
                                                   gasLimit)

  var
    timeout = seconds(intervalInSeconds)

  while true:
    await sleepAsync(timeout)
    timeout =
      block:
        let res = await queryValidatorsSource(web3signerUrl)
        if res.isOk():
          let keystores = res.get()
          debug "Validators source has been polled for validators",
                keystores_found = len(keystores),
                web3signer_url = web3signerUrl.url
          node.attachedValidators.updateDynamicValidators(web3signerUrl,
                                                          keystores,
                                                          addValidatorProc)
          seconds(intervalInSeconds)
        else:
          # In case of error we are going to repeat our call with a much
          # smaller interval.
          seconds(5)

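# Example (illustrative): with `intervalInSeconds = 3600`, the validators
# source is re-queried once per hour and newly listed keystores are attached
# on the fly; after a failed query the next attempt happens after 5 seconds
# instead, until the source responds again.
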
proc getValidator*(node: BeaconNode, idx: ValidatorIndex): Opt[AttachedValidator] =
  let key = ? node.dag.validatorKey(idx)
  node.attachedValidators[].getValidator(key.toPubKey())

proc getValidatorForDuties*(
    node: BeaconNode, idx: ValidatorIndex, slot: Slot,
    slashingSafe = false): Opt[AttachedValidator] =
  let key = ? node.dag.validatorKey(idx)

  node.attachedValidators[].getValidatorForDuties(
    key.toPubKey(), slot, slashingSafe)

proc getGraffitiBytes*(
    node: BeaconNode, validator: AttachedValidator): GraffitiBytes =
  getGraffiti(node.config.validatorsDir, node.config.defaultGraffitiBytes(),
              validator.pubkey)

type ChainSyncStatus* {.pure.} = enum
  Syncing,
  Synced,
  Degraded

proc syncStatus*(node: BeaconNode, head: BlockRef): ChainSyncStatus =
  ## Generally, the problem is that slot time keeps advancing
  ## even when there are no blocks being produced, so there's no way to
  ## distinguish validators genuinely going missing from the node not being
  ## well connected (during a network split or an internet outage for
  ## example). It would generally be correct to simply keep running as if
  ## we were the only legit node left alive, but then we run into issues:
  ## with enough empty slots, the validator pool is emptied leading
  ## to empty committees and lots of empty slot processing that will be
  ## thrown away as soon as we're synced again.
  let
    # The slot we should be at, according to the clock
    beaconTime = node.beaconClock.now()
    wallSlot = beaconTime.toSlot()

  if not wallSlot.afterGenesis or
      head.slot + node.config.syncHorizon >= wallSlot.slot:
    node.dag.resetChainProgressWatchdog()
    return ChainSyncStatus.Synced

  if node.dag.chainIsProgressing():
    # Chain is progressing, we are out of sync
    return ChainSyncStatus.Syncing

  let numPeers = len(node.network.peerPool)
  if numPeers <= node.config.maxPeers div 4:
    # We may have poor connectivity, wait until more peers are available.
    # This could also be intermittent, as state replays while chain is degraded
    # may take significant amounts of time, during which many peers are lost
    return ChainSyncStatus.Syncing

  let
    maxHeadSlot = node.dag.heads.foldl(max(a, b.slot), GENESIS_SLOT)
    numPeersWithHigherProgress = node.network.peerPool.peers
      .countIt(it != nil and it.getHeadSlot() > maxHeadSlot)
  if numPeersWithHigherProgress > node.config.maxPeers div 8:
    # A peer indicates that they are on a later slot, wait for sync manager
    # to progress, or for it to kick the peer if they are faking the status
    warn "Chain appears to have stalled, but peers indicate higher progress",
      numPeersWithHigherProgress, numPeers, maxPeers = node.config.maxPeers,
      head, maxHeadSlot
    node.dag.resetChainProgressWatchdog()
    return ChainSyncStatus.Syncing

  # We are on the latest slot among all of our peers, and there has been no
  # chain progress for an extended period of time.
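  # Note (added for clarity): the Degraded path below relies on a dedicated
  # "incremental" state, since the head, clearance and epochRef states cannot
  # be borrowed to catch up on validator duties; that extra state is advanced
  # towards the wall slot separately (see `onSlotEnd`) before duties resume.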
  if node.dag.incrementalState == nil:
    # The head state is too far in the past to timely perform validator duties
    return ChainSyncStatus.Degraded
  if node.dag.incrementalState[].latest_block_id != node.dag.head.bid:
    # The incremental state is not yet on the correct head (see `onSlotEnd`)
    return ChainSyncStatus.Degraded
  let incrementalSlot = getStateField(node.dag.incrementalState[], slot)
  if incrementalSlot + node.config.syncHorizon < wallSlot.slot:
    # The incremental state still needs to advance further (see `onSlotEnd`)
    return ChainSyncStatus.Degraded

  # It is reasonably safe to assume that the network has halted, resume duties
  ChainSyncStatus.Synced

proc isSynced*(node: BeaconNode, head: BlockRef): bool =
  node.syncStatus(head) == ChainSyncStatus.Synced

proc handleLightClientUpdates*(node: BeaconNode, slot: Slot)
    {.async: (raises: [CancelledError]).} =
  template pool: untyped = node.lightClientPool[]

  static: doAssert lightClientFinalityUpdateSlotOffset ==
    lightClientOptimisticUpdateSlotOffset
  let sendTime = node.beaconClock.fromNow(
    slot.light_client_finality_update_time())
  if sendTime.inFuture:
    debug "Waiting to send LC updates", slot, delay = shortLog(sendTime.offset)
    await sleepAsync(sendTime.offset)

  withForkyFinalityUpdate(node.dag.lcDataStore.cache.latest):
    when lcDataFork > LightClientDataFork.None:
      let signature_slot = forkyFinalityUpdate.signature_slot
      if slot != signature_slot:
        return

      let num_active_participants =
        forkyFinalityUpdate.sync_aggregate.num_active_participants
      if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
        return

      let
        finalized_slot =
          forkyFinalityUpdate.finalized_header.beacon.slot
        has_supermajority =
          hasSupermajoritySyncParticipation(num_active_participants.uint64)
        newFinality =
          if finalized_slot > pool.latestForwardedFinalitySlot:
            true
          elif finalized_slot < pool.latestForwardedFinalitySlot:
            false
          elif pool.latestForwardedFinalityHasSupermajority:
            false
          else:
            has_supermajority
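      # In short: forward a finality update when the finalized slot advances,
      # or when it stays the same but this update reaches supermajority sync
      # participation while the previously forwarded one did not.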
      if newFinality:
        template msg(): auto = forkyFinalityUpdate
        let sendResult =
          await node.network.broadcastLightClientFinalityUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        pool.latestForwardedFinalitySlot = finalized_slot
        pool.latestForwardedFinalityHasSupermajority = has_supermajority

        if sendResult.isOk:
          beacon_light_client_finality_updates_sent.inc()
          notice "LC finality update sent", message = shortLog(msg)
        else:
          warn "LC finality update failed to send",
            error = sendResult.error()

      let attested_slot = forkyFinalityUpdate.attested_header.beacon.slot
      if attested_slot > pool.latestForwardedOptimisticSlot:
        let msg = forkyFinalityUpdate.toOptimistic
        let sendResult =
          await node.network.broadcastLightClientOptimisticUpdate(msg)

        # Optimization for message with ephemeral validity, whether sent or not
        pool.latestForwardedOptimisticSlot = attested_slot

        if sendResult.isOk:
          beacon_light_client_optimistic_updates_sent.inc()
          notice "LC optimistic update sent", message = shortLog(msg)
        else:
          warn "LC optimistic update failed to send",
            error = sendResult.error()

proc createAndSendAttestation(node: BeaconNode,
                              fork: Fork,
                              genesis_validators_root: Eth2Digest,
                              registered: RegisteredAttestation,
                              subnet_id: SubnetId)
                              {.async: (raises: [CancelledError]).} =
  let
    signature = block:
      let res = await registered.validator.getAttestationSignature(
        fork, genesis_validators_root, registered.data)
      if res.isErr():
        warn "Unable to sign attestation",
          validator = shortLog(registered.validator),
          attestationData = shortLog(registered.data),
          error_msg = res.error()
        return
      res.get()
    attestation = registered.toAttestation(signature)

  registered.validator.doppelgangerActivity(attestation.data.slot.epoch)

  # Logged in the router
  let res = await node.router.routeAttestation(
    attestation, subnet_id, checkSignature = false)
  if not res.isOk():
    return

  if node.config.dumpEnabled:
    dump(
      node.config.dumpDirOutgoing, attestation.data,
      registered.validator.pubkey)

proc getBlockProposalEth1Data*(node: BeaconNode,
                               state: ForkedHashedBeaconState):
                               BlockProposalEth1Data =
  let finalizedEpochRef = node.dag.getFinalizedEpochRef()
  result = node.elManager.getBlockProposalData(
    state, finalizedEpochRef.eth1_data,
    finalizedEpochRef.eth1_deposit_index)

proc getFeeRecipient(node: BeaconNode,
                     pubkey: ValidatorPubKey,
                     validatorIdx: ValidatorIndex,
                     epoch: Epoch): Eth1Address =
  node.consensusManager[].getFeeRecipient(pubkey, Opt.some(validatorIdx), epoch)

proc getGasLimit(node: BeaconNode,
                 pubkey: ValidatorPubKey): uint64 =
  node.consensusManager[].getGasLimit(pubkey)

from web3/engine_api_types import PayloadExecutionStatus
from ../spec/datatypes/capella import BeaconBlock, ExecutionPayload
from ../spec/datatypes/deneb import BeaconBlock, ExecutionPayload, shortLog
from ../spec/beaconstate import get_expected_withdrawals

proc getExecutionPayload(
    PayloadType: type ForkyExecutionPayloadForSigning,
    node: BeaconNode, head: BlockRef, proposalState: ref ForkedHashedBeaconState,
    validator_index: ValidatorIndex): Future[Opt[PayloadType]]
    {.async: (raises: [CancelledError], raw: true).} =
  # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/validator.md#executionpayload

  let
    epoch = withState(proposalState[]):
      forkyState.data.slot.epoch
    feeRecipient = block:
      let pubkey = node.dag.validatorKey(validator_index)
      if pubkey.isNone():
        warn "Cannot get proposer pubkey, bug?", validator_index
        default(Eth1Address)
      else:
        node.getFeeRecipient(pubkey.get().toPubKey(), validator_index, epoch)

    beaconHead = node.attestationPool[].getBeaconHead(head)
    executionHead = withState(proposalState[]):
      when consensusFork >= ConsensusFork.Bellatrix:
        forkyState.data.latest_execution_payload_header.block_hash
      else:
        (static(default(Eth2Digest)))
    latestSafe = beaconHead.safeExecutionBlockHash
    latestFinalized = beaconHead.finalizedExecutionBlockHash
    timestamp = withState(proposalState[]):
      compute_timestamp_at_slot(forkyState.data, forkyState.data.slot)
    random = withState(proposalState[]):
      get_randao_mix(forkyState.data, get_current_epoch(forkyState.data))
    withdrawals = withState(proposalState[]):
      when consensusFork >= ConsensusFork.Capella:
        get_expected_withdrawals(forkyState.data)
      else:
        @[]

  info "Requesting engine payload",
    beaconHead = shortLog(beaconHead.blck),
    executionHead = shortLog(executionHead),
    validatorIndex = validator_index,
    feeRecipient = $feeRecipient

  node.elManager.getPayload(
    PayloadType, beaconHead.blck.bid.root, executionHead, latestSafe,
    latestFinalized, timestamp, random, feeRecipient, withdrawals)

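# Note (added for clarity): the values gathered above - parent execution block
# hash, safe/finalized block hashes, timestamp, RANDAO mix, fee recipient and
# expected withdrawals - correspond to the fork-choice state and payload
# attributes that the execution client needs in order to assemble a payload.
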
proc makeBeaconBlockForHeadAndSlot*(
    PayloadType: type ForkyExecutionPayloadForSigning,
    node: BeaconNode, randao_reveal: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes, head: BlockRef,
    slot: Slot,

    # These parameters are for the builder API
    execution_payload: Opt[PayloadType],
    transactions_root: Opt[Eth2Digest],
    execution_payload_root: Opt[Eth2Digest],
    withdrawals_root: Opt[Eth2Digest],
    kzg_commitments: Opt[KzgCommitments]):
    Future[ForkedBlockResult] {.async: (raises: [CancelledError]).} =
  # Advance state to the slot that we're proposing for
  var cache = StateCache()

  let
    # The clearance state already typically sits at the right slot per
    # `advanceClearanceState`

    # TODO can use `valueOr:`/`return err($error)` if/when
    # https://github.com/status-im/nim-stew/issues/161 is addressed
    maybeState = node.dag.getProposalState(head, slot, cache)

  if maybeState.isErr:
    beacon_block_production_errors.inc()
    return err($maybeState.error)

  let
    state = maybeState.get
    payloadFut =
      if execution_payload.isSome:
        # Builder API

        # In Capella, only get withdrawals root from relay.
        # The execution payload will be small enough to be safe to copy because
        # it won't have transactions (it's blinded)
        var modified_execution_payload = execution_payload
        withState(state[]):
          when consensusFork >= ConsensusFork.Capella and
              PayloadType.kind >= ConsensusFork.Capella:
            let withdrawals = List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD](
              get_expected_withdrawals(forkyState.data))
            if withdrawals_root.isNone or
                hash_tree_root(withdrawals) != withdrawals_root.get:
              # If engine API returned a block, will use that
              return err("Builder relay provided incorrect withdrawals root")
            # Otherwise, the state transition function notices that there are
            # too few withdrawals.
            assign(modified_execution_payload.get.executionPayload.withdrawals,
                   withdrawals)

        let fut = Future[Opt[PayloadType]].Raising([CancelledError]).init(
          "given-payload")
        fut.complete(modified_execution_payload)
        fut
      elif slot.epoch < node.dag.cfg.BELLATRIX_FORK_EPOCH or
          not state[].is_merge_transition_complete:
        let fut = Future[Opt[PayloadType]].Raising([CancelledError]).init(
          "empty-payload")
        fut.complete(Opt.some(default(PayloadType)))
        fut
      else:
        # Create execution payload while packing attestations
        getExecutionPayload(PayloadType, node, head, state, validator_index)

    eth1Proposal = node.getBlockProposalEth1Data(state[])

  if eth1Proposal.hasMissingDeposits:
    beacon_block_production_errors.inc()
    warn "Eth1 deposits not available. Skipping block proposal", slot
    return err("Eth1 deposits not available")

  let
    attestations =
      node.attestationPool[].getAttestationsForBlock(state[], cache)
    exits = withState(state[]):
      node.validatorChangePool[].getBeaconBlockValidatorChanges(
        node.dag.cfg, forkyState.data)
    # TODO workaround for https://github.com/arnetheduck/nim-results/issues/34
    payloadRes = await payloadFut
    payload = payloadRes.valueOr:
      beacon_block_production_errors.inc()
      warn "Unable to get execution payload. Skipping block proposal",
        slot, validator_index
      return err("Unable to get execution payload")

  let res = makeBeaconBlockWithRewards(
    node.dag.cfg,
    state[],
    validator_index,
    randao_reveal,
    eth1Proposal.vote,
    graffiti,
    attestations,
    eth1Proposal.deposits,
    exits,
    node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot),
    payload,
    noRollback, # Temporary state - no need for rollback
    cache,
    verificationFlags = {},
    transactions_root = transactions_root,
    execution_payload_root = execution_payload_root,
    kzg_commitments = kzg_commitments).mapErr do (error: cstring) -> string:
      # This is almost certainly a bug, but it's complex enough that there's a
      # small risk it might happen even when most proposals succeed - thus we
      # log instead of asserting
      beacon_block_production_errors.inc()
      warn "Cannot create block for proposal",
        slot, head = shortLog(head), error
      $error

  var blobsBundleOpt = Opt.none(BlobsBundle)
  when payload is deneb.ExecutionPayloadForSigning:
    blobsBundleOpt = Opt.some(payload.blobsBundle)

  if res.isOk:
    ok(EngineBid(
      blck: res.get().blck,
      executionPayloadValue: payload.blockValue,
      consensusBlockValue: res.get().rewards.blockConsensusValue(),
      blobsBundleOpt: blobsBundleOpt
    ))
  else:
    err(res.error)

proc makeBeaconBlockForHeadAndSlot*(
    PayloadType: type ForkyExecutionPayloadForSigning, node: BeaconNode,
    randao_reveal: ValidatorSig, validator_index: ValidatorIndex,
    graffiti: GraffitiBytes, head: BlockRef, slot: Slot):
    Future[ForkedBlockResult] =
  return makeBeaconBlockForHeadAndSlot(
    PayloadType, node, randao_reveal, validator_index, graffiti, head, slot,
    execution_payload = Opt.none(PayloadType),
    transactions_root = Opt.none(Eth2Digest),
    execution_payload_root = Opt.none(Eth2Digest),
    withdrawals_root = Opt.none(Eth2Digest),
    kzg_commitments = Opt.none(KzgCommitments))

proc getBlindedExecutionPayload[
    EPH: deneb_mev.BlindedExecutionPayloadAndBlobsBundle](
    node: BeaconNode, payloadBuilderClient: RestClientRef, slot: Slot,
    executionBlockHash: Eth2Digest, pubkey: ValidatorPubKey):
    Future[BlindedBlockResult[EPH]] {.async: (raises: [CancelledError, RestError]).} =
  # Not ideal to use `when` here instead of splitting into separate functions,
  # but Nim doesn't overload on the generic EPH type parameter.
  when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
    let
      response = awaitWithTimeout(
          payloadBuilderClient.getHeaderDeneb(
            slot, executionBlockHash, pubkey),
          BUILDER_PROPOSAL_DELAY_TOLERANCE):
        return err "Timeout obtaining Deneb blinded header from builder"

      res = decodeBytes(
        GetHeaderResponseDeneb, response.data, response.contentType)

      blindedHeader = res.valueOr:
        return err(
          "Unable to decode Deneb blinded header: " & $res.error &
          " with HTTP status " & $response.status & ", Content-Type " &
          $response.contentType & " and content " & $response.data)
  else:
    static: doAssert false

  const httpOk = 200
  if response.status != httpOk:
    return err "getBlindedExecutionPayload: non-200 HTTP response"
  else:
    if not verify_builder_signature(
        node.dag.cfg.genesisFork, blindedHeader.data.message,
        blindedHeader.data.message.pubkey, blindedHeader.data.signature):
      return err "getBlindedExecutionPayload: signature verification failed"

    when EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
      template builderBid: untyped = blindedHeader.data.message
      return ok(BuilderBid[EPH](
        blindedBlckPart: EPH(
          execution_payload_header: builderBid.header,
          blob_kzg_commitments: builderBid.blob_kzg_commitments),
        executionPayloadValue: builderBid.value))
    else:
      static: doAssert false

from ./message_router_mev import
  copyFields, getFieldNames, unblindAndRouteBlockMEV

proc constructSignableBlindedBlock[T: deneb_mev.SignedBlindedBeaconBlock](
    blck: deneb.BeaconBlock,
    blindedBundle: deneb_mev.BlindedExecutionPayloadAndBlobsBundle): T =
  # Leaves signature field default, to be filled in by caller
  const
    blckFields = getFieldNames(typeof(blck))
    blckBodyFields = getFieldNames(typeof(blck.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock.message, blck, blckFields)
  copyFields(blindedBlock.message.body, blck.body, blckBodyFields)
  assign(
    blindedBlock.message.body.execution_payload_header,
    blindedBundle.execution_payload_header)
  assign(
    blindedBlock.message.body.blob_kzg_commitments,
    blindedBundle.blob_kzg_commitments)

  blindedBlock

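# Note (added for clarity): `constructSignableBlindedBlock` clones every field
# of the regular block and body via `copyFields` and then overwrites only the
# execution payload header and blob KZG commitments with the builder-provided
# bundle; the signature is left at its default for the caller to fill in.
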
func constructPlainBlindedBlock[T: deneb_mev.BlindedBeaconBlock](
    blck: ForkyBeaconBlock,
    blindedBundle: deneb_mev.BlindedExecutionPayloadAndBlobsBundle): T =
  # https://github.com/nim-lang/Nim/issues/23020 workaround
  static: doAssert T is deneb_mev.BlindedBeaconBlock

  const
    blckFields = getFieldNames(typeof(blck))
    blckBodyFields = getFieldNames(typeof(blck.body))

  var blindedBlock: T

  # https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#block-proposal
  copyFields(blindedBlock, blck, blckFields)
  copyFields(blindedBlock.body, blck.body, blckBodyFields)
  assign(
    blindedBlock.body.execution_payload_header,
    blindedBundle.execution_payload_header)
  assign(
    blindedBlock.body.blob_kzg_commitments,
    blindedBundle.blob_kzg_commitments)

  blindedBlock

proc blindedBlockCheckSlashingAndSign[T: deneb_mev.SignedBlindedBeaconBlock](
    node: BeaconNode, slot: Slot, validator: AttachedValidator,
    validator_index: ValidatorIndex, nonsignedBlindedBlock: T):
    Future[Result[T, string]] {.async: (raises: [CancelledError]).} =
  # Check with slashing protection before submitBlindedBlock
  let
    fork = node.dag.forkAtEpoch(slot.epoch)
    genesis_validators_root = node.dag.genesis_validators_root
    blockRoot = hash_tree_root(nonsignedBlindedBlock.message)
    signingRoot = compute_block_signing_root(
      fork, genesis_validators_root, slot, blockRoot)
    notSlashable = node.attachedValidators
      .slashingProtection
      .registerBlock(validator_index, validator.pubkey, slot, signingRoot)

  if notSlashable.isErr:
    warn "Slashing protection activated for MEV block",
      blockRoot = shortLog(blockRoot), blck = shortLog(nonsignedBlindedBlock),
      signingRoot = shortLog(signingRoot), validator = validator.pubkey,
      slot = slot, existingProposal = notSlashable.error
    return err("MEV proposal would be slashable: " & $notSlashable.error)

  var blindedBlock = nonsignedBlindedBlock
  blindedBlock.signature = block:
    let res = await validator.getBlockSignature(
      fork, genesis_validators_root, slot, blockRoot, blindedBlock.message)
    if res.isErr():
      return err("Unable to sign block: " & res.error())
    res.get()

  return ok blindedBlock

proc getUnsignedBlindedBeaconBlock[T: deneb_mev.SignedBlindedBeaconBlock](
    node: BeaconNode, slot: Slot,
    validator_index: ValidatorIndex, forkedBlock: ForkedBeaconBlock,
    executionPayloadHeader: capella.ExecutionPayloadHeader |
                            deneb_mev.BlindedExecutionPayloadAndBlobsBundle):
    Result[T, string] =
  withBlck(forkedBlock):
    when consensusFork >= ConsensusFork.Deneb:
      when not (
          (T is deneb_mev.SignedBlindedBeaconBlock and
           consensusFork == ConsensusFork.Deneb)):
        return err("getUnsignedBlindedBeaconBlock: mismatched block/payload types")
      else:
        return ok constructSignableBlindedBlock[T](
          forkyBlck, executionPayloadHeader)
    else:
      return err("getUnsignedBlindedBeaconBlock: attempt to construct pre-Deneb blinded block")

proc getBlindedBlockParts[
    EPH: capella.ExecutionPayloadHeader |
         deneb_mev.BlindedExecutionPayloadAndBlobsBundle](
    node: BeaconNode, payloadBuilderClient: RestClientRef, head: BlockRef,
    pubkey: ValidatorPubKey, slot: Slot, randao: ValidatorSig,
    validator_index: ValidatorIndex, graffiti: GraffitiBytes):
    Future[Result[(EPH, UInt256, UInt256, ForkedBeaconBlock), string]]
    {.async: (raises: [CancelledError]).} =
  let
    executionBlockHash = node.dag.loadExecutionBlockHash(head).valueOr:
      # With checkpoint sync, the checkpoint block may be unavailable,
      # and it could already be the parent of the new block before backfill.
      # Fallback to EL, hopefully the block is available on the local path.
      warn "Failed to load parent execution block hash, skipping block builder",
        slot, validator_index, head = shortLog(head)
      return err("loadExecutionBlockHash failed")

    executionPayloadHeader =
      try:
        awaitWithTimeout(
            getBlindedExecutionPayload[EPH](
              node, payloadBuilderClient, slot, executionBlockHash, pubkey),
            BUILDER_PROPOSAL_DELAY_TOLERANCE):
          BlindedBlockResult[EPH].err("getBlindedExecutionPayload timed out")
      except RestDecodingError as exc:
        BlindedBlockResult[EPH].err(
          "getBlindedExecutionPayload REST decoding error: " & exc.msg)
      except RestError as exc:
        BlindedBlockResult[EPH].err(
          "getBlindedExecutionPayload REST error: " & exc.msg)

  if executionPayloadHeader.isErr:
    warn "Could not obtain blinded execution payload header",
      error = executionPayloadHeader.error, slot, validator_index,
      head = shortLog(head)
    # Haven't committed to the MEV block, so allow EL fallback.
    return err(executionPayloadHeader.error)

  # When creating this block, need to ensure it uses the MEV-provided execution
  # payload, both to avoid repeated calls to network services and to ensure the
  # consistency of this block (e.g., its state root being correct). Since block
  # processing does not work directly using blinded blocks, fix up transactions
  # root after running the state transition function on an otherwise equivalent
  # non-blinded block without transactions.
  #
  # This doesn't have withdrawals, which each node has regardless of engine or
  # builder API. makeBeaconBlockForHeadAndSlot fills it in later.
  when EPH is capella.ExecutionPayloadHeader:
    type PayloadType = capella.ExecutionPayloadForSigning
    template actualEPH: untyped = executionPayloadHeader.get.blindedBlckPart
    let withdrawals_root =
      Opt.some executionPayloadHeader.get.blindedBlckPart.withdrawals_root
    const kzg_commitments = Opt.none KzgCommitments

    var shimExecutionPayload: PayloadType
    copyFields(
      shimExecutionPayload.executionPayload,
      executionPayloadHeader.get.blindedBlckPart, getFieldNames(EPH))
  elif EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle:
    type PayloadType = deneb.ExecutionPayloadForSigning
    template actualEPH: untyped =
      executionPayloadHeader.get.blindedBlckPart.execution_payload_header
    let
      withdrawals_root = Opt.some actualEPH.withdrawals_root
      kzg_commitments = Opt.some(
        executionPayloadHeader.get.blindedBlckPart.blob_kzg_commitments)

    var shimExecutionPayload: PayloadType
    type DenebEPH =
      deneb_mev.BlindedExecutionPayloadAndBlobsBundle.execution_payload_header
    copyFields(
      shimExecutionPayload.executionPayload, actualEPH, getFieldNames(DenebEPH))
  else:
    static: doAssert false

  let newBlock = await makeBeaconBlockForHeadAndSlot(
    PayloadType, node, randao, validator_index, graffiti, head, slot,
    execution_payload = Opt.some shimExecutionPayload,
    transactions_root = Opt.some actualEPH.transactions_root,
    execution_payload_root = Opt.some hash_tree_root(actualEPH),
    withdrawals_root = withdrawals_root,
    kzg_commitments = kzg_commitments)

  if newBlock.isErr():
    # Haven't committed to the MEV block, so allow EL fallback.
    return err(newBlock.error) # already logged elsewhere!

  let forkedBlck = newBlock.get()

  return ok(
    (executionPayloadHeader.get.blindedBlckPart,
     executionPayloadHeader.get.executionPayloadValue,
     forkedBlck.consensusBlockValue,
     forkedBlck.blck))

proc getBuilderBid[SBBB: deneb_mev.SignedBlindedBeaconBlock](
    node: BeaconNode, payloadBuilderClient: RestClientRef, head: BlockRef,
    validator_pubkey: ValidatorPubKey, slot: Slot, randao: ValidatorSig,
    graffitiBytes: GraffitiBytes, validator_index: ValidatorIndex):
    Future[BlindedBlockResult[SBBB]] {.async: (raises: [CancelledError]).} =
  ## Returns the unsigned blinded block obtained from the Builder API.
  ## Used by the BN's own validators, but not the REST server
  when SBBB is deneb_mev.SignedBlindedBeaconBlock:
    type EPH = deneb_mev.BlindedExecutionPayloadAndBlobsBundle
  else:
    static: doAssert false

  let blindedBlockParts = await getBlindedBlockParts[EPH](
    node, payloadBuilderClient, head, validator_pubkey, slot, randao,
    validator_index, graffitiBytes)
  if blindedBlockParts.isErr:
    # Not signed yet, fine to try to fall back on EL
    beacon_block_builder_missed_with_fallback.inc()
    return err blindedBlockParts.error()

  # These, together, get combined into the blinded block for signing and
  # proposal through the relay network.
  let (executionPayloadHeader, bidValue, consensusValue, forkedBlck) =
    blindedBlockParts.get

  let unsignedBlindedBlock = getUnsignedBlindedBeaconBlock[SBBB](
    node, slot, validator_index, forkedBlck, executionPayloadHeader)

  if unsignedBlindedBlock.isErr:
    return err unsignedBlindedBlock.error()

  ok(BuilderBid[SBBB](
    blindedBlckPart: unsignedBlindedBlock.get,
    executionPayloadValue: bidValue,
    consensusBlockValue: consensusValue
  ))

proc proposeBlockMEV(
    node: BeaconNode, payloadBuilderClient: RestClientRef,
    blindedBlock: deneb_mev.SignedBlindedBeaconBlock):
    Future[Result[BlockRef, string]] {.async: (raises: [CancelledError]).} =
  let unblindedBlockRef = await node.unblindAndRouteBlockMEV(
    payloadBuilderClient, blindedBlock)
  return if unblindedBlockRef.isOk and unblindedBlockRef.get.isSome:
    beacon_blocks_proposed.inc()
    ok(unblindedBlockRef.get.get)
  else:
    # unblindedBlockRef.isOk and unblindedBlockRef.get.isNone indicates that
    # the block failed to validate and integrate into the DAG, which for the
    # purpose of this return value, is equivalent. It's used to drive Beacon
    # REST API output.
    #
    # https://collective.flashbots.net/t/post-mortem-april-3rd-2023-mev-boost-relay-incident-and-related-timing-issue/1540
    # has caused false positives, because
    # "A potential mitigation to this attack is to introduce a cutoff timing
    # into the proposer's slot whereafter this time (e.g. 3 seconds) the relay
    # will no longer return a block to the proposer. Relays began to roll out
    # this mitigation in the evening of April 3rd UTC time with a 2 second
    # cutoff, and notified other relays to do the same. After receiving
    # credible reports of honest validators missing their slots the suggested
    # timing cutoff was increased to 3 seconds."
    let errMsg =
      if unblindedBlockRef.isErr:
        unblindedBlockRef.error
      else:
        "Unblinded block not returned to proposer"
    err errMsg

func isEFMainnet(cfg: RuntimeConfig): bool =
  cfg.DEPOSIT_CHAIN_ID == 1 and cfg.DEPOSIT_NETWORK_ID == 1

proc makeBlindedBeaconBlockForHeadAndSlot*[BBB: ForkyBlindedBeaconBlock](
    node: BeaconNode, payloadBuilderClient: RestClientRef,
    randao_reveal: ValidatorSig, validator_index: ValidatorIndex,
    graffiti: GraffitiBytes, head: BlockRef, slot: Slot):
    Future[BlindedBlockResult[BBB]] {.async: (raises: [CancelledError]).} =
  ## Requests a beacon node to produce a valid blinded block, which can then be
  ## signed by a validator. A blinded block is a block with only a transactions
  ## root, rather than a full transactions list.
  ##
  ## This function is used by the validator client, but not the beacon node for
  ## its own validators.
  when BBB is deneb_mev.BlindedBeaconBlock:
    type EPH = deneb_mev.BlindedExecutionPayloadAndBlobsBundle
  else:
    static: doAssert false

  let
    pubkey =
      # Relevant state for knowledge of validators
      withState(node.dag.headState):
        if node.dag.cfg.isEFMainnet and livenessFailsafeInEffect(
            forkyState.data.block_roots.data, forkyState.data.slot):
          # It's head block's slot which matters here, not proposal slot
          return err("Builder API liveness failsafe in effect")

        if distinctBase(validator_index) >= forkyState.data.validators.lenu64:
          debug "makeBlindedBeaconBlockForHeadAndSlot: invalid validator index",
            head = shortLog(head),
            validator_index,
            validators_len = forkyState.data.validators.len
          return err("Invalid validator index")

        forkyState.data.validators.item(validator_index).pubkey

    blindedBlockParts = await getBlindedBlockParts[EPH](
      node, payloadBuilderClient, head, pubkey, slot, randao_reveal,
      validator_index, graffiti)
  if blindedBlockParts.isErr:
    # Don't try EL fallback -- VC specifically requested a blinded block
    return err("Unable to create blinded block")

  let (executionPayloadHeader, bidValue, consensusValue, forkedBlck) =
    blindedBlockParts.get
  withBlck(forkedBlck):
    when consensusFork >= ConsensusFork.Capella:
      when ((consensusFork == ConsensusFork.Deneb and
             EPH is deneb_mev.BlindedExecutionPayloadAndBlobsBundle) or
            (consensusFork == ConsensusFork.Capella and
             EPH is capella.ExecutionPayloadHeader)):
        return ok(
          BuilderBid[BBB](
            blindedBlckPart:
              constructPlainBlindedBlock[BBB](forkyBlck, executionPayloadHeader),
            executionPayloadValue: bidValue,
            consensusBlockValue: consensusValue))
      else:
        return err("makeBlindedBeaconBlockForHeadAndSlot: mismatched block/payload types")
    else:
      return err("Attempt to create pre-Capella blinded block")

proc collectBids(
    SBBB: typedesc, EPS: typedesc, node: BeaconNode,
    payloadBuilderClient: RestClientRef, validator_pubkey: ValidatorPubKey,
    validator_index: ValidatorIndex, graffitiBytes: GraffitiBytes,
    head: BlockRef, slot: Slot,
    randao: ValidatorSig): Future[Bids[SBBB]] {.async: (raises: [CancelledError]).} =
  let usePayloadBuilder =
    if not payloadBuilderClient.isNil:
      withState(node.dag.headState):
        # Head slot, not proposal slot, matters here
        # TODO it might make some sense to allow use of builder API if local
        # EL fails -- i.e. it would change priorities, so any block from the
        # execution layer client would override builder API. But it seems an
        # odd requirement to produce no block at all in those conditions.
        (not node.dag.cfg.isEFMainnet) or (not livenessFailsafeInEffect(
          forkyState.data.block_roots.data, forkyState.data.slot))
    else:
      false

  let
    payloadBuilderBidFut =
      if usePayloadBuilder:
        when not (EPS is bellatrix.ExecutionPayloadForSigning):
          getBuilderBid[SBBB](node, payloadBuilderClient, head,
                              validator_pubkey, slot, randao, graffitiBytes,
                              validator_index)
        else:
          let fut = newFuture[BlindedBlockResult[SBBB]]("builder-bid")
          fut.complete(BlindedBlockResult[SBBB].err(
            "Bellatrix Builder API unsupported"))
          fut
      else:
        let fut = newFuture[BlindedBlockResult[SBBB]]("builder-bid")
        fut.complete(BlindedBlockResult[SBBB].err(
          "either payload builder disabled or liveness failsafe active"))
        fut
    engineBlockFut = makeBeaconBlockForHeadAndSlot(
      EPS, node, randao, validator_index, graffitiBytes, head, slot)

  # getBuilderBid times out after BUILDER_PROPOSAL_DELAY_TOLERANCE, with 1 more
  # second for remote validators. makeBeaconBlockForHeadAndSlot times out after
  # 1 second.
  await allFutures(payloadBuilderBidFut, engineBlockFut)
  doAssert payloadBuilderBidFut.finished and engineBlockFut.finished

  let builderBid =
    if payloadBuilderBidFut.completed:
      if payloadBuilderBidFut.value().isOk:
        Opt.some(payloadBuilderBidFut.value().value())
      elif usePayloadBuilder:
        notice "Payload builder error",
          slot, head = shortLog(head), validator = shortLog(validator_pubkey),
          err = payloadBuilderBidFut.value().error()
        Opt.none(BuilderBid[SBBB])
      else:
        # Effectively the same case, but without the log message
        Opt.none(BuilderBid[SBBB])
    else:
      notice "Payload builder bid request failed",
        slot, head = shortLog(head), validator = shortLog(validator_pubkey),
        err = payloadBuilderBidFut.error.msg
      Opt.none(BuilderBid[SBBB])

  let engineBid =
    if engineBlockFut.completed:
      if engineBlockFut.value.isOk:
        Opt.some(engineBlockFut.value().value())
      else:
        notice "Engine block building error",
          slot, head = shortLog(head), validator = shortLog(validator_pubkey),
          err = engineBlockFut.value.error()
        Opt.none(EngineBid)
    else:
      notice "Engine block building failed",
        slot, head = shortLog(head), validator = shortLog(validator_pubkey),
        err = engineBlockFut.error.msg
      Opt.none(EngineBid)

  Bids[SBBB](
    engineBid: engineBid,
    builderBid: builderBid)


func builderBetterBid(
    localBlockValueBoost: uint8, builderValue: UInt256, engineValue: Wei): bool =
  # Scale down to ensure no overflows; if lower few bits would have been
  # otherwise decisive, was close enough not to matter. Calibrate to let
  # uint8-range percentages avoid overflowing.
  const scalingBits = 10
  static: doAssert 1 shl scalingBits >
    high(typeof(localBlockValueBoost)).uint16 + 100
  let
    scaledBuilderValue = (builderValue shr scalingBits) * 100
    scaledEngineValue = engineValue shr scalingBits
  scaledBuilderValue >
    scaledEngineValue * (localBlockValueBoost.uint16 + 100).u256
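
# Rough worked example of the comparison above (illustrative only; the literal
# values are arbitrary and the small truncation from `shr scalingBits` does not
# change the outcome here): with `localBlockValueBoost = 10`, the builder bid
# must exceed the engine value by more than 10% to win, so
#
#   builderBetterBid(10, 1_110_000_000.u256, 1_000_000_000.u256)  # --> true
#   builderBetterBid(10, 1_050_000_000.u256, 1_000_000_000.u256)  # --> false
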
proc proposeBlockAux(
|
|
|
|
SBBB: typedesc, EPS: typedesc, node: BeaconNode,
|
|
|
|
validator: AttachedValidator, validator_index: ValidatorIndex,
|
|
|
|
head: BlockRef, slot: Slot, randao: ValidatorSig, fork: Fork,
|
|
|
|
genesis_validators_root: Eth2Digest,
|
2024-02-07 11:26:04 +00:00
|
|
|
localBlockValueBoost: uint8): Future[BlockRef] {.async: (raises: [CancelledError]).} =
|
|
|
|
let
|
2024-03-14 03:44:00 +00:00
|
|
|
graffitiBytes = node.getGraffitiBytes(validator)
|
2024-02-07 11:26:04 +00:00
|
|
|
payloadBuilderClient =
|
|
|
|
node.getPayloadBuilderClient(validator_index.distinctBase).valueOr(nil)
|
|
|
|
|
|
|
|
collectedBids = await collectBids(
|
|
|
|
SBBB, EPS, node, payloadBuilderClient, validator.pubkey, validator_index,
|
2024-03-14 03:44:00 +00:00
|
|
|
graffitiBytes, head, slot, randao)
|
2024-02-07 11:26:04 +00:00
|
|
|
|
|
|
|
useBuilderBlock =
|
|
|
|
if collectedBids.builderBid.isSome():
|
|
|
|
collectedBids.engineBid.isNone() or builderBetterBid(
|
|
|
|
localBlockValueBoost,
|
2024-03-11 14:18:50 +00:00
|
|
|
collectedBids.builderBid.value().executionPayloadValue,
|
|
|
|
collectedBids.engineBid.value().executionPayloadValue)
|
2024-02-07 11:26:04 +00:00
|
|
|
else:
|
|
|
|
if not collectedBids.engineBid.isSome():
|
|
|
|
return head # errors logged in router
|
|
|
|
false
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2023-12-15 20:31:14 +00:00
|
|
|
  # There should always be an engine bid, and if payloadBuilderClient exists,
  # not getting a builder bid is also an error. Do not report lack of builder
  # when that's intentional. Replicate some of the nested if statements here,
  # because that avoids entangling logging with other functionality. The logs
  # here are intended to clarify that, for example, when the builder API relay
  # URL is provided for this validator, it's reasonable for Nimbus not to use
  # it for every block.
|
2024-02-07 11:26:04 +00:00
|
|
|
if collectedBids.engineBid.isSome():
|
2023-12-15 20:31:14 +00:00
|
|
|
# Three cases: builder bid expected and absent, builder bid expected and
|
|
|
|
# present, and builder bid not expected.
|
2024-02-07 11:26:04 +00:00
|
|
|
if collectedBids.builderBid.isSome():
|
2023-12-15 20:31:14 +00:00
|
|
|
info "Compared engine and builder block bids",
|
|
|
|
localBlockValueBoost,
|
|
|
|
useBuilderBlock,
|
|
|
|
builderBlockValue =
|
2024-03-11 14:18:50 +00:00
|
|
|
toString(collectedBids.builderBid.value().executionPayloadValue, 10),
|
2024-02-10 04:13:00 +00:00
|
|
|
engineBlockValue =
|
2024-03-11 14:18:50 +00:00
|
|
|
toString(collectedBids.engineBid.value().executionPayloadValue, 10)
|
2023-12-15 20:31:14 +00:00
|
|
|
elif payloadBuilderClient.isNil:
|
|
|
|
discard # builder API not configured for this block
|
|
|
|
else:
|
|
|
|
info "Did not receive expected builder bid; using engine block",
|
2024-03-11 14:18:50 +00:00
|
|
|
engineBlockValue = collectedBids.engineBid.value().executionPayloadValue
|
2023-12-15 20:31:14 +00:00
|
|
|
else:
|
|
|
|
# Similar three cases: builder bid expected and absent, builder bid
|
|
|
|
# expected and present, and builder bid not expected. However, only
|
|
|
|
# the second is worth logging, because the other two result in this
|
|
|
|
# block being missed altogether, and with details logged elsewhere.
|
2024-02-07 11:26:04 +00:00
|
|
|
if collectedBids.builderBid.isSome:
|
2023-12-15 20:31:14 +00:00
|
|
|
info "Did not receive expected engine bid; using builder block",
|
|
|
|
builderBlockValue =
|
2024-03-11 14:18:50 +00:00
|
|
|
collectedBids.builderBid.value().executionPayloadValue
|
2023-12-15 20:31:14 +00:00
|
|
|
|
2023-08-23 16:39:57 +00:00
|
|
|
if useBuilderBlock:
|
|
|
|
let
|
|
|
|
blindedBlock = (await blindedBlockCheckSlashingAndSign(
|
|
|
|
node, slot, validator, validator_index,
|
2024-02-07 11:26:04 +00:00
|
|
|
collectedBids.builderBid.value().blindedBlckPart)).valueOr:
|
2023-08-23 16:39:57 +00:00
|
|
|
return head
|
|
|
|
# Before proposeBlockMEV, can fall back to EL; after, cannot without
|
|
|
|
# risking slashing.
|
|
|
|
maybeUnblindedBlock = await proposeBlockMEV(
|
|
|
|
node, payloadBuilderClient, blindedBlock)
|
|
|
|
|
|
|
|
return maybeUnblindedBlock.valueOr:
|
|
|
|
warn "Blinded block proposal incomplete",
|
|
|
|
head = shortLog(head), slot, validator_index,
|
|
|
|
validator = shortLog(validator),
|
|
|
|
err = maybeUnblindedBlock.error,
|
|
|
|
blindedBlck = shortLog(blindedBlock)
|
|
|
|
beacon_block_builder_missed_without_fallback.inc()
|
|
|
|
return head
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
let engineBid = collectedBids.engineBid.value()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
withBlck(engineBid.blck):
|
2023-08-23 16:39:57 +00:00
|
|
|
let
|
2023-09-21 10:49:14 +00:00
|
|
|
blockRoot = hash_tree_root(forkyBlck)
|
2023-08-23 16:39:57 +00:00
|
|
|
signingRoot = compute_block_signing_root(
|
|
|
|
fork, genesis_validators_root, slot, blockRoot)
|
|
|
|
|
|
|
|
notSlashable = node.attachedValidators
|
|
|
|
.slashingProtection
|
|
|
|
.registerBlock(validator_index, validator.pubkey, slot, signingRoot)
|
|
|
|
|
|
|
|
if notSlashable.isErr:
|
|
|
|
warn "Slashing protection activated for block proposal",
|
2023-09-21 10:49:14 +00:00
|
|
|
blockRoot = shortLog(blockRoot), blck = shortLog(forkyBlck),
|
2023-08-23 16:39:57 +00:00
|
|
|
signingRoot = shortLog(signingRoot),
|
|
|
|
validator = validator.pubkey,
|
|
|
|
slot = slot,
|
|
|
|
existingProposal = notSlashable.error
|
|
|
|
return head
|
|
|
|
|
|
|
|
let
|
|
|
|
signature =
|
|
|
|
block:
|
|
|
|
let res = await validator.getBlockSignature(
|
2024-02-07 11:26:04 +00:00
|
|
|
fork, genesis_validators_root, slot, blockRoot, engineBid.blck)
|
2023-08-23 16:39:57 +00:00
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to sign block",
|
|
|
|
validator = shortLog(validator), error_msg = res.error()
|
|
|
|
return head
|
|
|
|
res.get()
|
2023-11-06 06:48:43 +00:00
|
|
|
signedBlock = consensusFork.SignedBeaconBlock(
|
|
|
|
message: forkyBlck, signature: signature, root: blockRoot)
|
|
|
|
blobsOpt =
|
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
|
|
|
template blobsBundle: untyped =
|
2024-02-07 11:26:04 +00:00
|
|
|
engineBid.blobsBundleOpt.get
|
2023-11-06 06:48:43 +00:00
|
|
|
Opt.some(signedBlock.create_blob_sidecars(
|
|
|
|
blobsBundle.proofs, blobsBundle.blobs))
|
2023-08-23 16:39:57 +00:00
|
|
|
else:
|
2023-11-06 06:48:43 +00:00
|
|
|
Opt.none(seq[BlobSidecar])
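    # Informational note: for Deneb onward, each blob in the engine's bundle
    # becomes one `BlobSidecar` that is gossiped alongside the signed block
    # (bounded by MAX_BLOBS_PER_BLOCK per proposal).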
|
2024-01-16 12:41:49 +00:00
|
|
|
|
|
|
|
    # BIG BUG SOURCE: The `let` below cannot be combined with the others above!
    # If combined, there is sometimes a `SIGSEGV` during `test_keymanager_api`.
    # This has only been observed on macOS (aarch64) in Jenkins, not on GitHub.
|
|
|
|
#
|
|
|
|
# - macOS 14.2.1 (23C71)
|
|
|
|
# - Xcode 15.1 (15C65)
|
|
|
|
# - Nim v1.6.18 (a749a8b742bd0a4272c26a65517275db4720e58a)
|
|
|
|
#
|
|
|
|
    # The issue started occurring around 12 Jan 2024, in a CI run for PR #5731.
    # The PR did not change anything related to this, suggesting an environment
    # or hardware change. The issue is flaky; it could have been introduced
    # earlier, before surfacing in the aforementioned PR. There is roughly a
    # 30% chance of hitting the bug.
|
|
|
|
#
|
|
|
|
# [2024-01-12T11:54:21.011Z] Wrote test_keymanager_api/bootstrap_node.enr
|
|
|
|
# [2024-01-12T11:54:29.294Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
|
|
|
|
# [2024-01-12T11:54:29.294Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
|
|
|
|
# [2024-01-12T11:54:34.870Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(1016) main
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(1006) NimMain
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-libp2p/libp2p/protocols/rendezvous.nim(997) PreMain
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1502) atmtest_keymanager_apidotnim_Init000
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1475) main
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1481) main
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(307) startBeaconNode
|
|
|
|
# [2024-01-12T11:54:34.870Z] beacon_chain/nimbus_beacon_node.nim(1900) start
|
|
|
|
# [2024-01-12T11:54:34.870Z] beacon_chain/nimbus_beacon_node.nim(1847) run
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(1465) delayedTests
|
|
|
|
# [2024-01-12T11:54:34.870Z] tests/test_keymanager_api.nim(392) runTests
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-unittest2/unittest2.nim(1147) runTests
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-unittest2/unittest2.nim(1086) runDirect
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-testutils/testutils/unittests.nim(16) runTestX60gensym2933
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(656) waitFor
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(631) pollFor
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(378) futureContinue
|
|
|
|
# [2024-01-12T11:54:34.870Z] beacon_chain/validators/beacon_validators.nim(82) proposeBlockAux
|
|
|
|
# [2024-01-12T11:54:34.870Z] vendor/nimbus-build-system/vendor/Nim/lib/system/excpt.nim(631) signalHandler
|
|
|
|
# [2024-01-12T11:54:34.870Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
|
|
|
|
#
|
2024-02-26 02:02:03 +00:00
|
|
|
# This appeared again around 25 Feb 2024, in a CI run for PR #5959,
|
|
|
|
# despite the extra `let` having been applied -- once more observed on
|
|
|
|
# macOS (aarch64) in Jenkins, and much rarer than before.
|
|
|
|
#
|
|
|
|
# [2024-02-25T23:21:24.533Z] Wrote test_keymanager_api/bootstrap_node.enr
|
|
|
|
# [2024-02-25T23:21:32.756Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
|
|
|
|
# [2024-02-25T23:21:32.756Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
|
|
|
|
# [2024-02-25T23:21:37.219Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1068) main
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1058) NimMain
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1049) PreMain
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1501) atmtest_keymanager_apidotnim_Init000
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1474) main
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1480) main
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(307) startBeaconNode
|
|
|
|
# [2024-02-25T23:21:37.219Z] beacon_chain/nimbus_beacon_node.nim(1916) start
|
|
|
|
# [2024-02-25T23:21:37.219Z] beacon_chain/nimbus_beacon_node.nim(1863) run
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(1464) delayedTests
|
|
|
|
# [2024-02-25T23:21:37.219Z] tests/test_keymanager_api.nim(391) runTests
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-unittest2/unittest2.nim(1151) runTests
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-unittest2/unittest2.nim(1086) runDirect
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-testutils/testutils/unittests.nim(16) runTestX60gensym3188
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(660) waitFor
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(635) pollFor
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronos/chronos/internal/asyncfutures.nim(382) futureContinue
|
|
|
|
# [2024-02-25T23:21:37.219Z] vendor/nim-chronicles/chronicles.nim(251) proposeBlockAux
|
|
|
|
# [2024-02-25T23:21:37.219Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
|
|
|
|
#
|
|
|
|
# One theory is that PR #5946 may increase the frequency, as there were
|
|
|
|
# times where the Jenkins CI failed almost every time using a shorter trace.
|
|
|
|
# However, the problem was once more flaky, with some passes in-between.
|
|
|
|
# For now, PR #5946 was reverted (low priority), and the problem is gone,
|
|
|
|
# whether related or not.
|
|
|
|
#
|
|
|
|
# [2024-02-23T23:11:47.700Z] Wrote test_keymanager_api/bootstrap_node.enr
|
|
|
|
# [2024-02-23T23:11:54.728Z] Serialization/deserialization [Beacon Node] [Preset: mainnet] . (0.00s)
|
|
|
|
# [2024-02-23T23:11:54.728Z] ListKeys requests [Beacon Node] [Preset: mainnet] .... (0.01s)
|
|
|
|
# [2024-02-23T23:11:59.523Z] ImportKeystores requests [Beacon Node] [Preset: mainnet] Traceback (most recent call last, using override)
|
|
|
|
# [2024-02-23T23:11:59.523Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1067) main
|
|
|
|
# [2024-02-23T23:11:59.523Z] vendor/nim-libp2p/libp2p/protocols/pubsub/pubsub.nim(1057) NimMain
|
|
|
|
# [2024-02-23T23:11:59.523Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-02-23T23:11:59.523Z] vendor/nim-chronos/chronos/internal/asyncengine.nim(150) poll
|
|
|
|
# [2024-02-23T23:11:59.523Z] SIGSEGV: Illegal storage access. (Attempt to read from nil?)
|
|
|
|
#
|
2024-01-16 12:41:49 +00:00
|
|
|
    # The generated `nimcache` differs slightly if the `let` statements are
    # separated from a single block; separation introduces an additional state
    # in the closure iterator. Could this change, maybe combined with some
    # macOS-specific compiler quirks, trigger the `SIGSEGV`? Maybe the extra
    # state adds just enough complexity to the function to disable certain
    # problematic optimizations?
|
|
|
|
# The change in size of the environment changes a number of things such as
|
|
|
|
# alignment and which parts of an environment contain pointers and so on,
|
|
|
|
# which in turn may have surprising behavioural effects, ie most likely this
|
|
|
|
# extra state masks some underlying issue. Furthermore, the combination of
|
|
|
|
# `(await xyz).valueOr: return` is not very commonly used with other `await`
|
|
|
|
# in the same `let` block, which could explain this not being more common.
|
|
|
|
#
|
|
|
|
# Note that when compiling for Wasm, there are similar bugs with `results`
|
|
|
|
# when inlining unwraps, e.g., in `eth2_rest_serialization.nim`.
|
|
|
|
# These have not been investigated thoroughly so far as that project uses
|
|
|
|
# Nim 2.0 with --mm:orc and is just a prototype for Wasm, no production use.
|
|
|
|
# But maybe there is something weird going on with `results` related to the
|
|
|
|
# random `SIGSEGV` that we are now observing here, related to doing too much
|
|
|
|
# inline logic without defining intermediate isolated `let` statements.
|
|
|
|
#
|
|
|
|
# if mediaType == ApplicationJsonMediaType:
|
|
|
|
# try:
|
|
|
|
# - ok RestJson.decode(value, T,
|
|
|
|
# - requireAllFields = true,
|
|
|
|
# - allowUnknownFields = true)
|
|
|
|
# + let r = RestJson.decode(value, T,
|
|
|
|
# + requireAllFields = true,
|
|
|
|
# + allowUnknownFields = true)
|
|
|
|
# + ok r
|
|
|
|
# except SerializationError as exc:
|
|
|
|
# debug "Failed to deserialize REST JSON data",
|
|
|
|
# err = exc.formatMsg("<data>"),
|
|
|
|
#
|
|
|
|
# At this time we can only speculate about the trigger of these issues.
|
|
|
|
# Until a shared pattern can be identified, it is better to apply
|
|
|
|
# workarounds that at least avoid the known to be reachable triggers.
|
|
|
|
# The solution is hacky and far from desirable; it is what it is.
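    #
    # Purely illustrative sketch of the shape being avoided (the surrounding
    # code below is the real thing): combining a `valueOr` early return with
    # other awaits inside one `let` block, e.g.
    #
    #   let
    #     signature = ...             # awaited earlier in the same `let`
    #     newBlockRef = (
    #       await node.router.routeSignedBeaconBlock(signedBlock, blobsOpt)
    #     ).valueOr:
    #       return head
    #
    # whereas keeping the final binding in its own `let`, as done below,
    # introduces the extra closure-iterator state that appears to sidestep the
    # crash.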
|
|
|
|
let
|
2023-11-06 06:48:43 +00:00
|
|
|
newBlockRef = (
|
|
|
|
await node.router.routeSignedBeaconBlock(signedBlock, blobsOpt)
|
|
|
|
).valueOr:
|
|
|
|
return head # Errors logged in router
|
2023-08-23 16:39:57 +00:00
|
|
|
|
|
|
|
if newBlockRef.isNone():
|
|
|
|
return head # Validation errors logged in router
|
|
|
|
|
|
|
|
notice "Block proposed",
|
2023-09-21 10:49:14 +00:00
|
|
|
blockRoot = shortLog(blockRoot), blck = shortLog(forkyBlck),
|
2023-08-23 16:39:57 +00:00
|
|
|
signature = shortLog(signature), validator = shortLog(validator)
|
|
|
|
|
|
|
|
beacon_blocks_proposed.inc()
|
|
|
|
|
|
|
|
return newBlockRef.get()
|
|
|
|
|
|
|
|
proc proposeBlock(node: BeaconNode,
|
|
|
|
validator: AttachedValidator,
|
|
|
|
validator_index: ValidatorIndex,
|
|
|
|
head: BlockRef,
|
2024-02-07 11:26:04 +00:00
|
|
|
slot: Slot): Future[BlockRef] {.async: (raises: [CancelledError]).} =
|
2023-08-23 16:39:57 +00:00
|
|
|
if head.slot >= slot:
|
|
|
|
# We should normally not have a head newer than the slot we're proposing for
|
|
|
|
# but this can happen if block proposal is delayed
|
|
|
|
warn "Skipping proposal, have newer head already",
|
|
|
|
headSlot = shortLog(head.slot),
|
|
|
|
headBlockRoot = shortLog(head.root),
|
|
|
|
slot = shortLog(slot)
|
|
|
|
return head
|
|
|
|
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
randao = block:
|
|
|
|
let res = await validator.getEpochSignature(
|
|
|
|
fork, genesis_validators_root, slot.epoch)
|
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to generate randao reveal",
|
|
|
|
validator = shortLog(validator), error_msg = res.error()
|
|
|
|
return head
|
|
|
|
res.get()
|
|
|
|
|
|
|
|
template proposeBlockContinuation(type1, type2: untyped): auto =
|
|
|
|
await proposeBlockAux(
|
|
|
|
type1, type2, node, validator, validator_index, head, slot, randao, fork,
|
|
|
|
genesis_validators_root, node.config.localBlockValueBoost)
|
|
|
|
|
2023-11-16 00:20:13 +00:00
|
|
|
return withConsensusFork(node.dag.cfg.consensusForkAtEpoch(slot.epoch)):
|
2024-02-28 18:27:26 +00:00
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
2023-08-23 16:39:57 +00:00
|
|
|
proposeBlockContinuation(
|
2023-11-16 00:20:13 +00:00
|
|
|
consensusFork.SignedBlindedBeaconBlock,
|
|
|
|
consensusFork.ExecutionPayloadForSigning)
|
2023-08-23 16:39:57 +00:00
|
|
|
else:
|
2024-02-28 18:27:26 +00:00
|
|
|
      # Pre-Deneb MEV is not supported; this combination signals as much by
      # triggering intentional SignedBlindedBeaconBlock/ExecutionPayload
      # mismatches.
|
2023-08-23 16:39:57 +00:00
|
|
|
proposeBlockContinuation(
|
2024-02-28 18:27:26 +00:00
|
|
|
deneb_mev.SignedBlindedBeaconBlock,
|
|
|
|
max(ConsensusFork.Bellatrix, consensusFork).ExecutionPayloadForSigning)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
proc sendAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
|
2023-08-23 16:39:57 +00:00
|
|
|
## Perform all attestations that the validators attached to this node should
|
|
|
|
## perform during the given slot
|
|
|
|
if slot + SLOTS_PER_EPOCH < head.slot:
|
|
|
|
# The latest block we know about is a lot newer than the slot we're being
|
|
|
|
# asked to attest to - this makes it unlikely that it will be included
|
|
|
|
# at all.
|
|
|
|
# TODO the oldest attestations allowed are those that are older than the
|
|
|
|
# finalized epoch.. also, it seems that posting very old attestations
|
|
|
|
# is risky from a slashing perspective. More work is needed here.
|
|
|
|
warn "Skipping attestation, head is too recent",
|
|
|
|
head = shortLog(head),
|
|
|
|
slot = shortLog(slot)
|
|
|
|
return
|
|
|
|
|
|
|
|
if slot < node.dag.finalizedHead.slot:
|
|
|
|
# During checkpoint sync, we implicitly finalize the given slot even if the
|
|
|
|
# state transition does not yet consider it final - this is a sanity check
|
|
|
|
# mostly to ensure the `atSlot` below works as expected
|
|
|
|
warn "Skipping attestation - slot already finalized",
|
|
|
|
head = shortLog(head),
|
|
|
|
slot = shortLog(slot),
|
|
|
|
finalized = shortLog(node.dag.finalizedHead)
|
|
|
|
return
|
|
|
|
|
|
|
|
let attestationHead = head.atSlot(slot)
|
|
|
|
if head != attestationHead.blck:
|
|
|
|
# In rare cases, such as when we're busy syncing or just slow, we'll be
|
|
|
|
# attesting to a past state - we must then recreate the world as it looked
|
|
|
|
# like back then
|
|
|
|
notice "Attesting to a state in the past, falling behind?",
|
|
|
|
attestationHead = shortLog(attestationHead),
|
|
|
|
head = shortLog(head)
|
|
|
|
|
|
|
|
trace "Checking attestations",
|
|
|
|
attestationHead = shortLog(attestationHead),
|
|
|
|
head = shortLog(head)
|
|
|
|
|
|
|
|
# We need to run attestations exactly for the slot that we're attesting to.
|
|
|
|
# In case blocks went missing, this means advancing past the latest block
|
|
|
|
# using empty slots as fillers.
|
2024-03-14 06:26:36 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#validator-assignments
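  # For reference, the `compute_subnet_for_attestation` call below reduces, per
  # the spec linked above, to:
  #   subnet_id = (committees_per_slot * (slot mod SLOTS_PER_EPOCH) +
  #                committee_index) mod ATTESTATION_SUBNET_COUNT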
|
2023-08-23 16:39:57 +00:00
|
|
|
let
|
|
|
|
epochRef = node.dag.getEpochRef(
|
|
|
|
attestationHead.blck, slot.epoch, false).valueOr:
|
|
|
|
warn "Cannot construct EpochRef for attestation head, report bug",
|
|
|
|
attestationHead = shortLog(attestationHead), slot, error
|
|
|
|
return
|
|
|
|
committees_per_slot = get_committee_count_per_slot(epochRef.shufflingRef)
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
2023-11-19 13:08:07 +00:00
|
|
|
registeredRes = node.attachedValidators.slashingProtection.withContext:
|
|
|
|
var tmp: seq[(RegisteredAttestation, SubnetId)]
|
|
|
|
|
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
|
|
|
let
|
|
|
|
committee = get_beacon_committee(
|
|
|
|
epochRef.shufflingRef, slot, committee_index)
|
|
|
|
subnet_id = compute_subnet_for_attestation(
|
|
|
|
committees_per_slot, slot, committee_index)
|
|
|
|
|
|
|
|
for index_in_committee, validator_index in committee:
|
|
|
|
let
|
2024-02-07 11:26:04 +00:00
|
|
|
validator = node.getValidatorForDuties(validator_index, slot).valueOr:
|
|
|
|
continue
|
2023-11-19 13:08:07 +00:00
|
|
|
data = makeAttestationData(epochRef, attestationHead, committee_index)
|
|
|
|
# TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
|
|
|
|
signingRoot = compute_attestation_signing_root(
|
|
|
|
fork, genesis_validators_root, data)
|
|
|
|
registered = registerAttestationInContext(
|
|
|
|
validator_index, validator.pubkey, data.source.epoch,
|
|
|
|
data.target.epoch, signingRoot)
|
|
|
|
if registered.isErr():
|
|
|
|
warn "Slashing protection activated for attestation",
|
|
|
|
attestationData = shortLog(data),
|
|
|
|
signingRoot = shortLog(signingRoot),
|
|
|
|
validator_index,
|
|
|
|
validator = shortLog(validator),
|
|
|
|
badVoteDetails = $registered.error()
|
|
|
|
continue
|
|
|
|
|
|
|
|
tmp.add((RegisteredAttestation(
|
|
|
|
validator: validator,
|
|
|
|
index_in_committee: uint64 index_in_committee,
|
|
|
|
committee_len: committee.len(), data: data), subnet_id
|
|
|
|
))
|
|
|
|
tmp
|
|
|
|
|
|
|
|
if registeredRes.isErr():
|
|
|
|
warn "Could not update slashing database, skipping attestation duties",
|
|
|
|
error = registeredRes.error()
|
|
|
|
else:
|
|
|
|
for attestation in registeredRes[]:
|
|
|
|
asyncSpawn createAndSendAttestation(
|
|
|
|
node, fork, genesis_validators_root, attestation[0], attestation[1])
|
2023-08-23 16:39:57 +00:00
|
|
|
|
|
|
|
proc createAndSendSyncCommitteeMessage(node: BeaconNode,
|
|
|
|
validator: AttachedValidator,
|
|
|
|
slot: Slot,
|
|
|
|
subcommitteeIdx: SyncSubcommitteeIndex,
|
2024-02-07 11:26:04 +00:00
|
|
|
head: BlockRef)
|
|
|
|
{.async: (raises: [CancelledError]).} =
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
msg =
|
|
|
|
block:
|
|
|
|
let res = await validator.getSyncCommitteeMessage(
|
|
|
|
fork, genesis_validators_root, slot, head.root)
|
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to sign committee message",
|
|
|
|
validator = shortLog(validator), slot = slot,
|
|
|
|
block_root = shortLog(head.root)
|
|
|
|
return
|
|
|
|
res.get()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Logged in the router
|
|
|
|
let res = await node.router.routeSyncCommitteeMessage(
|
|
|
|
msg, subcommitteeIdx, checkSignature = false)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if not res.isOk():
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if node.config.dumpEnabled:
|
|
|
|
dump(node.config.dumpDirOutgoing, msg, validator.pubkey)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
proc sendSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
|
2023-08-23 16:39:57 +00:00
|
|
|
let
|
|
|
|
syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)
|
|
|
|
|
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
|
|
|
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
|
|
|
|
let validator = node.getValidatorForDuties(
|
|
|
|
valIdx, slot, slashingSafe = true).valueOr:
|
|
|
|
continue
|
|
|
|
asyncSpawn createAndSendSyncCommitteeMessage(node, validator, slot,
|
|
|
|
subcommitteeIdx, head)
|
|
|
|
|
|
|
|
proc signAndSendContribution(node: BeaconNode,
|
|
|
|
validator: AttachedValidator,
|
|
|
|
subcommitteeIdx: SyncSubcommitteeIndex,
|
|
|
|
head: BlockRef,
|
2024-02-07 11:26:04 +00:00
|
|
|
slot: Slot) {.async: (raises: [CancelledError]).} =
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
selectionProof = block:
|
|
|
|
let res = await validator.getSyncCommitteeSelectionProof(
|
|
|
|
fork, genesis_validators_root, slot, subcommitteeIdx)
|
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to generate committee selection proof",
|
|
|
|
validator = shortLog(validator), slot,
|
|
|
|
subnet_id = subcommitteeIdx, error = res.error()
|
|
|
|
return
|
|
|
|
res.get()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if not is_sync_committee_aggregator(selectionProof):
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
var
|
|
|
|
msg = SignedContributionAndProof(
|
|
|
|
message: ContributionAndProof(
|
|
|
|
aggregator_index: uint64 validator.index.get,
|
|
|
|
selection_proof: selectionProof))
|
|
|
|
|
|
|
|
if not node.syncCommitteeMsgPool[].produceContribution(
|
|
|
|
slot,
|
|
|
|
head.bid,
|
|
|
|
subcommitteeIdx,
|
|
|
|
msg.message.contribution):
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
msg.signature = block:
|
|
|
|
let res = await validator.getContributionAndProofSignature(
|
|
|
|
fork, genesis_validators_root, msg.message)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to sign sync committee contribution",
|
|
|
|
validator = shortLog(validator), message = shortLog(msg.message),
|
|
|
|
error_msg = res.error()
|
|
|
|
return
|
|
|
|
res.get()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Logged in the router
|
|
|
|
discard await node.router.routeSignedContributionAndProof(msg, false)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
proc sendSyncCommitteeContributions(
|
|
|
|
node: BeaconNode, head: BlockRef, slot: Slot) =
|
2023-12-06 16:23:45 +00:00
|
|
|
let syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
|
|
|
for subcommitteeIdx in SyncSubcommitteeIndex:
|
|
|
|
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
|
|
|
|
let validator = node.getValidatorForDuties(
|
|
|
|
valIdx, slot, slashingSafe = true).valueOr:
|
|
|
|
continue
|
|
|
|
|
|
|
|
asyncSpawn signAndSendContribution(
|
|
|
|
node, validator, subcommitteeIdx, head, slot)
|
|
|
|
|
|
|
|
proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
|
2024-02-07 11:26:04 +00:00
|
|
|
Future[BlockRef] {.async: (raises: [CancelledError]).} =
|
2023-08-23 16:39:57 +00:00
|
|
|
## Perform the proposal for the given slot, iff we have a validator attached
|
|
|
|
## that is supposed to do so, given the shuffling at that slot for the given
|
|
|
|
## head - to compute the proposer, we need to advance a state to the given
|
|
|
|
## slot
|
|
|
|
let
|
|
|
|
proposer = node.dag.getProposer(head, slot).valueOr:
|
|
|
|
return head
|
|
|
|
proposerKey = node.dag.validatorKey(proposer).get().toPubKey
|
|
|
|
validator = node.getValidatorForDuties(proposer, slot).valueOr:
|
|
|
|
debug "Expecting block proposal", headRoot = shortLog(head.root),
|
|
|
|
slot = shortLog(slot),
|
|
|
|
proposer_index = proposer,
|
|
|
|
proposer = shortLog(proposerKey)
|
|
|
|
return head
|
|
|
|
|
|
|
|
return await proposeBlock(node, validator, proposer, head, slot)
|
|
|
|
|
|
|
|
proc signAndSendAggregate(
|
|
|
|
node: BeaconNode, validator: AttachedValidator, shufflingRef: ShufflingRef,
|
2024-02-07 11:26:04 +00:00
|
|
|
slot: Slot, committee_index: CommitteeIndex) {.async: (raises: [CancelledError]).} =
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
validator_index = validator.index.get()
|
|
|
|
selectionProof = block:
|
|
|
|
let res = await validator.getSlotSignature(
|
|
|
|
fork, genesis_validators_root, slot)
|
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to create slot signature",
|
|
|
|
validator = shortLog(validator),
|
|
|
|
slot, error = res.error()
|
|
|
|
return
|
|
|
|
res.get()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-03-14 06:26:36 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregation-selection
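  # Per the spec linked above, aggregator selection boils down to:
  #   modulo = max(1, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
  #   is_aggregator = bytes_to_uint64(hash(slot_signature)[0 ..< 8]) mod modulo == 0
  # so on average ~TARGET_AGGREGATORS_PER_COMMITTEE members of each committee
  # self-select for every slot.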
|
2024-02-07 11:26:04 +00:00
|
|
|
if not is_aggregator(
|
|
|
|
shufflingRef, slot, committee_index, selectionProof):
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/validator.md#construct-aggregate
|
2024-03-14 06:26:36 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#aggregateandproof
|
2024-02-07 11:26:04 +00:00
|
|
|
var
|
|
|
|
msg = SignedAggregateAndProof(
|
|
|
|
message: AggregateAndProof(
|
|
|
|
aggregator_index: uint64 validator_index,
|
|
|
|
selection_proof: selectionProof))
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
msg.message.aggregate = node.attestationPool[].getAggregatedAttestation(
|
|
|
|
slot, committee_index).valueOr:
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
msg.signature = block:
|
|
|
|
let res = await validator.getAggregateAndProofSignature(
|
|
|
|
fork, genesis_validators_root, msg.message)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if res.isErr():
|
|
|
|
warn "Unable to sign aggregate",
|
|
|
|
validator = shortLog(validator), error_msg = res.error()
|
|
|
|
return
|
|
|
|
res.get()
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
validator.doppelgangerActivity(msg.message.aggregate.data.slot.epoch)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Logged in the router
|
|
|
|
discard await node.router.routeSignedAggregateAndProof(
|
|
|
|
msg, checkSignature = false)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
|
|
|
proc sendAggregatedAttestations(
|
2024-02-07 11:26:04 +00:00
|
|
|
node: BeaconNode, head: BlockRef, slot: Slot) =
|
2023-08-23 16:39:57 +00:00
|
|
|
# Aggregated attestations must be sent by members of the beacon committees for
|
|
|
|
# the given slot, for which `is_aggregator` returns `true`.
|
|
|
|
|
|
|
|
let
|
|
|
|
shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
|
|
|
|
warn "Cannot construct EpochRef for head, report bug",
|
|
|
|
head = shortLog(head), slot
|
|
|
|
return
|
|
|
|
committees_per_slot = get_committee_count_per_slot(shufflingRef)
|
|
|
|
|
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
|
|
|
for _, validator_index in
|
|
|
|
get_beacon_committee(shufflingRef, slot, committee_index):
|
|
|
|
let validator = node.getValidatorForDuties(validator_index, slot).valueOr:
|
|
|
|
continue
|
|
|
|
asyncSpawn signAndSendAggregate(node, validator, shufflingRef, slot,
|
|
|
|
committee_index)
|
|
|
|
|
|
|
|
proc updateValidatorMetrics*(node: BeaconNode) =
|
|
|
|
# Technically, this only needs to be done on epoch transitions and if there's
|
|
|
|
# a reorg that spans an epoch transition, but it's easier to implement this
|
|
|
|
# way for now.
|
|
|
|
|
|
|
|
# We'll limit labelled metrics to the first 64, so that we don't overload
|
|
|
|
# Prometheus.
|
|
|
|
|
|
|
|
var total: Gwei
|
|
|
|
var i = 0
|
|
|
|
for _, v in node.attachedValidators[].validators:
|
|
|
|
let balance =
|
|
|
|
if v.index.isNone():
|
|
|
|
0.Gwei
|
|
|
|
elif v.index.get().uint64 >=
|
|
|
|
getStateField(node.dag.headState, balances).lenu64:
|
|
|
|
debug "Cannot get validator balance, index out of bounds",
|
|
|
|
pubkey = shortLog(v.pubkey), index = v.index.get(),
|
|
|
|
balances = getStateField(node.dag.headState, balances).len,
|
|
|
|
stateRoot = getStateRoot(node.dag.headState)
|
|
|
|
0.Gwei
|
|
|
|
else:
|
|
|
|
getStateField(node.dag.headState, balances).item(v.index.get())
|
|
|
|
|
|
|
|
if i < 64:
|
|
|
|
attached_validator_balance.set(
|
|
|
|
balance.toGaugeValue, labelValues = [shortLog(v.pubkey)])
|
|
|
|
|
|
|
|
inc i
|
|
|
|
total += balance
|
|
|
|
|
|
|
|
node.attachedValidatorBalanceTotal = total
|
|
|
|
attached_validator_balance_total.set(total.toGaugeValue)
|
|
|
|
|
|
|
|
from std/times import epochTime
|
|
|
|
|
|
|
|
proc getValidatorRegistration(
|
|
|
|
node: BeaconNode, validator: AttachedValidator, epoch: Epoch):
|
2024-02-07 11:26:04 +00:00
|
|
|
Future[Result[SignedValidatorRegistrationV1, string]] {.async: (raises: [CancelledError]).} =
|
2023-08-23 16:39:57 +00:00
|
|
|
let validatorIdx = validator.index.valueOr:
|
|
|
|
    # The validator index will be missing when the validator has not yet been
    # activated for duties. We can safely skip the registration then.
|
|
|
|
return
|
|
|
|
|
|
|
|
let feeRecipient = node.getFeeRecipient(validator.pubkey, validatorIdx, epoch)
|
|
|
|
let gasLimit = node.getGasLimit(validator.pubkey)
|
|
|
|
var validatorRegistration = SignedValidatorRegistrationV1(
|
|
|
|
message: ValidatorRegistrationV1(
|
|
|
|
fee_recipient: ExecutionAddress(data: distinctBase(feeRecipient)),
|
|
|
|
gas_limit: gasLimit,
|
|
|
|
timestamp: epochTime().uint64,
|
|
|
|
pubkey: validator.pubkey))
|
|
|
|
|
|
|
|
let signature = await validator.getBuilderSignature(
|
|
|
|
node.dag.cfg.genesisFork, validatorRegistration.message)
|
|
|
|
|
|
|
|
debug "getValidatorRegistration: registering",
|
|
|
|
validatorRegistration
|
|
|
|
|
|
|
|
if signature.isErr:
|
|
|
|
return err signature.error
|
|
|
|
|
|
|
|
validatorRegistration.signature = signature.get
|
|
|
|
|
|
|
|
return ok validatorRegistration
|
|
|
|
|
2023-09-20 17:00:37 +00:00
|
|
|
proc registerValidatorsPerBuilder(
|
|
|
|
node: BeaconNode, payloadBuilderAddress: string, epoch: Epoch,
|
2024-02-07 11:26:04 +00:00
|
|
|
attachedValidatorPubkeys: seq[ValidatorPubKey]) {.async: (raises: [CancelledError]).} =
|
2023-12-06 16:23:45 +00:00
|
|
|
const
|
|
|
|
HttpOk = 200
|
|
|
|
BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE = 6.seconds
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
let payloadBuilderClient =
|
|
|
|
RestClientRef.new(payloadBuilderAddress).valueOr:
|
|
|
|
debug "Unable to initialize payload builder client while registering validators",
|
|
|
|
payloadBuilderAddress, epoch,
|
|
|
|
err = error
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if payloadBuilderClient.isNil:
|
|
|
|
debug "registerValidatorsPerBuilder: got nil payload builder REST client reference",
|
|
|
|
payloadBuilderAddress, epoch
|
|
|
|
return
|
2023-09-20 17:00:37 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
const emptyNestedSeq = @[newSeq[SignedValidatorRegistrationV1](0)]
|
|
|
|
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/validator.md#validator-registration
|
|
|
|
# Seed with single empty inner list to avoid special cases
|
|
|
|
var validatorRegistrations = emptyNestedSeq
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Some relay networks disallow large request bodies, so split requests
|
|
|
|
template addValidatorRegistration(
|
|
|
|
validatorRegistration: SignedValidatorRegistrationV1) =
|
|
|
|
const registrationValidatorChunkSize = 500
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if validatorRegistrations[^1].len < registrationValidatorChunkSize:
|
|
|
|
validatorRegistrations[^1].add validatorRegistration
|
|
|
|
else:
|
|
|
|
validatorRegistrations.add @[validatorRegistration]
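  # Illustrative example of the chunking above: with 1200 registrations to send
  # and registrationValidatorChunkSize = 500, three requests go out, carrying
  # 500, 500 and 200 registrations respectively.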
|
|
|
|
|
|
|
|
# First, check for VC-added keys; cheaper because provided pre-signed
|
|
|
|
  # See issue #5599: currently VCs have no way to provide the BN with
  # per-validator builders per the specs, so we have to resort to using the BN
  # fallback default (--payload-builder-url value, obtained by calling
  # getPayloadBuilderAddress)
|
|
|
|
var nonExitedVcPubkeys: HashSet[ValidatorPubKey]
|
|
|
|
if node.externalBuilderRegistrations.len > 0 and
|
|
|
|
payloadBuilderAddress == node.config.getPayloadBuilderAddress.value:
|
|
|
|
withState(node.dag.headState):
|
|
|
|
let currentEpoch = node.currentSlot().epoch
|
|
|
|
for i in 0 ..< forkyState.data.validators.len:
|
|
|
|
# https://github.com/ethereum/beacon-APIs/blob/v2.4.0/apis/validator/register_validator.yaml
|
|
|
|
# "Note that only registrations for active or pending validators must
|
|
|
|
# be sent to the builder network. Registrations for unknown or exited
|
|
|
|
# validators must be filtered out and not sent to the builder
|
|
|
|
# network."
|
|
|
|
if forkyState.data.validators.item(i).exit_epoch > currentEpoch:
|
2023-08-23 16:39:57 +00:00
|
|
|
let pubkey = forkyState.data.validators.item(i).pubkey
|
2024-02-07 11:26:04 +00:00
|
|
|
node.externalBuilderRegistrations.withValue(
|
|
|
|
pubkey, signedValidatorRegistration):
|
|
|
|
nonExitedVcPubkeys.incl signedValidatorRegistration[].message.pubkey
|
|
|
|
addValidatorRegistration signedValidatorRegistration[]
|
|
|
|
|
|
|
|
for key in attachedValidatorPubkeys:
|
|
|
|
# Already included from VC
|
|
|
|
if key in nonExitedVcPubkeys:
|
|
|
|
warn "registerValidators: same validator registered by beacon node and validator client",
|
|
|
|
pubkey = shortLog(key)
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Time passed during awaits; REST keymanager API might have removed it
|
|
|
|
if key notin node.attachedValidators[].validators:
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
let validator =
|
|
|
|
try:
|
|
|
|
node.attachedValidators[].validators[key]
|
|
|
|
except KeyError:
|
|
|
|
raiseAssert "just checked"
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if validator.index.isNone:
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# https://github.com/ethereum/builder-specs/blob/v0.4.0/specs/bellatrix/builder.md#is_eligible_for_registration
|
|
|
|
# Validators should be active or pending
|
|
|
|
withState(node.dag.headState):
|
|
|
|
if distinctBase(validator.index.get) >=
|
|
|
|
forkyState.data.validators.lenu64:
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if node.currentSlot().epoch >=
|
|
|
|
forkyState.data.validators.item(validator.index.get).exit_epoch:
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if validator.externalBuilderRegistration.isSome:
|
|
|
|
addValidatorRegistration validator.externalBuilderRegistration.get
|
|
|
|
else:
|
|
|
|
let validatorRegistration =
|
|
|
|
await node.getValidatorRegistration(validator, epoch)
|
|
|
|
if validatorRegistration.isErr:
|
|
|
|
error "registerValidators: validatorRegistration failed",
|
|
|
|
validatorRegistration
|
|
|
|
continue
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# Time passed during await; REST keymanager API might have removed it
|
|
|
|
if key notin node.attachedValidators[].validators:
|
|
|
|
continue
|
|
|
|
let validator = try:
|
|
|
|
node.attachedValidators[].validators[key]
|
|
|
|
except KeyError:
|
|
|
|
raiseAssert "just checked"
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
validator.externalBuilderRegistration = Opt.some validatorRegistration.get
|
|
|
|
addValidatorRegistration validatorRegistration.get
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
if validatorRegistrations == emptyNestedSeq:
|
|
|
|
return
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
# TODO if there are too many chunks, could trigger DoS protections, so
|
|
|
|
# might randomize order to accumulate cumulative coverage
|
|
|
|
for chunkIdx in 0 ..< validatorRegistrations.len:
|
|
|
|
let registerValidatorResult =
|
|
|
|
try:
|
2023-08-23 16:39:57 +00:00
|
|
|
awaitWithTimeout(
|
|
|
|
payloadBuilderClient.registerValidator(
|
|
|
|
validatorRegistrations[chunkIdx]),
|
|
|
|
BUILDER_VALIDATOR_REGISTRATION_DELAY_TOLERANCE):
|
|
|
|
error "Timeout when registering validator with builder"
|
|
|
|
continue # Try next batch regardless
|
2024-02-07 11:26:04 +00:00
|
|
|
except RestError as exc:
|
2024-03-01 05:25:29 +00:00
|
|
|
warn "Error when registering validator(s) with builder", err = exc.msg
|
2024-02-07 11:26:04 +00:00
|
|
|
continue
|
|
|
|
|
|
|
|
if HttpOk != registerValidatorResult.status:
|
|
|
|
warn "registerValidators: Couldn't register validator with MEV builder",
|
|
|
|
registerValidatorResult
|
|
|
|
|
|
|
|
proc registerValidators*(node: BeaconNode, epoch: Epoch) {.async: (raises: [CancelledError]).} =
|
2023-09-20 17:00:37 +00:00
|
|
|
if not node.config.payloadBuilderEnable: return
|
|
|
|
|
|
|
|
var builderKeys: Table[string, seq[ValidatorPubKey]]
|
2023-11-17 14:42:00 +00:00
|
|
|
|
|
|
|
# Ensure VC validators are still registered if we have no attached validators
|
|
|
|
let externalPayloadBuilderAddress = node.config.getPayloadBuilderAddress
|
|
|
|
if externalPayloadBuilderAddress.isSome:
|
|
|
|
builderKeys[externalPayloadBuilderAddress.value] = newSeq[ValidatorPubKey](0)
|
|
|
|
|
2023-09-20 17:00:37 +00:00
|
|
|
for pubkey in node.attachedValidators[].validators.keys:
|
|
|
|
let payloadBuilderAddress = node.getPayloadBuilderAddress(pubkey).valueOr:
|
|
|
|
continue
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
builderKeys.mgetOrPut(
|
|
|
|
payloadBuilderAddress, default(seq[ValidatorPubKey])).add pubkey
|
2023-09-20 17:00:37 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
for payloadBuilderAddress, keys in builderKeys:
|
|
|
|
await node.registerValidatorsPerBuilder(payloadBuilderAddress, epoch, keys)
|
2023-09-20 17:00:37 +00:00
|
|
|
|
2023-08-23 16:39:57 +00:00
|
|
|
proc updateValidators(
|
|
|
|
node: BeaconNode, validators: openArray[Validator]) =
|
|
|
|
  # Since validator indices are stable, we only check the "updated" range -
|
|
|
|
# checking all validators would significantly slow down this loop when there
|
|
|
|
# are many inactive keys
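  # For example, if `dutyValidatorCount` was 1000 on the previous call and the
  # state now has 1005 validators, only indices 1000 .. 1004 are checked here.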
|
|
|
|
for i in node.dutyValidatorCount..validators.high:
|
|
|
|
let
|
|
|
|
v = node.attachedValidators[].getValidator(validators[i].pubkey).valueOr:
|
|
|
|
continue
|
|
|
|
v.index = Opt.some ValidatorIndex(i)
|
|
|
|
|
|
|
|
node.dutyValidatorCount = validators.len
|
|
|
|
|
|
|
|
for validator in node.attachedValidators[]:
|
|
|
|
# Check if any validators have been activated
|
|
|
|
if validator.needsUpdate and validator.index.isSome():
|
|
|
|
# Activation epoch can change after index is assigned..
|
|
|
|
let index = validator.index.get()
|
|
|
|
if index < validators.lenu64:
|
|
|
|
validator.updateValidator(
|
|
|
|
Opt.some(ValidatorAndIndex(
|
|
|
|
index: index, validator: validators[int index]
|
|
|
|
)))
|
|
|
|
|
2024-01-11 22:34:10 +00:00
|
|
|
proc handleFallbackAttestations(node: BeaconNode, lastSlot, slot: Slot) =
|
|
|
|
# Neither block proposal nor sync committee duties can be done in this
|
|
|
|
# situation.
|
|
|
|
let attestationHead = node.lastValidAttestedBlock.valueOr:
|
|
|
|
return
|
|
|
|
|
|
|
|
if attestationHead.slot + SLOTS_PER_EPOCH < slot:
|
|
|
|
return
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
sendAttestations(node, attestationHead.blck, slot)
|
2024-01-11 22:34:10 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async: (raises: [CancelledError]).} =
|
2023-08-23 16:39:57 +00:00
|
|
|
## Perform validator duties - create blocks, vote and aggregate existing votes
|
|
|
|
if node.attachedValidators[].count == 0:
|
|
|
|
# Nothing to do because we have no validator attached
|
|
|
|
return
|
|
|
|
|
|
|
|
# The dag head might be updated by sync while we're working due to the
|
|
|
|
# await calls, thus we use a local variable to keep the logic straight here
|
|
|
|
var head = node.dag.head
|
|
|
|
if not node.isSynced(head):
|
|
|
|
info "Beacon node not in sync; skipping validator duties for now",
|
|
|
|
slot, headSlot = head.slot
|
|
|
|
|
|
|
|
# Rewards will be growing though, as we sync..
|
|
|
|
updateValidatorMetrics(node)
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
elif not head.executionValid:
|
|
|
|
info "Execution client not in sync; skipping validator duties for now",
|
|
|
|
slot, headSlot = head.slot
|
|
|
|
|
2024-01-11 22:34:10 +00:00
|
|
|
handleFallbackAttestations(node, lastSlot, slot)
|
|
|
|
|
2023-08-23 16:39:57 +00:00
|
|
|
# Rewards will be growing though, as we sync..
|
|
|
|
updateValidatorMetrics(node)
|
|
|
|
|
|
|
|
return
|
|
|
|
else:
|
|
|
|
discard # keep going
|
|
|
|
|
2024-01-11 22:34:10 +00:00
|
|
|
node.lastValidAttestedBlock = Opt.some head.atSlot()
|
|
|
|
|
2023-08-23 16:39:57 +00:00
|
|
|
withState(node.dag.headState):
|
|
|
|
node.updateValidators(forkyState.data.validators.asSeq())
|
|
|
|
|
2023-12-05 11:45:47 +00:00
|
|
|
let newHead = await handleProposal(node, head, slot)
|
2023-08-23 16:39:57 +00:00
|
|
|
head = newHead
|
|
|
|
|
|
|
|
let
|
|
|
|
# The latest point in time when we'll be sending out attestations
|
|
|
|
attestationCutoff = node.beaconClock.fromNow(slot.attestation_deadline())
|
|
|
|
|
|
|
|
if attestationCutoff.inFuture:
|
|
|
|
debug "Waiting to send attestations",
|
|
|
|
head = shortLog(head),
|
|
|
|
attestationCutoff = shortLog(attestationCutoff.offset)
|
|
|
|
|
|
|
|
# Wait either for the block or the attestation cutoff time to arrive
|
|
|
|
if await node.consensusManager[].expectBlock(slot)
|
|
|
|
.withTimeout(attestationCutoff.offset):
|
|
|
|
await waitAfterBlockCutoff(node.beaconClock, slot, Opt.some(head))
|
|
|
|
|
|
|
|
# Time passed - we might need to select a new head in that case
|
|
|
|
node.consensusManager[].updateHead(slot)
|
|
|
|
head = node.dag.head
|
|
|
|
|
|
|
|
static: doAssert attestationSlotOffset == syncCommitteeMessageSlotOffset
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
sendAttestations(node, head, slot)
|
|
|
|
sendSyncCommitteeMessages(node, head, slot)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
|
|
|
updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers
|
|
|
|
|
2024-03-14 06:26:36 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#broadcast-aggregate
|
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/altair/validator.md#broadcast-sync-committee-contribution
|
2023-08-23 16:39:57 +00:00
|
|
|
# Wait 2 / 3 of the slot time to allow messages to propagate, then collect
|
|
|
|
# the result in aggregates
|
|
|
|
static:
|
|
|
|
doAssert aggregateSlotOffset == syncContributionSlotOffset, "Timing change?"
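  # On mainnet timing (SECONDS_PER_SLOT = 12), attestations and sync committee
  # messages go out ~4 seconds into the slot and aggregates / contributions
  # ~8 seconds in, matching the 1/3 and 2/3 slot points from the spec.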
|
|
|
|
let
|
|
|
|
aggregateCutoff = node.beaconClock.fromNow(slot.aggregate_deadline())
|
|
|
|
if aggregateCutoff.inFuture:
|
|
|
|
debug "Waiting to send aggregate attestations",
|
|
|
|
aggregateCutoff = shortLog(aggregateCutoff.offset)
|
|
|
|
await sleepAsync(aggregateCutoff.offset)
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
sendAggregatedAttestations(node, head, slot)
|
|
|
|
sendSyncCommitteeContributions(node, head, slot)
|
2023-08-23 16:39:57 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async: (raises: [CancelledError]).} =
|
2023-08-23 16:39:57 +00:00
|
|
|
## Register upcoming duties of attached validators with the duty tracker
|
|
|
|
|
|
|
|
if node.attachedValidators[].count() == 0 or
|
|
|
|
not node.isSynced(node.dag.head) or not node.dag.head.executionValid:
|
|
|
|
    # Nothing to do: no validators attached, or the node / EL is not in sync yet
|
|
|
|
return
|
|
|
|
|
|
|
|
let
|
|
|
|
genesis_validators_root = node.dag.genesis_validators_root
|
|
|
|
head = node.dag.head
|
|
|
|
|
|
|
|
# Getting the slot signature is expensive but cached - in "normal" cases we'll
|
|
|
|
# be getting the duties one slot at a time
|
|
|
|
for slot in wallSlot ..< wallSlot + SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS:
|
|
|
|
let
|
|
|
|
shufflingRef = node.dag.getShufflingRef(head, slot.epoch, false).valueOr:
|
|
|
|
warn "Cannot construct EpochRef for duties - report bug",
|
|
|
|
head = shortLog(head), slot
|
|
|
|
return
|
|
|
|
let
|
|
|
|
fork = node.dag.forkAtEpoch(slot.epoch)
|
|
|
|
committees_per_slot = get_committee_count_per_slot(shufflingRef)
|
|
|
|
|
|
|
|
for committee_index in get_committee_indices(committees_per_slot):
|
|
|
|
let committee = get_beacon_committee(shufflingRef, slot, committee_index)
|
|
|
|
|
|
|
|
for index_in_committee, validator_index in committee:
|
|
|
|
let
|
|
|
|
validator = node.getValidator(validator_index).valueOr:
|
|
|
|
continue
|
|
|
|
|
|
|
|
subnet_id = compute_subnet_for_attestation(
|
|
|
|
committees_per_slot, slot, committee_index)
|
|
|
|
slotSigRes = await validator.getSlotSignature(
|
|
|
|
fork, genesis_validators_root, slot)
|
|
|
|
if slotSigRes.isErr():
|
|
|
|
error "Unable to create slot signature",
|
|
|
|
validator = shortLog(validator),
|
|
|
|
error_msg = slotSigRes.error()
|
|
|
|
continue
|
|
|
|
let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())
|
|
|
|
|
|
|
|
node.consensusManager[].actionTracker.registerDuty(
|
|
|
|
slot, subnet_id, validator_index, isAggregator)
|
2023-11-28 23:30:14 +00:00
|
|
|
|
|
|
|
proc makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType](
|
|
|
|
node: BeaconNode, consensusFork: static ConsensusFork,
|
|
|
|
randao_reveal: ValidatorSig, graffiti: GraffitiBytes,
|
2024-02-07 11:26:04 +00:00
|
|
|
head: BlockRef, slot: Slot): Future[ResultType] {.async: (raises: [CancelledError]).} =
|
2023-11-28 23:30:14 +00:00
|
|
|
let
|
|
|
|
proposer = node.dag.getProposer(head, slot).valueOr:
|
|
|
|
return ResultType.err(
|
|
|
|
"Unable to get proposer for specific head and slot")
|
|
|
|
proposerKey = node.dag.validatorKey(proposer).get().toPubKey()
|
|
|
|
|
|
|
|
payloadBuilderClient =
|
2024-02-07 11:26:04 +00:00
|
|
|
node.getPayloadBuilderClient(proposer.distinctBase).valueOr(nil)
|
2023-11-28 23:30:14 +00:00
|
|
|
localBlockValueBoost = node.config.localBlockValueBoost
|
|
|
|
|
|
|
|
collectedBids =
|
2024-02-07 11:26:04 +00:00
|
|
|
await collectBids(consensusFork.SignedBlindedBeaconBlock,
|
2023-11-28 23:30:14 +00:00
|
|
|
consensusFork.ExecutionPayloadForSigning,
|
|
|
|
node,
|
|
|
|
payloadBuilderClient, proposerKey,
|
|
|
|
proposer, graffiti, head, slot,
|
|
|
|
randao_reveal)
|
|
|
|
useBuilderBlock =
|
2024-02-07 11:26:04 +00:00
|
|
|
if collectedBids.builderBid.isSome():
|
|
|
|
collectedBids.engineBid.isNone() or builderBetterBid(
|
2023-11-28 23:30:14 +00:00
|
|
|
localBlockValueBoost,
|
2024-03-11 14:18:50 +00:00
|
|
|
collectedBids.builderBid.value().executionPayloadValue,
|
|
|
|
collectedBids.engineBid.value().executionPayloadValue)
|
2023-11-28 23:30:14 +00:00
|
|
|
else:
|
2024-02-07 11:26:04 +00:00
|
|
|
if not(collectedBids.engineBid.isSome):
|
2023-11-28 23:30:14 +00:00
|
|
|
return ResultType.err("Engine bid is not available")
|
|
|
|
false
|
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
engineBid = block:
|
2023-11-28 23:30:14 +00:00
|
|
|
if useBuilderBlock:
|
2024-03-11 14:18:50 +00:00
|
|
|
let blindedBid = collectedBids.builderBid.value()
|
2023-11-28 23:30:14 +00:00
|
|
|
return ResultType.ok((
|
2024-03-11 14:18:50 +00:00
|
|
|
blck:
|
|
|
|
consensusFork.MaybeBlindedBeaconBlock(
|
|
|
|
isBlinded: true,
|
|
|
|
blindedData: blindedBid.blindedBlckPart.message),
|
|
|
|
executionValue: Opt.some(blindedBid.executionPayloadValue),
|
|
|
|
consensusValue: Opt.some(blindedBid.consensusBlockValue)))
|
2023-11-28 23:30:14 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
collectedBids.engineBid.value()
|
2023-11-28 23:30:14 +00:00
|
|
|
|
2024-02-07 11:26:04 +00:00
|
|
|
doAssert engineBid.blck.kind == consensusFork
|
|
|
|
template forkyBlck: untyped = engineBid.blck.forky(consensusFork)
|
2023-11-28 23:30:14 +00:00
|
|
|
when consensusFork >= ConsensusFork.Deneb:
|
2024-02-07 11:26:04 +00:00
|
|
|
let blobsBundle = engineBid.blobsBundleOpt.get()
|
2023-11-28 23:30:14 +00:00
|
|
|
doAssert blobsBundle.commitments == forkyBlck.body.blob_kzg_commitments
|
|
|
|
ResultType.ok((
|
|
|
|
blck: consensusFork.MaybeBlindedBeaconBlock(
|
|
|
|
isBlinded: false,
|
|
|
|
data: deneb.BlockContents(
|
|
|
|
`block`: forkyBlck,
|
|
|
|
kzg_proofs: blobsBundle.proofs,
|
|
|
|
blobs: blobsBundle.blobs)),
|
2024-03-11 14:18:50 +00:00
|
|
|
executionValue: Opt.some(engineBid.executionPayloadValue),
|
|
|
|
consensusValue: Opt.some(engineBid.consensusBlockValue)))
|
2023-11-28 23:30:14 +00:00
|
|
|
else:
|
|
|
|
ResultType.ok((
|
|
|
|
blck: consensusFork.MaybeBlindedBeaconBlock(
|
|
|
|
isBlinded: false,
|
|
|
|
data: forkyBlck),
|
2024-03-11 14:18:50 +00:00
|
|
|
executionValue: Opt.some(engineBid.executionPayloadValue),
|
|
|
|
consensusValue: Opt.some(engineBid.consensusBlockValue)))
|
2023-11-28 23:30:14 +00:00
|
|
|
|
|
|
|
proc makeMaybeBlindedBeaconBlockForHeadAndSlot*(
|
|
|
|
node: BeaconNode, consensusFork: static ConsensusFork,
|
|
|
|
randao_reveal: ValidatorSig, graffiti: GraffitiBytes,
|
|
|
|
head: BlockRef, slot: Slot): auto =
|
|
|
|
type ResultType = Result[tuple[
|
|
|
|
blck: consensusFork.MaybeBlindedBeaconBlock,
|
|
|
|
executionValue: Opt[UInt256],
|
|
|
|
consensusValue: Opt[UInt256]], string]
|
|
|
|
|
|
|
|
makeMaybeBlindedBeaconBlockForHeadAndSlotImpl[ResultType](
|
|
|
|
node, consensusFork, randao_reveal, graffiti, head, slot)
|