# beacon_chain
# Copyright (c) 2018-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  std/algorithm,
  # Status libraries
  metrics,
  chronicles, stew/byteutils,
  # Internal
  ../spec/[
    beaconstate, eth2_merkleization, forks, state_transition_epoch, validator],
  "."/[spec_cache, blockchain_dag, block_quarantine],
  ../fork_choice/fork_choice,
  ../beacon_clock

from std/sequtils import keepItIf, maxIndex

export blockchain_dag, fork_choice

const
  # TODO since deneb, this is looser (whole previous epoch)
  ATTESTATION_LOOKBACK =
    min(24'u64, SLOTS_PER_EPOCH) + MIN_ATTESTATION_INCLUSION_DELAY
    ## The number of slots we'll keep track of in terms of "free" attestations
    ## that potentially could be added to a newly created block
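  # Example: with the mainnet presets (SLOTS_PER_EPOCH = 32,
  # MIN_ATTESTATION_INCLUSION_DELAY = 1) this evaluates to min(24, 32) + 1 = 25,
  # i.e. the pool keeps 25 slots' worth of candidate attestation tables.
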
type
  OnPhase0AttestationCallback =
    proc(data: phase0.Attestation) {.gcsafe, raises: [].}
  OnElectraAttestationCallback =
    proc(data: electra.Attestation) {.gcsafe, raises: [].}

  Validation[CVBType] = object
    ## Validations collect a set of signatures for a distinct attestation - in
    ## eth2, a single bit is used to keep track of which signatures have been
    ## added to the aggregate, meaning that only non-overlapping aggregates may
    ## be further combined.
    aggregation_bits: CVBType
    aggregate_signature: AggregateSignature

  Phase0Validation = Validation[CommitteeValidatorsBits]
  ElectraValidation = Validation[ElectraCommitteeValidatorsBits]

  AttestationEntry[CVBType] = object
    ## Each entry holds the known signatures for a particular, distinct vote
    ## For electra+, the data has been changed to hold the committee index
    data: AttestationData
    committee_len: int
    singles: Table[int, CookedSig] ## \
      ## On the attestation subnets, only attestations with a single vote are
      ## allowed - these can be collected separately to top up aggregates with -
      ## here we collect them by mapping index in committee to a vote
    aggregates: seq[Validation[CVBType]]

  Phase0AttestationEntry = AttestationEntry[CommitteeValidatorsBits]
  ElectraAttestationEntry = AttestationEntry[ElectraCommitteeValidatorsBits]

  AttestationTable[CVBType] = Table[Eth2Digest, AttestationEntry[CVBType]]
    ## Depending on the world view of the various validators, they may have
    ## voted on different states - this map keeps track of each vote keyed by
    ## getAttestationCandidateKey()
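  # Illustration: an entry for a given vote might hold singles = {0: sigA, 5: sigB}
  # (unaggregated votes from committee positions 0 and 5) alongside a couple of
  # gossiped aggregates; updateAggregates (below) folds the singles into those
  # aggregates before block production or aggregation duties read them.
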
  AttestationPool* = object
    ## The attestation pool keeps track of all attestations that potentially
    ## could be added to a block during block production.
    ## These attestations also contribute to the fork choice, which combines
    ## "free" attestations with those found in past blocks - these votes
    ## are tracked separately in the fork choice.

    phase0Candidates: array[ATTESTATION_LOOKBACK.int,
      AttestationTable[CommitteeValidatorsBits]] ## \
      ## We keep one item per slot such that indexing matches slot number
      ## together with startingSlot

    electraCandidates: array[ATTESTATION_LOOKBACK.int,
      AttestationTable[ElectraCommitteeValidatorsBits]] ## \
      ## We keep one item per slot such that indexing matches slot number
      ## together with startingSlot

    startingSlot: Slot ## \
      ## Generally, we keep attestations only until a slot has been finalized -
      ## after that, they may no longer affect fork choice.

    dag*: ChainDAGRef
    quarantine*: ref Quarantine

    forkChoice*: ForkChoice

    nextAttestationEpoch*: seq[tuple[subnet: Epoch, aggregate: Epoch]] ## \
      ## sequence based on validator indices

    onPhase0AttestationAdded: OnPhase0AttestationCallback
    onElectraAttestationAdded: OnElectraAttestationCallback

logScope: topics = "attpool"

declareGauge attestation_pool_block_attestation_packing_time,
  "Time it took to create list of attestations for block"

proc init*(T: type AttestationPool, dag: ChainDAGRef,
           quarantine: ref Quarantine,
           onAttestation: OnPhase0AttestationCallback = nil,
           onElectraAttestation: OnElectraAttestationCallback = nil): T =
  ## Initialize an AttestationPool from the dag `headState`
  ## The `finalized_root` works around the finalized_checkpoint of the genesis block
  ## holding a zero_root.
  let finalizedEpochRef = dag.getFinalizedEpochRef()

  var forkChoice = ForkChoice.init(
    finalizedEpochRef, dag.finalizedHead.blck)

  # Feed fork choice with unfinalized history - during startup, block pool only
  # keeps track of a single history so we just need to follow it
  doAssert dag.heads.len == 1, "Init only supports a single history"

  var blocks: seq[BlockRef]
  var cur = dag.head

  # When the chain is finalizing, the votes between the head block and the
  # finalized checkpoint should be enough for a stable fork choice - when the
  # chain is not finalizing, we want to seed it with as many votes as possible
  # since the whole history of each branch might be significant. It is however
  # a game of diminishing returns, and we have to weigh it against the time
  # it takes to replay that many blocks during startup and thus miss _new_
  # votes.
  const ForkChoiceHorizon = 256
  while cur != dag.finalizedHead.blck:
    blocks.add cur
    cur = cur.parent

  info "Initializing fork choice", unfinalized_blocks = blocks.len

  var epochRef = finalizedEpochRef
  for i in 0..<blocks.len:
    let
      blckRef = blocks[blocks.len - i - 1]
      status =
        if i < (blocks.len - ForkChoiceHorizon) and (i mod 1024 != 0):
          # Fork choice needs to know about the full block tree back through the
          # finalization point, but doesn't really need to have overly accurate
          # justification and finalization points until we get close to head -
          # nonetheless, we'll make sure to pass a fresh finalization point now
          # and then to make sure the fork choice data structure doesn't grow
          # too big - getting an EpochRef can be expensive.
          forkChoice.backend.process_block(
            blckRef.bid, blckRef.parent.root, epochRef.checkpoints)
        else:
          epochRef = dag.getEpochRef(blckRef, blckRef.slot.epoch, false).expect(
            "Getting an EpochRef should always work for non-finalized blocks")
          let
            blck = dag.getForkedBlock(blckRef.bid).expect(
              "Should be able to load initial fork choice blocks")
            unrealized =
              if blckRef == dag.head:
                withState(dag.headState):
                  when consensusFork >= ConsensusFork.Altair:
                    forkyState.data.compute_unrealized_finality()
                  else:
                    var cache: StateCache
                    forkyState.data.compute_unrealized_finality(cache)
              else:
                default(FinalityCheckpoints)
          withBlck(blck):
            forkChoice.process_block(
              dag, epochRef, blckRef, unrealized, forkyBlck.message,
              blckRef.slot.start_beacon_time)

    doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error

  debugComment "nothing initializes electra callback externally"
  info "Fork choice initialized",
    justified = shortLog(getStateField(
      dag.headState, current_justified_checkpoint)),
    finalized = shortLog(getStateField(dag.headState, finalized_checkpoint))
  T(
    dag: dag,
    quarantine: quarantine,
    forkChoice: forkChoice,
    onPhase0AttestationAdded: onAttestation,
    onElectraAttestationAdded: onElectraAttestation
  )

proc addForkChoiceVotes(
    pool: var AttestationPool, slot: Slot,
    attesting_indices: openArray[ValidatorIndex], block_root: Eth2Digest,
    wallTime: BeaconTime) =
  # Add attestation votes to fork choice
  if (let v = pool.forkChoice.on_attestation(
    pool.dag, slot, block_root, attesting_indices, wallTime);
    v.isErr):
    # This indicates that the fork choice and the chain dag are out of sync -
    # this is most likely the result of a bug, but we'll try to keep going -
    # hopefully the fork choice will heal itself over time.
    error "Couldn't add attestation to fork choice, bug?", err = v.error()

func candidateIdx(pool: AttestationPool, slot: Slot,
                  isElectra: bool = false): Opt[int] =
  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len

  let poolLength = if isElectra:
    pool.electraCandidates.lenu64 else: pool.phase0Candidates.lenu64

  if slot >= pool.startingSlot and
      slot < (pool.startingSlot + poolLength):
    Opt.some(int(slot mod poolLength))
  else:
    Opt.none(int)
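# Example: with a lookback of 25 slots (see ATTESTATION_LOOKBACK above) and
# startingSlot == 100, slots 100..124 are considered live and slot 103 maps to
# index 103 mod 25 == 3 - the candidate arrays thus act as a ring buffer keyed
# by slot.
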
proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
  if wallSlot + 1 < pool.phase0Candidates.lenu64:
    return # Genesis

  static: doAssert pool.phase0Candidates.len == pool.electraCandidates.len
  let newStartingSlot = wallSlot + 1 - pool.phase0Candidates.lenu64

  if newStartingSlot < pool.startingSlot:
    error "Current slot older than attestation pool view, clock reset?",
      startingSlot = pool.startingSlot, newStartingSlot, wallSlot
    return

  # As time passes we'll clear out any old attestations as they are no longer
  # viable to be included in blocks

  if newStartingSlot - pool.startingSlot >= pool.phase0Candidates.lenu64():
    # In case many slots passed since the last update, avoid iterating over
    # the same indices over and over
    pool.phase0Candidates.reset()
    pool.electraCandidates.reset()
  else:
    for i in pool.startingSlot..newStartingSlot:
      pool.phase0Candidates[i.uint64 mod pool.phase0Candidates.lenu64].reset()
      pool.electraCandidates[i.uint64 mod pool.electraCandidates.lenu64].reset()

  pool.startingSlot = newStartingSlot

func oneIndex(
    bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits): Opt[int] =
  # Find the index of the set bit, iff one bit is set
  var res = Opt.none(int)
  for idx in 0..<bits.len():
    if bits[idx]:
      if res.isNone():
        res = Opt.some(idx)
      else: # More than one bit set!
        return Opt.none(int)
  res
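# Example: a bitfield with only bit 2 set yields Opt.some(2), while a bitfield
# with bits 2 and 5 set yields Opt.none(int) since it no longer represents a
# single vote.
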
func toAttestation(entry: AttestationEntry, validation: Phase0Validation):
    phase0.Attestation =
  phase0.Attestation(
    aggregation_bits: validation.aggregation_bits,
    data: entry.data,
    signature: validation.aggregate_signature.finish().toValidatorSig()
  )

func toElectraAttestation(
    entry: AttestationEntry, validation: ElectraValidation):
    electra.Attestation =
  var committee_bits: AttestationCommitteeBits
  committee_bits[int(entry.data.index)] = true

  electra.Attestation(
    aggregation_bits: validation.aggregation_bits,
    committee_bits: committee_bits,
    data: AttestationData(
      slot: entry.data.slot,
      index: 0,
      beacon_block_root: entry.data.beacon_block_root,
      source: entry.data.source,
      target: entry.data.target),
    signature: validation.aggregate_signature.finish().toValidatorSig()
  )

func updateAggregates(entry: var AttestationEntry) =
  # Upgrade the list of aggregates to ensure that there is at least one
  # aggregate (assuming there are singles) and all aggregates have all
  # singles incorporated
  if entry.singles.len() == 0:
    return

  if entry.aggregates.len() == 0:
    # If there are singles, we can create an aggregate from them that will
    # represent our best knowledge about the current votes
    for index_in_committee, signature in entry.singles:
      if entry.aggregates.len() == 0:
        # Create aggregate on first iteration..
        template getInitialAggregate(_: Phase0AttestationEntry):
            untyped {.used.} =
          Phase0Validation(
            aggregation_bits:
              CommitteeValidatorsBits.init(entry.committee_len),
            aggregate_signature: AggregateSignature.init(signature))
        template getInitialAggregate(_: ElectraAttestationEntry):
            untyped {.used.} =
          ElectraValidation(
            aggregation_bits:
              ElectraCommitteeValidatorsBits.init(entry.committee_len),
            aggregate_signature: AggregateSignature.init(signature))
        entry.aggregates.add(getInitialAggregate(entry))
      else:
        entry.aggregates[0].aggregate_signature.aggregate(signature)

      entry.aggregates[0].aggregation_bits.setBit(index_in_committee)
  else:
    # There already exist aggregates - we'll try to top them up by adding
    # singles to them - for example, it may happen that we're being asked to
    # produce a block 4s after creating an aggregate and new information may
    # have arrived by then.
    # In theory, also aggregates could be combined but finding the best
    # combination is hard, so we'll pragmatically use singles only here
    var updated = false
    for index_in_committee, signature in entry.singles:
      for v in entry.aggregates.mitems():
        if not v.aggregation_bits[index_in_committee]:
          v.aggregation_bits.setBit(index_in_committee)
          v.aggregate_signature.aggregate(signature)
          updated = true

    if updated:
      # One or more aggregates were updated - time to remove the ones that are
      # pure subsets of the others. This may lead to quadratic behaviour, but
      # the number of aggregates for the entry is limited by the number of
      # aggregators on the topic, which is capped by `is_aggregator` and
      # TARGET_AGGREGATORS_PER_COMMITTEE
      var i = 0
      while i < entry.aggregates.len():
        var j = 0
        while j < entry.aggregates.len():
          if i != j and entry.aggregates[i].aggregation_bits.isSubsetOf(
              entry.aggregates[j].aggregation_bits):
            entry.aggregates[i] = entry.aggregates[j]
            entry.aggregates.del(j)
            dec i # Rerun checks on the new `i` item
            break
          else:
            inc j
        inc i
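# Illustration: given aggregates with bits {1, 2} and {2, 3} and a single for
# position 4, topping up yields {1, 2, 4} and {2, 3, 4} - neither is a subset of
# the other, so both are kept. Had the result been {1, 2, 4} and {1, 2, 3, 4},
# the former would have been removed as a pure subset.
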
func covers(
    entry: AttestationEntry,
    bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits): bool =
  for i in 0..<entry.aggregates.len():
    if bits.isSubsetOf(entry.aggregates[i].aggregation_bits):
      return true
  false

proc addAttestation(
    entry: var AttestationEntry,
    attestation: phase0.Attestation | electra.Attestation,
    signature: CookedSig): bool =
  logScope:
    attestation = shortLog(attestation)

  let
    singleIndex = oneIndex(attestation.aggregation_bits)

  if singleIndex.isSome():
    if singleIndex.get() in entry.singles:
      trace "Attestation already seen",
        singles = entry.singles.len(),
        aggregates = entry.aggregates.len()

      return false

    debug "Attestation resolved",
      singles = entry.singles.len(),
      aggregates = entry.aggregates.len()

    entry.singles[singleIndex.get()] = signature
  else:
    # More than one vote in this attestation
    if entry.covers(attestation.aggregation_bits):
      return false

    # Since we're adding a new aggregate, we can now remove existing
    # aggregates that don't add any new votes
    entry.aggregates.keepItIf(
      not it.aggregation_bits.isSubsetOf(attestation.aggregation_bits))

    entry.aggregates.add(Validation[typeof(entry).CVBType](
      aggregation_bits: attestation.aggregation_bits,
      aggregate_signature: AggregateSignature.init(signature)))

    debug "Aggregate resolved",
      singles = entry.singles.len(),
      aggregates = entry.aggregates.len()

  true

func getAttestationCandidateKey(
    data: AttestationData,
    committee_bits: AttestationCommitteeBits =
      default(AttestationCommitteeBits)): Eth2Digest =
  # Some callers might have used just htr(data) for the key, so rather than
  # risk a random regression (one was caught in the test suite, but there is
  # no particular reason other code could not have manually calculated the
  # key, too), special-case the phase0 case as htr(data).
  if committee_bits == static(default(typeof(committee_bits))):
    # i.e. no committees selected, so it can't be an actual Electra attestation
    hash_tree_root(data)
  else:
    hash_tree_root([hash_tree_root(data), hash_tree_root(committee_bits)])
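# In other words: pre-electra candidates keep their historical key of
# hash_tree_root(data), while electra-style candidates (where data.index is 0
# on the wire and the committee lives in committee_bits) mix the committee bits
# into the key so that votes for different committees of the same slot and
# block root do not collide.
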
proc addAttestation*(
    pool: var AttestationPool,
    attestation: phase0.Attestation | electra.Attestation,
    attesting_indices: openArray[ValidatorIndex],
    signature: CookedSig, wallTime: BeaconTime) =
  ## Add an attestation to the pool, assuming it's been validated already.
  ##
  ## Assuming the votes in the attestation have not already been seen, the
  ## attestation will be added to the fork choice and lazily added to a list of
  ## attestations for future aggregation and block production.
  logScope:
    attestation = shortLog(attestation)

  doAssert attestation.signature == signature.toValidatorSig(),
    "Deserialized signature must match the one in the attestation"

  updateCurrent(pool, wallTime.slotOrZero)

  let candidateIdx = pool.candidateIdx(attestation.data.slot)
  if candidateIdx.isNone:
    debug "Skipping old attestation for block production",
      startingSlot = pool.startingSlot
    return

  template committee_bits(_: phase0.Attestation): auto =
    const res = default(AttestationCommitteeBits)
    res

  # TODO withValue is an abomination, but it's hard to use anything else
  #      without creating an unnecessary AttestationEntry on the hot path or
  #      doing multiple lookups
  template addAttToPool(attCandidates: untyped, entry: untyped) =
    let attestation_data_root = hash_tree_root(entry.data)

    attCandidates[candidateIdx.get()].withValue(attestation_data_root, entry) do:
      if not addAttestation(entry[], attestation, signature):
        return
    do:
      if not addAttestation(
          attCandidates[candidateIdx.get()].mgetOrPut(attestation_data_root, entry),
          attestation, signature):
        # Returns from overall function, not only template
        return

  template addAttToPool(_: phase0.Attestation) {.used.} =
    let newAttEntry = Phase0AttestationEntry(
      data: attestation.data, committee_len: attestation.aggregation_bits.len)
    addAttToPool(pool.phase0Candidates, newAttEntry)
    pool.addForkChoiceVotes(
      attestation.data.slot, attesting_indices,
      attestation.data.beacon_block_root, wallTime)

    # Send notification about new attestation via callback.
    if not(isNil(pool.onPhase0AttestationAdded)):
      pool.onPhase0AttestationAdded(attestation)

  template addAttToPool(_: electra.Attestation) {.used.} =
    let
      committee_index = get_committee_index_one(attestation.committee_bits).expect("TODO")
      data = AttestationData(
        slot: attestation.data.slot,
        index: uint64 committee_index,
        beacon_block_root: attestation.data.beacon_block_root,
        source: attestation.data.source,
        target: attestation.data.target)
    let newAttEntry = ElectraAttestationEntry(
      data: data,
      committee_len: attestation.aggregation_bits.len)
    addAttToPool(pool.electraCandidates, newAttEntry)
    pool.addForkChoiceVotes(
      attestation.data.slot, attesting_indices,
      attestation.data.beacon_block_root, wallTime)

    # Send notification about new attestation via callback.
    if not(isNil(pool.onElectraAttestationAdded)):
      pool.onElectraAttestationAdded(attestation)

  addAttToPool(attestation)
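# Rough usage sketch (illustrative, not part of the module): once gossip
# validation has produced the attesting indices and a cooked signature, the
# attestation is added with
#   pool.addAttestation(attestation, attesting_indices, sig, wallTime)
# and an aggregator later asks for the best-known aggregate via
#   pool.getAggregatedAttestation(slot, hash_tree_root(attestation.data))
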
func covers*(
    pool: var AttestationPool, data: AttestationData,
    bits: CommitteeValidatorsBits): bool =
  ## Return true iff the given attestation already is fully covered by one of
  ## the existing aggregates, making it redundant
  ## the `var` attestation pool is needed to use `withValue`, else Table becomes
  ## unusably inefficient
  let candidateIdx = pool.candidateIdx(data.slot)
  if candidateIdx.isNone:
    return false

  pool.phase0Candidates[candidateIdx.get()].withValue(
      getAttestationCandidateKey(data), entry):
    if entry[].covers(bits):
      return true

  false

func covers*(
    pool: var AttestationPool, data: AttestationData,
    bits: ElectraCommitteeValidatorsBits): bool =
  ## Return true iff the given attestation already is fully covered by one of
  ## the existing aggregates, making it redundant
  ## the `var` attestation pool is needed to use `withValue`, else Table becomes
  ## unusably inefficient
  let candidateIdx = pool.candidateIdx(data.slot)
  if candidateIdx.isNone:
    return false

  debugComment "foo"
  # needs to know more than attestationdata now
  #let attestation_data_root = hash_tree_root(data)
  #pool.electraCandidates[candidateIdx.get()].withValue(attestation_data_root, entry):
  #  if entry[].covers(bits):
  #    return true

  false

proc addForkChoice*(pool: var AttestationPool,
                    epochRef: EpochRef,
                    blckRef: BlockRef,
                    unrealized: FinalityCheckpoints,
                    blck: ForkyTrustedBeaconBlock,
                    wallTime: BeaconTime) =
  ## Add a verified block to the fork choice context
  let state = pool.forkChoice.process_block(
    pool.dag, epochRef, blckRef, unrealized, blck, wallTime)

  if state.isErr:
    # This indicates that the fork choice and the chain dag are out of sync -
    # this is most likely the result of a bug, but we'll try to keep going -
    # hopefully the fork choice will heal itself over time.
    error "Couldn't add block to fork choice, bug?",
      blck = shortLog(blck), err = state.error

iterator attestations*(
    pool: AttestationPool, slot: Opt[Slot],
    committee_index: Opt[CommitteeIndex]): phase0.Attestation =
  let candidateIndices =
    if slot.isSome():
      let candidateIdx = pool.candidateIdx(slot.get())
      if candidateIdx.isSome():
        candidateIdx.get() .. candidateIdx.get()
      else:
        1 .. 0
    else:
      0 ..< pool.phase0Candidates.len()

  for candidateIndex in candidateIndices:
    for _, entry in pool.phase0Candidates[candidateIndex]:
      if committee_index.isNone() or entry.data.index == committee_index.get():
        var singleAttestation = phase0.Attestation(
          aggregation_bits: CommitteeValidatorsBits.init(entry.committee_len),
          data: entry.data)

        for index, signature in entry.singles:
          singleAttestation.aggregation_bits.setBit(index)
          singleAttestation.signature = signature.toValidatorSig()
          yield singleAttestation
          singleAttestation.aggregation_bits.clearBit(index)

        for v in entry.aggregates:
          yield entry.toAttestation(v)

type
  AttestationCacheKey = (Slot, uint64)
  AttestationCache[CVBType] = Table[AttestationCacheKey, CVBType] ##\
    ## Cache for quick lookup during beacon block construction of attestations
    ## which have already been included, and therefore should be skipped.

func getAttestationCacheKey(ad: AttestationData): AttestationCacheKey =
  # The committee is unique per slot and committee index, which means we can use
  # it as key for a participation cache - this is checked in `check_attestation`
  (ad.slot, ad.index)

func add(
    attCache: var AttestationCache, data: AttestationData,
    aggregation_bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits) =
  let key = data.getAttestationCacheKey()
  attCache.withValue(key, v) do:
    v[].incl(aggregation_bits)
  do:
    attCache[key] = aggregation_bits

func init(
    T: type AttestationCache, state: phase0.HashedBeaconState, _: StateCache):
    T =
  # Load attestations that are scheduled for being given rewards
  for i in 0..<state.data.previous_epoch_attestations.len():
    result.add(
      state.data.previous_epoch_attestations[i].data,
      state.data.previous_epoch_attestations[i].aggregation_bits)
  for i in 0..<state.data.current_epoch_attestations.len():
    result.add(
      state.data.current_epoch_attestations[i].data,
      state.data.current_epoch_attestations[i].aggregation_bits)

func init(
    T: type AttestationCache,
    state: altair.HashedBeaconState | bellatrix.HashedBeaconState |
           capella.HashedBeaconState | deneb.HashedBeaconState |
           electra.HashedBeaconState,
    cache: var StateCache): T =
  # Load attestations that are scheduled for being given rewards
  let
    prev_epoch = state.data.get_previous_epoch()
    cur_epoch = state.data.get_current_epoch()

  template update_attestation_pool_cache(
      epoch: Epoch, participation_bitmap: untyped) =
    let committees_per_slot = get_committee_count_per_slot(
      state.data, epoch, cache)
    for committee_index in get_committee_indices(committees_per_slot):
      for slot in epoch.slots():
        let committee = get_beacon_committee(
            state.data, slot, committee_index, cache)
        var
          validator_bits = typeof(result).B.init(committee.len)
        for index_in_committee, validator_index in committee:
          if participation_bitmap[validator_index] != 0:
            # If any flag got set, there was an attestation from this validator.
            validator_bits[index_in_committee] = true
        result[(slot, committee_index.uint64)] = validator_bits

  # This treats all types of rewards as equivalent, which isn't ideal
  update_attestation_pool_cache(
    prev_epoch, state.data.previous_epoch_participation)
  update_attestation_pool_cache(
    cur_epoch, state.data.current_epoch_participation)

func score(
    attCache: var AttestationCache, data: AttestationData,
    aggregation_bits: CommitteeValidatorsBits | ElectraCommitteeValidatorsBits): int =
  # The score of an attestation is loosely based on how many new votes it brings
  # to the state - a more accurate score function would also look at inclusion
  # distance and effective balance.
  # TODO cache not var, but `withValue` requires it
  let
    key = data.getAttestationCacheKey()
    bitsScore = aggregation_bits.countOnes()

  attCache.withValue(key, xxx):
    doAssert aggregation_bits.len() == xxx[].len(),
      "check_attestation ensures committee length"

    # How many votes were in the attestation minus the votes that are the same
    return bitsScore - aggregation_bits.countOverlap(xxx[])

  # Not found in cache - fresh vote meaning all attestations count
  bitsScore
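# Worked example: if the cache already holds votes 0b0011_1100 for a committee
# of 8 and a candidate aggregate carries 0b0000_1111, countOnes() is 4 and the
# overlap is 2, so the candidate scores 4 - 2 = 2 new votes.
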
proc check_attestation_compatible*(
    dag: ChainDAGRef,
    state: ForkyHashedBeaconState,
    attestation: SomeAttestation | electra.Attestation |
                 electra.TrustedAttestation): Result[void, cstring] =
  let
    targetEpoch = attestation.data.target.epoch
    compatibleRoot = state.dependent_root(targetEpoch.get_previous_epoch)

    attestedBlck = dag.getBlockRef(attestation.data.target.root).valueOr:
      return err("Unknown `target.root`")
    dependentSlot = targetEpoch.attester_dependent_slot
    dependentBid = dag.atSlot(attestedBlck.bid, dependentSlot).valueOr:
      return err("Dependent root not found")
    dependentRoot = dependentBid.bid.root

  if dependentRoot != compatibleRoot:
    return err("Incompatible shuffling")
  ok()

proc getAttestationsForBlock*(pool: var AttestationPool,
                              state: ForkyHashedBeaconState,
                              cache: var StateCache): seq[phase0.Attestation] =
  ## Retrieve attestations that may be added to a new block at the slot of the
  ## given state
  ## https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#attestations
  let newBlockSlot = state.data.slot.uint64

  if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY:
    return @[] # Too close to genesis

  let
    # Attestations produced in a particular slot are added to the block
    # at the slot where at least MIN_ATTESTATION_INCLUSION_DELAY slots have passed
    maxAttestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
    startPackingTick = Moment.now()

  var
    candidates: seq[tuple[
      score: int, slot: Slot, entry: ptr Phase0AttestationEntry,
      validation: int]]
    attCache = AttestationCache[CommitteeValidatorsBits].init(state, cache)

  for i in 0..<ATTESTATION_LOOKBACK:
    if i > maxAttestationSlot: # Around genesis..
      break

    let
      slot = Slot(maxAttestationSlot - i)
      candidateIdx = pool.candidateIdx(slot)

    if candidateIdx.isNone():
      # Passed the collection horizon - shouldn't happen because it's based on
      # ATTESTATION_LOOKBACK
      break

    for _, entry in pool.phase0Candidates[candidateIdx.get()].mpairs():
      entry.updateAggregates()

      for j in 0..<entry.aggregates.len():
        let attestation = entry.toAttestation(entry.aggregates[j])

        # Filter out attestations that were created with a different shuffling.
        # As we don't re-check signatures, this needs to be done separately
        if not pool.dag.check_attestation_compatible(state, attestation).isOk():
          continue

        # Attestations are checked based on the state that we're adding the
        # attestation to - there might have been a fork between when we first
        # saw the attestation and the time that we added it
        if not check_attestation(
            state.data, attestation, {skipBlsValidation}, cache).isOk():
          continue

        let score = attCache.score(
          entry.data, entry.aggregates[j].aggregation_bits)
        if score == 0:
          # 0 score means the attestation would not bring any votes - discard
          # it early
          # Note: this must be done _after_ `check_attestation` as it relies on
          # the committee to match the state that was used to build the cache
          continue

        # Careful, must not update the attestation table for the pointer to
        # remain valid
        candidates.add((score, slot, addr entry, j))

  # Using a greedy algorithm, select as many attestations as possible that will
  # fit in the block.
  #
  # Effectively https://en.wikipedia.org/wiki/Maximum_coverage_problem which
  # therefore has inapproximability results of greedy algorithm optimality.
  #
  # Some research, also, has been done showing that one can tweak this and do
  # a kind of k-greedy version where each greedy step tries all possible two,
  # three, or higher-order tuples of next elements. These seem promising, but
  # also expensive.
  #
  # For each round, we'll look for the best attestation and add it to the result,
  # then re-score the other candidates.
  var res: seq[phase0.Attestation]
  let totalCandidates = candidates.len()
  while candidates.len > 0 and res.lenu64() < MAX_ATTESTATIONS:
    let entryCacheKey = block:
      # Find the candidate with the highest score - slot is used as a
      # tie-breaker so that more recent attestations are added first
      let
        candidate =
          # Fast path for when all remaining candidates fit
          if candidates.lenu64 < MAX_ATTESTATIONS: candidates.len - 1
          else: maxIndex(candidates)
        (_, _, entry, j) = candidates[candidate]

      candidates.del(candidate) # careful, `del` reorders candidates

      res.add(entry[].toAttestation(entry[].aggregates[j]))

      # Update cache so that the new votes are taken into account when updating
      # the score below
      attCache.add(entry[].data, entry[].aggregates[j].aggregation_bits)

      entry[].data.getAttestationCacheKey

    block:
      # Because we added some votes, it's quite possible that some candidates
      # are no longer interesting - update the scores of the existing candidates
      for it in candidates.mitems():
        # Aggregates not on the same (slot, committee) pair don't change scores
        if it.entry[].data.getAttestationCacheKey != entryCacheKey:
          continue

        it.score = attCache.score(
          it.entry[].data,
          it.entry[].aggregates[it.validation].aggregation_bits)

      candidates.keepItIf:
        # Only keep candidates that might add coverage
        it.score > 0

  let
    packingDur = Moment.now() - startPackingTick

  debug "Packed attestations for block",
    newBlockSlot, packingDur, totalCandidates, attestations = res.len()
  attestation_pool_block_attestation_packing_time.set(
    packingDur.toFloatSeconds())

  res
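# Note: with the mainnet presets MAX_ATTESTATIONS is 128, so the greedy loop
# above packs at most 128 aggregates per block, preferring those that add the
# most not-yet-included votes.
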
proc getAttestationsForBlock*(pool: var AttestationPool,
                              state: ForkedHashedBeaconState,
                              cache: var StateCache): seq[phase0.Attestation] =
  withState(state):
    when consensusFork < ConsensusFork.Electra:
      pool.getAttestationsForBlock(forkyState, cache)
    else:
      default(seq[phase0.Attestation])

proc getElectraAttestationsForBlock*(
    pool: var AttestationPool, state: electra.HashedBeaconState,
    cache: var StateCache): seq[electra.Attestation] =
  let newBlockSlot = state.data.slot.uint64

  if newBlockSlot < MIN_ATTESTATION_INCLUSION_DELAY:
    return @[] # Too close to genesis

  let
    # Attestations produced in a particular slot are added to the block
    # at the slot where at least MIN_ATTESTATION_INCLUSION_DELAY slots have passed
    maxAttestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
    startPackingTick = Moment.now()

  var
    candidates: seq[tuple[
      score: int, slot: Slot, entry: ptr ElectraAttestationEntry,
      validation: int]]
    attCache = AttestationCache[ElectraCommitteeValidatorsBits].init(state, cache)

  for i in 0..<ATTESTATION_LOOKBACK:
    if i > maxAttestationSlot: # Around genesis..
      break

    let
      slot = Slot(maxAttestationSlot - i)
      candidateIdx = pool.candidateIdx(slot)

    if candidateIdx.isNone():
      # Passed the collection horizon - shouldn't happen because it's based on
      # ATTESTATION_LOOKBACK
      break

    for _, entry in pool.electraCandidates[candidateIdx.get()].mpairs():
      entry.updateAggregates()

      for j in 0..<entry.aggregates.len():
        let attestation = entry.toElectraAttestation(entry.aggregates[j])

        # Filter out attestations that were created with a different shuffling.
        # As we don't re-check signatures, this needs to be done separately
        if not pool.dag.check_attestation_compatible(state, attestation).isOk():
          continue

        # Attestations are checked based on the state that we're adding the
        # attestation to - there might have been a fork between when we first
        # saw the attestation and the time that we added it
        if not check_attestation(
            state.data, attestation, {skipBlsValidation}, cache, false).isOk():
          continue

        let score = attCache.score(
          entry.data, entry.aggregates[j].aggregation_bits)
        if score == 0:
          # 0 score means the attestation would not bring any votes - discard
          # it early
          # Note: this must be done _after_ `check_attestation` as it relies on
          # the committee to match the state that was used to build the cache
          continue

        # Careful, must not update the attestation table for the pointer to
        # remain valid
        candidates.add((score, slot, addr entry, j))

  # Sort candidates by score, using slot as a tie-breaker
  candidates.sort()

  # Using a greedy algorithm, select as many attestations as possible that will
  # fit in the block.
  #
  # Effectively https://en.wikipedia.org/wiki/Maximum_coverage_problem which
  # therefore has inapproximability results of greedy algorithm optimality.
  #
  # Some research, also, has been done showing that one can tweak this and do
  # a kind of k-greedy version where each greedy step tries all possible two,
  # three, or higher-order tuples of next elements. These seem promising, but
  # also expensive.
  #
  # For each round, we'll look for the best attestation and add it to the result,
  # then re-score the other candidates.
  var
    candidatesPerBlock: Table[(Eth2Digest, Slot), seq[electra.Attestation]]

  let totalCandidates = candidates.len()
  while candidates.len > 0 and candidatesPerBlock.lenu64() <
      MAX_ATTESTATIONS_ELECTRA * MAX_COMMITTEES_PER_SLOT:
    let entryCacheKey = block:
      let (_, _, entry, j) =
        # Fast path for when all remaining candidates fit
        if candidates.lenu64 < MAX_ATTESTATIONS_ELECTRA:
          candidates[candidates.len - 1]
        else:
          # Get the candidate with the highest score
          candidates.pop()

      # TODO: Merge the per-block candidate structure with the candidates one,
      #       score possible on-chain attestations while collecting candidates
      #       (previous loop) and re-evaluate the cache key definition
      let
        key = (entry.data.beacon_block_root, entry.data.slot)
        newAtt = entry[].toElectraAttestation(entry[].aggregates[j])

      candidatesPerBlock.withValue(key, candidate):
        candidate[].add newAtt
      do:
        candidatesPerBlock[key] = @[newAtt]

      # Update cache so that the new votes are taken into account when updating
      # the score below
      attCache.add(entry[].data, entry[].aggregates[j].aggregation_bits)

      entry[].data.getAttestationCacheKey

    block:
      # Because we added some votes, it's quite possible that some candidates
      # are no longer interesting - update the scores of the existing candidates
      for it in candidates.mitems():
        # Aggregates not on the same (slot, committee) pair don't change scores
        if it.entry[].data.getAttestationCacheKey != entryCacheKey:
          continue

        it.score = attCache.score(
          it.entry[].data,
          it.entry[].aggregates[it.validation].aggregation_bits)

      candidates.keepItIf:
        # Only keep candidates that might add coverage
        it.score > 0

    # Sort candidates by score, using slot as a tie-breaker
    candidates.sort()

  # Consolidate attestation aggregates with disjoint committee bits into a
  # single attestation
  var res: seq[electra.Attestation]
  for a in candidatesPerBlock.values():
    if a.len > 1:
      let
        att = compute_on_chain_aggregate(a).valueOr:
          continue
      res.add(att)
    # no on-chain candidates to consolidate
    else:
      res.add(a)

    if res.lenu64 == MAX_ATTESTATIONS_ELECTRA:
      break

  let
    packingDur = Moment.now() - startPackingTick

  debug "Packed attestations for block",
    newBlockSlot, packingDur, totalCandidates, attestations = res.len()
  attestation_pool_block_attestation_packing_time.set(
    packingDur.toFloatSeconds())

  res
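# Note: with the mainnet presets MAX_ATTESTATIONS_ELECTRA is 8 and
# MAX_COMMITTEES_PER_SLOT is 64, so up to 8 * 64 = 512 per-committee aggregates
# may be collected before compute_on_chain_aggregate consolidates those voting
# for the same (block root, slot) into at most 8 on-chain attestations.
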
proc getElectraAttestationsForBlock*(
    pool: var AttestationPool, state: ForkedHashedBeaconState,
    cache: var StateCache): seq[electra.Attestation] =
  withState(state):
    when consensusFork >= ConsensusFork.Electra:
      pool.getElectraAttestationsForBlock(forkyState, cache)
    else:
      default(seq[electra.Attestation])

func bestValidation(
    aggregates: openArray[Phase0Validation | ElectraValidation]): (int, int) =
  # Look for best validation based on number of votes in the aggregate
  doAssert aggregates.len() > 0,
    "updateAggregates should have created at least one aggregate"
  var
    bestIndex = 0
    best = aggregates[bestIndex].aggregation_bits.countOnes()

  for i in 1..<aggregates.len():
    let count = aggregates[i].aggregation_bits.countOnes()
    if count > best:
      best = count
      bestIndex = i
  (bestIndex, best)
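# Example: for aggregates carrying 3, 7 and 5 votes respectively, this returns
# (1, 7) - the position of the fullest aggregate together with its vote count.
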
func getElectraAggregatedAttestation*(
    pool: var AttestationPool, slot: Slot,
    attestationDataRoot: Eth2Digest, committeeIndex: CommitteeIndex):
    Opt[electra.Attestation] =
  let candidateIdx = pool.candidateIdx(slot)
  if candidateIdx.isNone:
    return Opt.none(electra.Attestation)

  var res: Opt[electra.Attestation]
  for _, entry in pool.electraCandidates[candidateIdx.get].mpairs():
    if entry.data.index != committeeIndex.distinctBase:
      continue

    entry.updateAggregates()

    let (bestIndex, best) = bestValidation(entry.aggregates)

    if res.isNone() or best > res.get().aggregation_bits.countOnes():
      res = Opt.some(entry.toElectraAttestation(entry.aggregates[bestIndex]))

  res

func getAggregatedAttestation*(
    pool: var AttestationPool, slot: Slot, attestation_data_root: Eth2Digest):
    Opt[phase0.Attestation] =
  let
    candidateIdx = pool.candidateIdx(slot)
  if candidateIdx.isNone:
    return Opt.none(phase0.Attestation)

  pool.phase0Candidates[candidateIdx.get].withValue(
      attestation_data_root, entry):
    entry[].updateAggregates()

    let (bestIndex, _) = bestValidation(entry[].aggregates)

    # Found the right hash, no need to look further
    return Opt.some(entry[].toAttestation(entry[].aggregates[bestIndex]))

  Opt.none(phase0.Attestation)

func getAggregatedAttestation*(
    pool: var AttestationPool, slot: Slot, index: CommitteeIndex):
    Opt[phase0.Attestation] =
  ## Select the attestation that has the most votes going for it in the given
  ## slot/index
  ## https://github.com/ethereum/consensus-specs/blob/v1.4.0/specs/phase0/validator.md#construct-aggregate
  let candidateIdx = pool.candidateIdx(slot)
  if candidateIdx.isNone:
    return Opt.none(phase0.Attestation)

  var res: Opt[phase0.Attestation]
  for _, entry in pool.phase0Candidates[candidateIdx.get].mpairs():
    doAssert entry.data.slot == slot
    if index != entry.data.index:
      continue

    entry.updateAggregates()

    let (bestIndex, best) = bestValidation(entry.aggregates)

    if res.isNone() or best > res.get().aggregation_bits.countOnes():
      res = Opt.some(entry.toAttestation(entry.aggregates[bestIndex]))

  res

type BeaconHead* = object
  blck*: BlockRef
  safeExecutionBlockHash*, finalizedExecutionBlockHash*: Eth2Digest

proc getBeaconHead*(
    pool: AttestationPool, headBlock: BlockRef): BeaconHead =
  let
    finalizedExecutionBlockHash =
      pool.dag.loadExecutionBlockHash(pool.dag.finalizedHead.blck)
        .get(ZERO_HASH)

    # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.3/fork_choice/safe-block.md#get_safe_execution_payload_hash
    safeBlockRoot = pool.forkChoice.get_safe_beacon_block_root()
    safeBlock = pool.dag.getBlockRef(safeBlockRoot)
    safeExecutionBlockHash =
      if safeBlock.isErr:
        # Safe block is currently the justified block determined by fork choice.
        # If finality already advanced beyond the current justified checkpoint,
        # e.g., because we have selected a head that did not yet realize the cp,
        # the justified block may end up not having a `BlockRef` anymore.
        # Because we know that a different fork already finalized a later point,
        # let's just report the finalized execution payload hash instead.
        finalizedExecutionBlockHash
      else:
        pool.dag.loadExecutionBlockHash(safeBlock.get)
          .get(finalizedExecutionBlockHash)

  BeaconHead(
    blck: headBlock,
    safeExecutionBlockHash: safeExecutionBlockHash,
    finalizedExecutionBlockHash: finalizedExecutionBlockHash)

proc selectOptimisticHead*(
    pool: var AttestationPool, wallTime: BeaconTime): Opt[BeaconHead] =
  ## Trigger fork choice and return the new head block.
  let newHeadRoot = pool.forkChoice.get_head(pool.dag, wallTime)
  if newHeadRoot.isErr:
    error "Couldn't select head", err = newHeadRoot.error
    return err()

  let headBlock = pool.dag.getBlockRef(newHeadRoot.get()).valueOr:
    # This should normally not happen, but if the chain dag and fork choice
    # get out of sync, we'll need to try to download the selected head - in
    # the meantime, return nil to indicate that no new head was chosen
    warn "Fork choice selected unknown head, trying to sync",
      root = newHeadRoot.get()
    pool.quarantine[].addMissing(newHeadRoot.get())
    return err()

  ok pool.getBeaconHead(headBlock)

proc prune*(pool: var AttestationPool) =
  if (let v = pool.forkChoice.prune(); v.isErr):
    # If pruning fails, it's likely the result of a bug - this shouldn't happen,
    # but we'll keep running, hoping that the fork choice will recover eventually
    error "Couldn't prune fork choice, bug?", err = v.error()

proc validatorSeenAtEpoch*(pool: AttestationPool, epoch: Epoch,
                           vindex: ValidatorIndex): bool =
  if uint64(vindex) < lenu64(pool.nextAttestationEpoch):
    let mark = pool.nextAttestationEpoch[vindex]
    (mark.subnet > epoch) or (mark.aggregate > epoch)
  else:
    false