# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard libraries
  std/[options, tables, sequtils],
  # Status libraries
  chronicles, stew/byteutils, json_serialization/std/sets as jsonSets,
  # Internal
  ../spec/[beaconstate, datatypes, crypto, digest],
  ../ssz/merkleization,
  "."/[spec_cache, blockchain_dag, block_quarantine],
  ../beacon_node_types,
  ../fork_choice/fork_choice

export beacon_node_types

logScope: topics = "attpool"

proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: QuarantineRef): T =
  ## Initialize an AttestationPool from the chainDag `headState`
  ## The `finalized_root` works around the finalized_checkpoint of the genesis block
  ## holding a zero_root.
  let finalizedEpochRef = chainDag.getFinalizedEpochRef()

  var forkChoice = ForkChoice.init(
    finalizedEpochRef,
    chainDag.finalizedHead.blck)

  # Feed fork choice with unfinalized history - during startup, block pool only
  # keeps track of a single history so we just need to follow it
  doAssert chainDag.heads.len == 1, "Init only supports a single history"

  var blocks: seq[BlockRef]
  var cur = chainDag.head

  # When the chain is finalizing, the votes between the head block and the
  # finalized checkpoint should be enough for a stable fork choice - when the
  # chain is not finalizing, we want to seed it with as many votes as possible
  # since the whole history of each branch might be significant. It is however
  # a game of diminishing returns, and we have to weigh it against the time
  # it takes to replay that many blocks during startup and thus miss _new_
  # votes.
  const ForkChoiceHorizon = 256
  while cur != chainDag.finalizedHead.blck:
    blocks.add cur
    cur = cur.parent

  info "Initializing fork choice", unfinalized_blocks = blocks.len

  var epochRef = finalizedEpochRef
  for i in 0..<blocks.len:
    let
      blck = blocks[blocks.len - i - 1]
      status =
        if i < (blocks.len - ForkChoiceHorizon) and (i mod 1024 != 0):
          # Fork choice needs to know about the full block tree up to the
          # finalization point, but doesn't really need to have overly accurate
          # justification and finalization points until we get close to head -
          # nonetheless, we'll make sure to pass a fresh finalization point now
          # and then to make sure the fork choice data structure doesn't grow
          # too big - getting an EpochRef can be expensive.
          forkChoice.backend.process_block(
            blck.root, blck.parent.root,
            epochRef.current_justified_checkpoint.epoch,
            epochRef.finalized_checkpoint.epoch)
        else:
          epochRef = chainDag.getEpochRef(blck, blck.slot.epoch)
          forkChoice.process_block(
            chainDag, epochRef, blck, chainDag.get(blck).data.message, blck.slot)

    doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error

  info "Fork choice initialized",
    justified_epoch = getStateField(
      chainDag.headState, current_justified_checkpoint).epoch,
    finalized_epoch = getStateField(
      chainDag.headState, finalized_checkpoint).epoch,
    finalized_root = shortLog(chainDag.finalizedHead.blck.root)

  T(
    chainDag: chainDag,
    quarantine: quarantine,
    forkChoice: forkChoice
  )

proc addForkChoiceVotes(
    pool: var AttestationPool, slot: Slot, participants: seq[ValidatorIndex],
    block_root: Eth2Digest, wallSlot: Slot) =
  # Add attestation votes to fork choice
  if (let v = pool.forkChoice.on_attestation(
    pool.chainDag, slot, block_root, participants, wallSlot);
    v.isErr):
    # This indicates that the fork choice and the chain dag are out of sync -
    # this is most likely the result of a bug, but we'll try to keep going -
    # hopefully the fork choice will heal itself over time.
    error "Couldn't add attestation to fork choice, bug?", err = v.error()

func candidateIdx(pool: AttestationPool, slot: Slot): Option[uint64] =
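  ## Map `slot` to an index in the pool's circular candidate buffer, or `none`
  ## if the slot falls outside the window the pool currently covers - with a
  ## buffer of N entries, slots [startingSlot, startingSlot + N) map to
  ## `slot mod N`.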
  if slot >= pool.startingSlot and
      slot < (pool.startingSlot + pool.candidates.lenu64):
    some(slot mod pool.candidates.lenu64)
  else:
    none(uint64)

proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
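  ## Advance the pool's window so that it ends at `wallSlot`, clearing out
  ## candidate entries and aggregates that are too old to be included in
  ## future blocks.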
  if wallSlot + 1 < pool.candidates.lenu64:
    return

  if pool.startingSlot + pool.candidates.lenu64 - 1 > wallSlot:
    error "Current slot older than attestation pool view, clock reset?",
      poolSlot = pool.startingSlot, wallSlot
    return

  # As time passes we'll clear out any old attestations as they are no longer
  # viable to be included in blocks

  let newWallSlot = wallSlot + 1 - pool.candidates.lenu64
  for i in pool.startingSlot..newWallSlot:
    pool.candidates[i.uint64 mod pool.candidates.lenu64] = AttestationsSeen()

  pool.startingSlot = newWallSlot

  # now also clear old aggregated attestations
  var keysToRemove: seq[Slot] = @[]
  for k, v in pool.attestationAggregates.pairs:
    if k < pool.startingSlot:
      keysToRemove.add k
  for k in keysToRemove:
    pool.attestationAggregates.del k

func addToAggregates(pool: var AttestationPool, attestation: Attestation) =
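  ## Fold `attestation` into the per-slot table of aggregates: if an aggregate
  ## with the same `AttestationData` root already exists and its aggregation
  ## bits don't overlap with the new ones, the signatures are combined - e.g.
  ## bits 0b0011 and 0b1100 merge into 0b1111, while 0b0011 and 0b0110 cannot
  ## be merged.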
  # do a lookup for the current slot and get its associated htrs/attestations
  var aggregated_attestation = pool.attestationAggregates.mgetOrPut(
    attestation.data.slot, Table[Eth2Digest, Attestation]()).
    # do a lookup for the same attestation data htr and get the attestation
    mgetOrPut(attestation.data.hash_tree_root, attestation)
  # if the aggregation bits differ (we didn't just insert it into the table)
  # and only if there is no overlap of the signatures ==> aggregate!
  if not aggregated_attestation.aggregation_bits.overlaps(attestation.aggregation_bits):
    var agg {.noInit.}: AggregateSignature
    agg.init(aggregated_attestation.signature)
    aggregated_attestation.aggregation_bits.combine(attestation.aggregation_bits)
    agg.aggregate(attestation.signature)
    aggregated_attestation.signature = agg.finish()

proc addAttestation*(pool: var AttestationPool,
                     attestation: Attestation,
                     participants: seq[ValidatorIndex],
                     signature: CookedSig,
                     wallSlot: Slot) =
  ## Add an attestation to the pool, assuming it's been validated already.
  ## Attestations may be either aggregated or not - we're pursuing an eager
  ## strategy where we'll drop validations we already knew about and combine
  ## the new attestation with an existing one if possible.
  ##
  ## This strategy is not optimal in the sense that it would be possible to find
  ## a better packing of attestations by delaying the aggregation, but because
  ## it's possible to include more than one aggregate in a block we can be
  ## somewhat lazy instead of looking for a perfect packing.
  logScope:
    attestation = shortLog(attestation)

  updateCurrent(pool, wallSlot)

  let candidateIdx = pool.candidateIdx(attestation.data.slot)
  if candidateIdx.isNone:
    debug "Skipping old attestation for block production",
      startingSlot = pool.startingSlot
    return

  pool.addToAggregates(attestation)

  let
    attestationsSeen = addr pool.candidates[candidateIdx.get]
  # Only attestations with valid signatures get here

  template getValidation(): auto =
    doAssert attestation.signature == signature.exportRaw
    Validation(
      aggregation_bits: attestation.aggregation_bits,
      aggregate_signature: signature,
      aggregate_signature_raw: attestation.signature)

  var found = false
  for a in attestationsSeen.attestations.mitems():
    if a.data == attestation.data:
      for v in a.validations:
        if attestation.aggregation_bits.isSubsetOf(v.aggregation_bits):
          # The validations in the new attestation are a subset of one of the
          # attestations that we already have on file - no need to add this
          # attestation to the database
          trace "Ignoring subset attestation", newParticipants = participants
          found = true
          break

      if not found:
        # Attestations in the pool that are a subset of the new attestation
        # can now be removed per same logic as above

        trace "Removing subset attestations", newParticipants = participants

        a.validations.keepItIf(
          not it.aggregation_bits.isSubsetOf(attestation.aggregation_bits))

        a.validations.add(getValidation())
        pool.addForkChoiceVotes(
          attestation.data.slot, participants, attestation.data.beacon_block_root,
          wallSlot)

        debug "Attestation resolved",
          attestation = shortLog(attestation),
          validations = a.validations.len()

        found = true

      break

  if not found:
    attestationsSeen.attestations.add(AttestationEntry(
      data: attestation.data,
      validations: @[getValidation()],
      aggregation_bits: attestation.aggregation_bits
    ))
    pool.addForkChoiceVotes(
      attestation.data.slot, participants, attestation.data.beacon_block_root,
      wallSlot)

    debug "Attestation resolved",
      attestation = shortLog(attestation),
      validations = 1

proc addForkChoice*(pool: var AttestationPool,
                    epochRef: EpochRef,
                    blckRef: BlockRef,
                    blck: TrustedBeaconBlock,
                    wallSlot: Slot) =
  ## Add a verified block to the fork choice context
  let state = pool.forkChoice.process_block(
    pool.chainDag, epochRef, blckRef, blck, wallSlot)

  if state.isErr:
    # This indicates that the fork choice and the chain dag are out of sync -
    # this is most likely the result of a bug, but we'll try to keep going -
    # hopefully the fork choice will heal itself over time.
    error "Couldn't add block to fork choice, bug?",
      blck = shortLog(blck), err = state.error

proc getAttestationsForSlot(pool: AttestationPool, newBlockSlot: Slot):
    Option[AttestationsSeen] =
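  ## Return the attestations that a block proposed at `newBlockSlot` may
  ## include, i.e. those seen for slot
  ## `newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY`.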
  if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY):
    debug "Too early for attestations", newBlockSlot = shortLog(newBlockSlot)
    return none(AttestationsSeen)

  let
    attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
    candidateIdx = pool.candidateIdx(attestationSlot)

  if candidateIdx.isNone:
    trace "No attestations matching the slot range",
      attestationSlot = shortLog(attestationSlot),
      startingSlot = shortLog(pool.startingSlot)
    return none(AttestationsSeen)

  some(pool.candidates[candidateIdx.get()])

iterator attestations*(pool: AttestationPool, slot: Option[Slot],
                       index: Option[CommitteeIndex]): Attestation =
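  ## Yield all attestations known to the pool, optionally filtered by slot
  ## and/or committee index - an unset filter matches everything.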
  for seenAttestations in pool.candidates.items():
    for entry in seenAttestations.attestations.items():
      let slotInclude =
        if slot.isSome():
          entry.data.slot == slot.get()
        else:
          true
      let committeeInclude =
        if index.isSome():
          CommitteeIndex(entry.data.index) == index.get()
        else:
          true
      if slotInclude and committeeInclude:
        for validation in entry.validations.items():
          yield Attestation(
            aggregation_bits: validation.aggregation_bits,
            data: entry.data,
            signature: validation.aggregate_signature_raw
          )

func getAttestationDataKey(ad: AttestationData): AttestationDataKey =
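  ## Reduce `ad` to the (slot, index, source epoch, target epoch) tuple used
  ## to key the attested-validators cache.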
  # This determines the rest of the AttestationData
  (ad.slot, ad.index, ad.source.epoch, ad.target.epoch)

func incorporateCacheAttestation(
    pool: var AttestationPool, attestation: PendingAttestation) =
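  ## Merge the aggregation bits of `attestation` into the attested-validators
  ## cache entry for its data key, creating the entry if it doesn't exist.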
  let key = attestation.data.getAttestationDataKey
  try:
    var validatorBits = pool.attestedValidators[key]
    validatorBits.combine(attestation.aggregation_bits)
    pool.attestedValidators[key] = validatorBits
  except KeyError:
    pool.attestedValidators[key] = attestation.aggregation_bits

func populateAttestationCache(pool: var AttestationPool, state: BeaconState) =
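  ## Rebuild the attested-validators cache from scratch, based on the pending
  ## attestations currently recorded in `state`.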
  pool.attestedValidators.clear()

  for pendingAttestation in state.previous_epoch_attestations:
    pool.incorporateCacheAttestation(pendingAttestation)

  for pendingAttestation in state.current_epoch_attestations:
    pool.incorporateCacheAttestation(pendingAttestation)

func updateAttestationsCache(pool: var AttestationPool,
                             state: BeaconState) =
  # Since the last request there have likely been additional attestations
  # integrated into the BeaconState - by block production or an epoch change -
  # or even a tree restructuring, after which the BeaconState has nothing in
  # common with the one seen last time.
  if (
      (pool.lastPreviousEpochAttestationsLen == 0 or
        (pool.lastPreviousEpochAttestationsLen <= state.previous_epoch_attestations.len and
          pool.lastPreviousEpochAttestation ==
            state.previous_epoch_attestations[pool.lastPreviousEpochAttestationsLen - 1])) and
      (pool.lastCurrentEpochAttestationsLen == 0 or
        (pool.lastCurrentEpochAttestationsLen <= state.current_epoch_attestations.len and
          pool.lastCurrentEpochAttestation ==
            state.current_epoch_attestations[pool.lastCurrentEpochAttestationsLen - 1]))
    ):
    # There are multiple validators attached to this node proposing in the
    # same epoch. As such, incorporate that new attestation. Both previous
    # and current attestations lists might have been appended to.
    for i in pool.lastPreviousEpochAttestationsLen ..<
        state.previous_epoch_attestations.len:
      pool.incorporateCacheAttestation(state.previous_epoch_attestations[i])
    for i in pool.lastCurrentEpochAttestationsLen ..<
        state.current_epoch_attestations.len:
      pool.incorporateCacheAttestation(state.current_epoch_attestations[i])
  else:
    # Tree restructuring or other cache flushing event. This must trigger
    # sometimes to clear old attestations.
    pool.populateAttestationCache(state)

  pool.lastPreviousEpochAttestationsLen = state.previous_epoch_attestations.len
  pool.lastCurrentEpochAttestationsLen = state.current_epoch_attestations.len
  if pool.lastPreviousEpochAttestationsLen > 0:
    pool.lastPreviousEpochAttestation =
      state.previous_epoch_attestations[pool.lastPreviousEpochAttestationsLen - 1]
  if pool.lastCurrentEpochAttestationsLen > 0:
    pool.lastCurrentEpochAttestation =
      state.current_epoch_attestations[pool.lastCurrentEpochAttestationsLen - 1]

proc getAttestationsForBlock*(pool: var AttestationPool,
                              state: BeaconState,
                              cache: var StateCache): seq[Attestation] =
  ## Retrieve attestations that may be added to a new block at the slot of the
  ## given state
  let newBlockSlot = state.slot.uint64
  var attestations: seq[AttestationEntry]

  pool.updateAttestationsCache(state)

  # Consider attestations from the current slot and ranging back up to
  # ATTESTATION_LOOKBACK slots, excluding the special genesis slot. As
  # unsigned subtraction (mostly avoided in this codebase, partly as a
  # consequence) will otherwise wrap through zero, clamp the value being
  # subtracted so that slots through ATTESTATION_LOOKBACK don't do so.
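  # For example, when newBlockSlot is smaller than ATTESTATION_LOOKBACK,
  # min(newBlockSlot, ATTESTATION_LOOKBACK) equals newBlockSlot, the
  # subtraction yields 0 and the range below starts at max(1'u64, 0) = 1
  # instead of wrapping around to a huge uint64.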
  for i in max(
      1'u64, newBlockSlot - min(newBlockSlot, ATTESTATION_LOOKBACK)) ..
      newBlockSlot:
    let maybeSlotData = getAttestationsForSlot(pool, i.Slot)
    if maybeSlotData.isSome:
      insert(attestations, maybeSlotData.get.attestations)

  if attestations.len == 0:
    return

  for a in attestations:
    var
      # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#construct-attestation
      attestation = Attestation(
        aggregation_bits: a.validations[0].aggregation_bits,
        data: a.data,
        signature: a.validations[0].aggregate_signature_raw
      )

      agg {.noInit.}: AggregateSignature
    agg.init(a.validations[0].aggregate_signature)

    # Signature verification here is more of a sanity check - it could
    # be optimized away, though for now it's easier to reuse the logic from
    # the state transition function to ensure that the new block will not
    # fail application.
    if (let v = check_attestation(state, attestation, {}, cache); v.isErr):
      warn "Attestation no longer validates...",
        attestation = shortLog(attestation),
        err = v.error

      continue

    for i in 1..a.validations.high:
      if not attestation.aggregation_bits.overlaps(
          a.validations[i].aggregation_bits):
        attestation.aggregation_bits.combine(a.validations[i].aggregation_bits)
        agg.aggregate(a.validations[i].aggregate_signature)

    # Since each validator attests exactly once per epoch and its attestation
    # has been validated to have been included in the attestation pool, there
    # only exists one possible slot/committee combination to check.
    try:
      if a.aggregation_bits.isSubsetOf(
          pool.attestedValidators[a.data.getAttestationDataKey]):
        continue
    except KeyError:
      # No record of inclusion, so go ahead and include attestation.
      discard

    attestation.signature = agg.finish()
    result.add(attestation)

    if result.lenu64 >= MAX_ATTESTATIONS:
      debug "getAttestationsForBlock: returning early after hitting MAX_ATTESTATIONS",
        attestationSlot = newBlockSlot - 1
      return

func getAggregatedAttestation*(pool: AttestationPool,
                               slot: Slot,
                               ad_htr: Eth2Digest): Option[Attestation] =
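  ## Look up the aggregate attestation at `slot` whose `AttestationData`
  ## hash_tree_root is `ad_htr`, as kept up to date by `addToAggregates`.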
  try:
    if pool.attestationAggregates.contains(slot) and
        pool.attestationAggregates[slot].contains(ad_htr):
      return some pool.attestationAggregates[slot][ad_htr]
  except KeyError:
    doAssert(false) # shouldn't be possible because we check with `contains`
  none(Attestation)

proc getAggregatedAttestation*(pool: AttestationPool,
                               slot: Slot,
                               index: CommitteeIndex): Option[Attestation] =
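  ## Build an aggregate of all validations seen for the given slot and
  ## committee index, or `none` if no matching attestations are known.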
  let attestations = pool.getAttestationsForSlot(
    slot + MIN_ATTESTATION_INCLUSION_DELAY)
  if attestations.isNone:
    return none(Attestation)

  for a in attestations.get.attestations:
    doAssert a.data.slot == slot
    if index.uint64 != a.data.index:
      continue

    var
      # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#construct-attestation
      attestation = Attestation(
        aggregation_bits: a.validations[0].aggregation_bits,
        data: a.data,
        signature: a.validations[0].aggregate_signature_raw
      )

      agg {.noInit.}: AggregateSignature

    agg.init(a.validations[0].aggregate_signature)
    for v in a.validations[1..^1]:
      if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
        attestation.aggregation_bits.combine(v.aggregation_bits)
        agg.aggregate(v.aggregate_signature)

    attestation.signature = agg.finish()

    return some(attestation)

  none(Attestation)

proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef =
  ## Trigger fork choice and return the new head block.
  ## Can return `nil`
  let newHead = pool.forkChoice.get_head(pool.chainDag, wallSlot)

  if newHead.isErr:
    error "Couldn't select head", err = newHead.error
    nil
  else:
    let ret = pool.chainDag.getRef(newHead.get())
    if ret.isNil:
      # This should normally not happen, but if the chain dag and fork choice
      # get out of sync, we'll need to try to download the selected head - in
      # the meantime, return nil to indicate that no new head was chosen
      warn "Fork choice selected unknown head, trying to sync", root = newHead.get()
      pool.quarantine.addMissing(newHead.get())

    ret

proc prune*(pool: var AttestationPool) =
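  ## Prune unneeded history from the fork choice data structures.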
  if (let v = pool.forkChoice.prune(); v.isErr):
    # If pruning fails, it's likely the result of a bug - this shouldn't happen
    # but we'll keep running hoping that the fork choice will recover eventually
    error "Couldn't prune fork choice, bug?", err = v.error()