# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard libraries
  std/[algorithm, deques, sequtils, sets, tables, options],
  # Status libraries
  chronicles, stew/[byteutils], json_serialization/std/sets as jsonSets,
  # Internal
  ./spec/[beaconstate, datatypes, crypto, digest, helpers],
  ./block_pools/[spec_cache, chain_dag, clearance], ./beacon_node_types,
  ./fork_choice/fork_choice

export beacon_node_types, sets

logScope: topics = "attpool"

proc init*(T: type AttestationPool, chainDag: ChainDAGRef, quarantine: QuarantineRef): T =
  ## Initialize an AttestationPool from the chainDag `headState`
  ## The `finalized_root` works around the finalized_checkpoint of the genesis
  ## block holding a zero_root.
  # TODO chainDag/quarantine are only used when resolving orphaned attestations - they
  #      should probably be removed as a dependency of AttestationPool (or some other
  #      smart refactoring)

  let
    finalizedEpochRef = chainDag.getEpochRef(
      chainDag.finalizedHead.blck, chainDag.finalizedHead.slot.epoch())

  var forkChoice = ForkChoice.init(
    finalizedEpochRef, chainDag.finalizedHead.blck)

  # Feed fork choice with unfinalized history - during startup, block pool only
  # keeps track of a single history so we just need to follow it
  doAssert chainDag.heads.len == 1, "Init only supports a single history"

  var blocks: seq[BlockRef]
  var cur = chainDag.head
  while cur != chainDag.finalizedHead.blck:
    blocks.add cur
    cur = cur.parent

  debug "Preloading fork choice with blocks", blocks = blocks.len

  for blck in reversed(blocks):
    let
      epochRef = chainDag.getEpochRef(blck, blck.slot.compute_epoch_at_slot)
      status =
        forkChoice.process_block(
          chainDag, epochRef, blck, chainDag.get(blck).data.message, blck.slot)

    doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error

  info "Fork choice initialized",
    justified_epoch = chainDag.headState.data.data.current_justified_checkpoint.epoch,
    finalized_epoch = chainDag.headState.data.data.finalized_checkpoint.epoch,
    finalized_root = shortLog(chainDag.finalizedHead.blck.root)

  T(
    chainDag: chainDag,
    quarantine: quarantine,
    unresolved: initTable[Eth2Digest, UnresolvedAttestation](),
    forkChoice: forkChoice
  )
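
# Usage sketch (illustrative, not from the original sources): the pool is
# typically created once the DAG knows its finalized head, e.g.
#   var pool = AttestationPool.init(chainDag, quarantine)
# after which attestations and blocks are fed in via `addAttestation` and
# `addForkChoice`, and block production reads back aggregates via
# `getAttestationsForBlock`.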

proc processAttestation(
    pool: var AttestationPool, slot: Slot, participants: HashSet[ValidatorIndex],
    block_root: Eth2Digest, target: Checkpoint, wallSlot: Slot) =
  # Add attestation votes to fork choice
  if (let v = pool.forkChoice.on_attestation(
    pool.chainDag, slot, block_root, toSeq(participants), target, wallSlot);
    v.isErr):
      warn "Couldn't process attestation", err = v.error()

func addUnresolved*(pool: var AttestationPool, attestation: Attestation) =
  pool.unresolved[attestation.data.beacon_block_root] =
    UnresolvedAttestation(
      attestation: attestation,
    )

func candidateIdx(pool: AttestationPool, slot: Slot): Option[uint64] =
  if slot >= pool.startingSlot and
      slot < (pool.startingSlot + pool.candidates.lenu64):
    some(slot mod pool.candidates.lenu64)
  else:
    none(uint64)
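
# Worked example (illustrative numbers): with `startingSlot == 100` and
# `candidates.len == 64`, slots 100..163 fall inside the window and each maps
# to index `slot mod 64` in the ring buffer, while slots 99 and 164 are out of
# range and yield `none(uint64)`.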

proc updateCurrent(pool: var AttestationPool, wallSlot: Slot) =
  if wallSlot + 1 < pool.candidates.lenu64:
    return

  if pool.startingSlot + pool.candidates.lenu64 - 1 > wallSlot:
    error "Current slot older than attestation pool view, clock reset?",
      poolSlot = pool.startingSlot, wallSlot
    return

  # As time passes we'll clear out any old attestations as they are no longer
  # viable to be included in blocks
  let newWallSlot = wallSlot + 1 - pool.candidates.lenu64
  for i in pool.startingSlot..newWallSlot:
    pool.candidates[i.uint64 mod pool.candidates.lenu64] = AttestationsSeen()

  pool.startingSlot = newWallSlot
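
# Worked example (illustrative numbers): with `candidates.len == 64` and
# `wallSlot == 200`, `newWallSlot` becomes 137, so the view covers slots
# 137..200 and the buckets of slots that have fallen out of the window are
# reset to empty `AttestationsSeen`.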

proc addResolved(
    pool: var AttestationPool, blck: BlockRef, attestation: Attestation,
    wallSlot: Slot) =
  # Add an attestation whose parent we know
  logScope:
    attestation = shortLog(attestation)

  updateCurrent(pool, wallSlot)

  doAssert blck.root == attestation.data.beacon_block_root

  let candidateIdx = pool.candidateIdx(attestation.data.slot)
  if candidateIdx.isNone:
    debug "Attestation slot out of range",
      startingSlot = pool.startingSlot
    return

  let
    epochRef = pool.chainDag.getEpochRef(blck, attestation.data.target.epoch)
    attestationsSeen = addr pool.candidates[candidateIdx.get]
    validation = Validation(
      aggregation_bits: attestation.aggregation_bits,
      aggregate_signature: attestation.signature)
    participants = get_attesting_indices(
      epochRef, attestation.data, validation.aggregation_bits)

  var found = false
  for a in attestationsSeen.attestations.mitems():
    if a.data == attestation.data:
      for v in a.validations:
        if validation.aggregation_bits.isSubsetOf(v.aggregation_bits):
          # The validations in the new attestation are a subset of one of the
          # attestations that we already have on file - no need to add this
          # attestation to the database
          # TODO what if the new attestation is useful for creating bigger
          #      sets by virtue of not overlapping with some other attestation
          #      and therefore being useful after all?
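          # Illustrative example: aggregation bits 0b0011 are a subset of
          # 0b0111, so an attestation carrying only those votes adds no new
          # information to the pool.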
          trace "Ignoring subset attestation",
            newParticipants = participants
          found = true
          break

      if not found:
        # Attestations in the pool that are a subset of the new attestation
        # can now be removed per same logic as above

        trace "Removing subset attestations",
          newParticipants = participants

        a.validations.keepItIf(
          not it.aggregation_bits.isSubsetOf(validation.aggregation_bits))

        a.validations.add(validation)
        pool.processAttestation(
          attestation.data.slot, participants, attestation.data.beacon_block_root,
          attestation.data.target, wallSlot)

        info "Attestation resolved",
          attestation = shortLog(attestation),
          validations = a.validations.len(),
          blockSlot = shortLog(blck.slot)

        found = true

      break

  if not found:
    attestationsSeen.attestations.add(AttestationEntry(
      data: attestation.data,
      blck: blck,
      validations: @[validation]
    ))
    pool.processAttestation(
      attestation.data.slot, participants, attestation.data.beacon_block_root,
      attestation.data.target, wallSlot)

    info "Attestation resolved",
      attestation = shortLog(attestation),
      validations = 1,
      blockSlot = shortLog(blck.slot)

proc addAttestation*(pool: var AttestationPool,
                     attestation: Attestation,
                     wallSlot: Slot) =
  ## Add a verified attestation to the fork choice context
  logScope: pcs = "atp_add_attestation"

  # Fetch the target block or notify the block pool that it's needed
  let blck = pool.chainDag.getOrResolve(
    pool.quarantine,
    attestation.data.beacon_block_root)

  # If the block exists, add it to the fork choice context
  # Otherwise delay until it resolves
  if blck.isNil:
    pool.addUnresolved(attestation)
    return

  pool.addResolved(blck, attestation, wallSlot)
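
# Flow note: attestations whose target block is not yet known are parked via
# `addUnresolved` and retried from `resolve`, which drops an entry once its
# `tries` counter exceeds 8.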

proc addForkChoice*(pool: var AttestationPool,
                    epochRef: EpochRef,
                    blckRef: BlockRef,
                    blck: BeaconBlock,
                    wallSlot: Slot) =
  ## Add a verified block to the fork choice context
  let state = pool.forkChoice.process_block(
    pool.chainDag, epochRef, blckRef, blck, wallSlot)

  if state.isErr:
    # TODO If this happens, it is effectively a bug - the BlockRef structure
    #      guarantees that the DAG is valid and the state transition should
    #      guarantee that the justified and finalized epochs are ok! However,
    #      we'll log it for now to avoid crashes
    error "Unexpected error when applying block",
      blck = shortLog(blck), err = state.error

proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
    Option[AttestationsSeen] =
  if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY):
    debug "Too early for attestations",
      newBlockSlot = shortLog(newBlockSlot)
    return none(AttestationsSeen)

  let
    attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY
    candidateIdx = pool.candidateIdx(attestationSlot)

  if candidateIdx.isNone:
    trace "No attestations matching the slot range",
      attestationSlot = shortLog(attestationSlot),
      startingSlot = shortLog(pool.startingSlot)
    return none(AttestationsSeen)

  some(pool.candidates[candidateIdx.get()])
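
# Example (assuming the phase0 mainnet preset, where
# MIN_ATTESTATION_INCLUSION_DELAY == 1): a block being built for slot 33 draws
# its attestations from the slot-32 candidate bucket.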

proc getAttestationsForBlock*(pool: AttestationPool,
                              state: BeaconState): seq[Attestation] =
  ## Retrieve attestations that may be added to a new block at the slot of the
  ## given state
  logScope: pcs = "retrieve_attestation"

  # TODO this shouldn't really need state -- it's to recheck/validate, but that
  #      should be refactored
  let newBlockSlot = state.slot
  var attestations: seq[AttestationEntry]

  # This potentially creates problems with lots of repeated attestations,
  # as a bunch of synchronized beacon_nodes do almost the opposite of the
  # intended thing -- sure, _blocks_ have to be popular (via attestation)
  # but _attestations_ shouldn't have to be so frequently repeated, as an
  # artifact of this state-free, identical-across-clones choice basis. In
  # addResolved, too, the new attestations get added to the end, while in
  # these functions, it's reading from the beginning, et cetera. This all
  # needs a single unified strategy.
  for i in max(1, newBlockSlot.int64 - ATTESTATION_LOOKBACK.int64) .. newBlockSlot.int64:
    let maybeSlotData = getAttestationsForSlot(pool, i.Slot)
    if maybeSlotData.isSome:
      insert(attestations, maybeSlotData.get.attestations)

  if attestations.len == 0:
    return

  var cache = StateCache()
  for a in attestations:
    var
      # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#construct-attestation
      attestation = Attestation(
        aggregation_bits: a.validations[0].aggregation_bits,
        data: a.data,
        signature: a.validations[0].aggregate_signature
      )

      agg {.noInit.}: AggregateSignature

    agg.init(a.validations[0].aggregate_signature)

    # TODO what's going on here is that when producing a block, we need to
    #      include only such attestations that will not cause block validation
    #      to fail. How this interacts with voting and the acceptance of
    #      attestations into the pool in general is an open question that needs
    #      revisiting - for example, when attestations are added, against which
    #      state should they be validated, if at all?
    # TODO we're checking signatures here every time, which is very slow - but
    #      we don't want to include a broken attestation
    if (let v = check_attestation(state, attestation, {}, cache); v.isErr):
      warn "Attestation no longer validates...",
        attestation = shortLog(attestation),
        err = v.error

      continue

    for v in a.validations[1..^1]:
      # TODO We need to select a set of attestations that maximise profit by
      #      adding the largest combined attestation set that we can find - this
      #      unfortunately looks an awful lot like
      #      https://en.wikipedia.org/wiki/Set_packing - here we just iterate
      #      and naively add as much as possible in one go, but we could also
      #      add the same attestation data twice, as long as there's at least
      #      one new attestation in there
      if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
        attestation.aggregation_bits.combine(v.aggregation_bits)
        agg.aggregate(v.aggregate_signature)

    attestation.signature = agg.finish()
    result.add(attestation)

    if result.lenu64 >= MAX_ATTESTATIONS:
      debug "getAttestationsForBlock: returning early after hitting MAX_ATTESTATIONS",
        attestationSlot = newBlockSlot - 1
      return
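
# Note: getAttestationsForBlock stops early once MAX_ATTESTATIONS entries have
# been collected (128 in the phase0 mainnet preset), since a block may not
# carry more attestations than that.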

proc getAggregatedAttestation*(pool: AttestationPool,
                               slot: Slot,
                               index: CommitteeIndex): Option[Attestation] =
  let attestations = pool.getAttestationsForSlot(
    slot + MIN_ATTESTATION_INCLUSION_DELAY)
  if attestations.isNone:
    return none(Attestation)

  for a in attestations.get.attestations:
    doAssert a.data.slot == slot
    if index.uint64 != a.data.index:
      continue

    var
      # https://github.com/ethereum/eth2.0-specs/blob/v0.12.2/specs/phase0/validator.md#construct-attestation
      attestation = Attestation(
        aggregation_bits: a.validations[0].aggregation_bits,
        data: a.data,
        signature: a.validations[0].aggregate_signature
      )

      agg {.noInit.}: AggregateSignature

    agg.init(a.validations[0].aggregate_signature)
    for v in a.validations[1..^1]:
      if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
        attestation.aggregation_bits.combine(v.aggregation_bits)
        agg.aggregate(v.aggregate_signature)

    attestation.signature = agg.finish()

    return some(attestation)

  none(Attestation)
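
# Usage sketch (illustrative): an aggregator for committee `index` at `slot`
# might call `pool.getAggregatedAttestation(slot, index)` and broadcast the
# result when it is `some`.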

proc resolve*(pool: var AttestationPool, wallSlot: Slot) =
  ## Check attestations in our unresolved table to see if they can be
  ## integrated into the fork choice
  logScope: pcs = "atp_resolve"

  var
    done: seq[Eth2Digest]
    resolved: seq[tuple[blck: BlockRef, attestation: Attestation]]

  for k, v in pool.unresolved.mpairs():
    if (let blck = pool.chainDag.getRef(k); not blck.isNil()):
      resolved.add((blck, v.attestation))
      done.add(k)
    elif v.tries > 8:
      done.add(k)
    else:
      inc v.tries

  for k in done:
    pool.unresolved.del(k)

  for a in resolved:
    pool.addResolved(a.blck, a.attestation, wallSlot)

proc selectHead*(pool: var AttestationPool, wallSlot: Slot): BlockRef =
  ## Trigger fork choice and return the new head block.
  ## Can return `nil`
  let newHead = pool.forkChoice.get_head(pool.chainDag, wallSlot)

  if newHead.isErr:
    error "Couldn't select head", err = newHead.error
    nil
  else:
    pool.chainDag.getRef(newHead.get())

proc prune*(pool: var AttestationPool) =
  if (let v = pool.forkChoice.prune(); v.isErr):
    error "Pruning failed", err = v.error() # TODO should never happen