# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard libraries
  deques, sequtils, tables, options,
  # Status libraries
  chronicles, stew/[byteutils], json_serialization/std/sets,
  # Internal
  ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
  ./extras, ./block_pool, ./block_pools/candidate_chains, ./beacon_node_types,
  ./fork_choice/fork_choice

logScope: topics = "attpool"

proc init*(T: type AttestationPool, blockPool: BlockPool): T =
  ## Initialize an AttestationPool from the blockPool `headState`
  ## The `finalized_root` works around the finalized_checkpoint of the genesis
  ## block holding a zero_root.
  # TODO blockPool is only used when resolving orphaned attestations - it should
  #      probably be removed as a dependency of AttestationPool (or some other
  #      smart refactoring)

  # TODO: Return Value Optimization

  # TODO: In tests, on blockpool.init the finalized root
  #       from the `headState` and `justifiedState` is zero
  var forkChoice = initForkChoice(
    finalized_block_slot = default(Slot), # not needed by fork choice, but may help external components, e.g. logging/debugging
    finalized_block_state_root = default(Eth2Digest), # not needed by fork choice, but may help external components, e.g. logging/debugging
    justified_epoch = blockPool.headState.data.data.current_justified_checkpoint.epoch,
    finalized_epoch = blockPool.headState.data.data.finalized_checkpoint.epoch,
    # We should use the checkpoint, but at genesis the headState finalized checkpoint is 0x0000...0000
    # finalized_root = blockPool.headState.data.data.finalized_checkpoint.root
    finalized_root = blockPool.finalizedHead.blck.root
  ).get()

  # Load all blocks since finalized head - TODO a proper test
  for blck in blockPool.dag.topoSortedSinceLastFinalization():
    if blck.root == blockPool.finalizedHead.blck.root:
      continue

    # BlockRef should ideally contain the justified_epoch and finalized_epoch
    # so that we can pass them directly to `process_block` without having to
    # redo "updateStateData"
    #
    # In any case, `updateStateData` should shortcut
    # to `getStateDataCached`
    updateStateData(
      blockPool,
      blockPool.tmpState,
      BlockSlot(blck: blck, slot: blck.slot)
    )

    debug "Preloading fork choice with block",
      block_root = shortlog(blck.root),
      parent_root = shortlog(blck.parent.root),
      justified_epoch = $blockPool.tmpState.data.data.current_justified_checkpoint.epoch,
      finalized_epoch = $blockPool.tmpState.data.data.finalized_checkpoint.epoch,
      slot = $blck.slot

    let status = forkChoice.process_block(
      block_root = blck.root,
      parent_root = blck.parent.root,
      justified_epoch = blockPool.tmpState.data.data.current_justified_checkpoint.epoch,
      finalized_epoch = blockPool.tmpState.data.data.finalized_checkpoint.epoch,
      # Unused in fork choice - i.e. for logging or caching extra metadata
      slot = blck.slot,
      state_root = default(Eth2Digest)
    )

    doAssert status.isOk(), "Error in preloading the fork choice: " & $status.error

  info "Fork choice initialized",
    justified_epoch = $blockPool.headState.data.data.current_justified_checkpoint.epoch,
    finalized_epoch = $blockPool.headState.data.data.finalized_checkpoint.epoch,
    finalized_root = shortlog(blockPool.finalizedHead.blck.root)

  T(
    mapSlotsToAttestations: initDeque[AttestationsSeen](),
    blockPool: blockPool,
    unresolved: initTable[Eth2Digest, UnresolvedAttestation](),
    forkChoice_v2: forkChoice
  )
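
# Illustrative wiring (sketch only, based on the procs defined in this module):
#   var pool = AttestationPool.init(blockPool)
#   pool.addAttestation(attestation)  # from gossip or from blocks
#   pool.resolve()                    # retry attestations whose block was unknown
#   let head = pool.selectHead()      # dual-headed fork choice, see below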

proc combine*(tgt: var Attestation, src: Attestation, flags: UpdateFlags) =
  ## Combine the signature and participation bitfield, with the assumption that
  ## the same data is being signed - if the signatures overlap, they are not
  ## combined.
  # TODO: Exported only for testing, all usage is internal

  doAssert tgt.data == src.data

  # In a BLS aggregate signature, one needs to count how many times a
  # particular public key has been added - since we use a single bit per key, we
  # can only add it once, thus we can never combine signatures that already
  # overlap!
  if not tgt.aggregation_bits.overlaps(src.aggregation_bits):
    tgt.aggregation_bits.combine(src.aggregation_bits)

    if skipBlsValidation notin flags:
      tgt.signature.aggregate(src.signature)
  else:
    trace "Ignoring overlapping attestations"
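
# For example, aggregation bits 0b0011 and 0b0100 do not overlap, so `combine`
# merges them into 0b0111 and aggregates the two signatures, whereas 0b0011 and
# 0b0010 overlap, so the incoming attestation is ignored rather than risk
# counting a public key twice.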

proc slotIndex(
    pool: var AttestationPool, state: BeaconState, attestationSlot: Slot): int =
  ## Grow and garbage collect the pool, returning the deque index of the slot

  # We keep a sliding window of attestations, roughly from the last finalized
  # epoch to now, because these are the attestations that may affect the voting
  # outcome. Some of these attestations will already have been added to blocks,
  # while others are fresh off the network.
  # TODO only the latest vote of each validator counts. Can we use that somehow?
  logScope: pcs = "atp_slot_maintenance"

  doAssert attestationSlot >= pool.startingSlot,
    """
    We should have checked in addResolved that the attestation is newer than
    finalized_slot and we never prune things before that, per the condition below!
    """ &
    ", attestationSlot: " & $shortLog(attestationSlot) &
    ", startingSlot: " & $shortLog(pool.startingSlot)

  if pool.mapSlotsToAttestations.len == 0:
    # Because the first attestations may arrive in any order, we'll make sure
    # to start counting at the last finalized epoch start slot - anything
    # earlier than that is thrown out by the above check
    info "First attestation!",
      attestationSlot = $shortLog(attestationSlot),
      cat = "init"
    pool.startingSlot =
      state.finalized_checkpoint.epoch.compute_start_slot_at_epoch()

  if pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
    trace "Growing attestation pool",
      attestationSlot = $shortLog(attestationSlot),
      startingSlot = $shortLog(pool.startingSlot),
      cat = "caching"

    # Make sure there's a pool entry for every slot, even when there's a gap
    while pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
      pool.mapSlotsToAttestations.addLast(AttestationsSeen())

  if pool.startingSlot <
      state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
    debug "Pruning attestation pool",
      startingSlot = $shortLog(pool.startingSlot),
      finalizedSlot = $shortLog(
        state.finalized_checkpoint.epoch.compute_start_slot_at_epoch()),
      cat = "pruning"

    # TODO there should be a better way to remove a whole epoch of stuff..
    while pool.startingSlot <
        state.finalized_checkpoint.epoch.compute_start_slot_at_epoch():
      pool.mapSlotsToAttestations.popFirst()
      pool.startingSlot += 1

  int(attestationSlot - pool.startingSlot)
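
# Note: the returned deque index is simply `attestationSlot - startingSlot`;
# e.g. with startingSlot == 64 (the finalized epoch start) and an attestation
# for slot 70, the deque is grown to 7 entries and slotIndex returns 6.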

func updateLatestVotes(
    pool: var AttestationPool, state: BeaconState, attestationSlot: Slot,
    participants: seq[ValidatorIndex], blck: BlockRef) =
  # ForkChoice v2
  let target_epoch = compute_epoch_at_slot(attestationSlot)

  for validator in participants:
    # ForkChoice v1
    let
      pubKey = state.validators[validator].pubkey
      current = pool.latestAttestations.getOrDefault(pubKey)
    if current.isNil or current.slot < attestationSlot:
      pool.latestAttestations[pubKey] = blck

    # # ForkChoice v2
    # pool.forkChoice_v2.process_attestation(validator, blck.root, target_epoch)
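
# This is the "latest message driven" part of LMD GHOST: only the most recent
# vote per validator is kept, so a newer attestation simply overwrites the
# `latestAttestations` entry for that validator's public key.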

func get_attesting_indices_seq(state: BeaconState,
                               attestation_data: AttestationData,
                               bits: CommitteeValidatorsBits,
                               cache: var StateCache): seq[ValidatorIndex] =
  toSeq(items(get_attesting_indices(
    state, attestation_data, bits, cache)))

func addUnresolved(pool: var AttestationPool, attestation: Attestation) =
  pool.unresolved[attestation.data.beacon_block_root] =
    UnresolvedAttestation(
      attestation: attestation,
    )

proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attestation) =
  doAssert blck.root == attestation.data.beacon_block_root

  # TODO Which state should we use to validate the attestation? It seems
  #      reasonable to involve the head being voted for as well as the intended
  #      slot of the attestation - double-check this with the spec

  # TODO: How fast is state rewind?
  #       Can this be a DOS vector?

  # TODO: filter valid attestations as much as possible before state rewind
  # TODO: the below check does not respect the inclusion delay;
  #       we should use isValidAttestationSlot instead
  if blck.slot > attestation.data.slot:
    notice "Invalid attestation (too new!)",
      attestation = shortLog(attestation),
      blockSlot = shortLog(blck.slot)
    return

  if attestation.data.slot < pool.startingSlot:
    # It can happen that attestations in blocks, for example, are included even
    # though they are no longer relevant for finalization - let's clear
    # these out
    debug "Old attestation",
      attestation = shortLog(attestation),
      startingSlot = pool.startingSlot
    return

  # if not isValidAttestationSlot(attestation.data.slot, blck.slot):
  #   # Logging in isValidAttestationSlot
  #   return

  # Get a temporary state at the (block, slot) targeted by the attestation
  updateStateData(
    pool.blockPool, pool.blockPool.tmpState,
    BlockSlot(blck: blck, slot: attestation.data.slot))

  template state(): BeaconState = pool.blockPool.tmpState.data.data

  # Check that the attestation is indeed valid
  # TODO: we might want to split checks that depend
  #       on the state and those that don't, to cheaply
  #       discard invalid attestations before rewinding state.
  if not isValidAttestationTargetEpoch(state, attestation.data):
    notice "Invalid attestation",
      attestation = shortLog(attestation),
      current_epoch = get_current_epoch(state),
      cat = "filtering"
    return

  # TODO inefficient data structures..

  var cache = getEpochCache(blck, state)
  let
    attestationSlot = attestation.data.slot
    idx = pool.slotIndex(state, attestationSlot)
    attestationsSeen = addr pool.mapSlotsToAttestations[idx]
    validation = Validation(
      aggregation_bits: attestation.aggregation_bits,
      aggregate_signature: attestation.signature)
    participants = get_attesting_indices_seq(
      state, attestation.data, validation.aggregation_bits, cache)

  var found = false
  for a in attestationsSeen.attestations.mitems():
    if a.data == attestation.data:
      for v in a.validations:
        if validation.aggregation_bits.isSubsetOf(v.aggregation_bits):
          # The validations in the new attestation are a subset of one of the
          # attestations that we already have on file - no need to add this
          # attestation to the database
          # TODO what if the new attestation is useful for creating bigger
          #      sets by virtue of not overlapping with some other attestation
          #      and therefore being useful after all?
          trace "Ignoring subset attestation",
            newParticipants = participants,
            cat = "filtering"
          found = true
          break

      if not found:
        # Attestations in the pool that are a subset of the new attestation
        # can now be removed per the same logic as above
        trace "Removing subset attestations",
          newParticipants = participants,
          cat = "pruning"

        a.validations.keepItIf(
          not it.aggregation_bits.isSubsetOf(validation.aggregation_bits))

        a.validations.add(validation)
        pool.updateLatestVotes(state, attestationSlot, participants, a.blck)

        info "Attestation resolved",
          attestation = shortLog(attestation),
          validations = a.validations.len(),
          current_epoch = get_current_epoch(state),
          blockSlot = shortLog(blck.slot),
          cat = "filtering"

        found = true

      break

  if not found:
    attestationsSeen.attestations.add(AttestationEntry(
      data: attestation.data,
      blck: blck,
      validations: @[validation]
    ))
    pool.updateLatestVotes(state, attestationSlot, participants, blck)

    info "Attestation resolved",
      attestation = shortLog(attestation),
      current_epoch = get_current_epoch(state),
      validations = 1,
      blockSlot = shortLog(blck.slot),
      cat = "filtering"
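
# In short, a resolved attestation is either (a) dropped because its
# aggregation bits are a subset of a validation already stored for the same
# data, (b) stored alongside the existing validations after pruning any that
# are subsets of it, or (c) stored as a brand new AttestationEntry when the
# attestation data hasn't been seen before.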

proc addAttestation*(pool: var AttestationPool, attestation: Attestation) =
  ## Add a verified attestation to the fork choice context
  logScope: pcs = "atp_add_attestation"

  # Fetch the target block or notify the block pool that it's needed
  let blck = pool.blockPool.getOrResolve(attestation.data.beacon_block_root)

  # If the block exists, add it to the fork choice context;
  # otherwise delay until it resolves
  if blck.isNil:
    pool.addUnresolved(attestation)
    return

  pool.addResolved(blck, attestation)

proc addForkChoice_v2*(pool: var AttestationPool, blck: BlockRef) =
  ## Add a verified block to the fork choice context
  ## The current justifiedState of the block pool is used as reference

  # TODO: add(BlockPool, blockRoot: Eth2Digest, SignedBeaconBlock): BlockRef
  #       should ideally return the justified_epoch and finalized_epoch
  #       so that we can pass them directly to this proc without having to
  #       redo "updateStateData"
  #
  #       In any case, `updateStateData` should shortcut
  #       to `getStateDataCached`

  var state: Result[void, string]
  # A stack of blocks to add in case recovery is needed
  var blockStack: seq[BlockSlot]
  var current = BlockSlot(blck: blck, slot: blck.slot)

  while true: # The while loop should not be needed, but it seems a block
              # addition scenario is unaccounted for
    updateStateData(
      pool.blockPool,
      pool.blockPool.tmpState,
      current
    )

    let blockData = pool.blockPool.get(current.blck)
    state = pool.forkChoice_v2.process_block(
      slot = current.blck.slot,
      block_root = current.blck.root,
      parent_root = if not current.blck.parent.isNil: current.blck.parent.root else: default(Eth2Digest),
      state_root = default(Eth2Digest), # not needed by fork choice, but may help external components
      justified_epoch = pool.blockPool.tmpState.data.data.current_justified_checkpoint.epoch,
      finalized_epoch = pool.blockPool.tmpState.data.data.finalized_checkpoint.epoch,
    )

    # This should not happen and might lead to unresponsive networking while
    # processing occurs
    if state.isErr:
      # TODO investigate, potential sources:
      # - Pruning
      # - Quarantine adding multiple blocks at once
      # - Own block proposal
      error "Desync between fork_choice and blockpool services, trying to recover.",
        msg = state.error,
        blck = shortlog(current.blck),
        parent = shortlog(current.blck.parent),
        finalizedHead = shortLog(pool.blockPool.finalizedHead),
        justifiedHead = shortLog(pool.blockPool.head.justified),
        head = shortLog(pool.blockPool.head.blck)
      blockStack.add(current)
      current = BlockSlot(blck: blck.parent, slot: blck.parent.slot)
    elif blockStack.len == 0:
      break
    else:
      info "Re-added missing or pruned block to fork choice",
        msg = state.error,
        blck = shortlog(current.blck),
        parent = shortlog(current.blck.parent),
        finalizedHead = shortLog(pool.blockPool.finalizedHead),
        justifiedHead = shortLog(pool.blockPool.head.justified),
        head = shortLog(pool.blockPool.head.blck)
      current = blockStack.pop()

proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
    Option[AttestationsSeen] =
  if newBlockSlot < (GENESIS_SLOT + MIN_ATTESTATION_INCLUSION_DELAY):
    debug "Too early for attestations",
      newBlockSlot = shortLog(newBlockSlot),
      cat = "query"
    return none(AttestationsSeen)

  if pool.mapSlotsToAttestations.len == 0: # startingSlot not set yet!
    info "No attestations found (pool empty)",
      newBlockSlot = shortLog(newBlockSlot),
      cat = "query"
    return none(AttestationsSeen)

  let
    # TODO in theory we could include attestations from other slots also, but
    # we're currently not tracking which attestations have already been included
    # in blocks on the fork we're aiming for.. this is a conservative approach
    # that's guaranteed to not include any duplicates, because it's the first
    # time the attestations are up for inclusion!
    attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY

  if attestationSlot < pool.startingSlot or
      attestationSlot >= pool.startingSlot + pool.mapSlotsToAttestations.len.uint64:
    info "No attestations matching the slot range",
      attestationSlot = shortLog(attestationSlot),
      startingSlot = shortLog(pool.startingSlot),
      endingSlot = shortLog(pool.startingSlot + pool.mapSlotsToAttestations.len.uint64),
      cat = "query"
    return none(AttestationsSeen)

  let slotDequeIdx = int(attestationSlot - pool.startingSlot)
  some(pool.mapSlotsToAttestations[slotDequeIdx])
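
# Example: with MIN_ATTESTATION_INCLUSION_DELAY == 1 (the phase0 preset value),
# a block being built for slot 100 asks for the attestations seen at slot 99,
# i.e. deque index 99 - startingSlot.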

proc getAttestationsForBlock*(pool: AttestationPool,
                              state: BeaconState): seq[Attestation] =
  ## Retrieve attestations that may be added to a new block at the slot of the
  ## given state
  logScope: pcs = "retrieve_attestation"

  # TODO this shouldn't really need state -- it's to recheck/validate, but that
  # should be refactored
  let newBlockSlot = state.slot
  var attestations: seq[AttestationEntry]

  # This isn't maximally efficient -- iterators or other approaches would
  # avoid lots of memory allocations -- but this provides a more flexible
  # base upon which to experiment with, and isn't yet a profiling hot-path,
  # while avoiding penalizing slow attesting too much (per the spec, an
  # attestation is supposed to remain available up to two epochs back).
  # This isn't a good solution either -- see the set-packing comment below
  # as one issue. It also creates problems with lots of repeat attestations,
  # as a bunch of synchronized beacon_nodes do almost the opposite of the
  # intended thing -- sure, _blocks_ have to be popular (via attestation)
  # but _attestations_ shouldn't have to be so frequently repeated, as an
  # artifact of this state-free, identical-across-clones choice basis. In
  # addResolved, too, the new attestations get added to the end, while in
  # these functions, it's reading from the beginning, et cetera. This all
  # needs a single unified strategy.
  const LOOKBACK_WINDOW = 3
  for i in max(1, newBlockSlot.int64 - LOOKBACK_WINDOW) .. newBlockSlot.int64:
    let maybeSlotData = getAttestationsForSlot(pool, i.Slot)
    if maybeSlotData.isSome:
      insert(attestations, maybeSlotData.get.attestations)
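
  # The lookback above scans the seen-attestation deques for slots
  # newBlockSlot - LOOKBACK_WINDOW .. newBlockSlot; each getAttestationsForSlot
  # call then applies MIN_ATTESTATION_INCLUSION_DELAY, so for a block at
  # slot 100 the candidate attestations come from slots 96..99.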

  if attestations.len == 0:
    return

  var cache = get_empty_per_epoch_cache()
  for a in attestations:
    var
      # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#construct-attestation
      attestation = Attestation(
        aggregation_bits: a.validations[0].aggregation_bits,
        data: a.data,
        signature: a.validations[0].aggregate_signature
      )

    # TODO what's going on here is that when producing a block, we need to
    #      include only such attestations that will not cause block validation
    #      to fail. How this interacts with voting and the acceptance of
    #      attestations into the pool in general is an open question that needs
    #      revisiting - for example, when attestations are added, against which
    #      state should they be validated, if at all?
    # TODO we're checking signatures here every time which is very slow, but we
    #      don't want to include a broken attestation
    if not check_attestation(state, attestation, {}, cache):
      warn "Attestation no longer validates...",
        cat = "query"
      continue

    for v in a.validations[1..^1]:
      # TODO We need to select a set of attestations that maximise profit by
      #      adding the largest combined attestation set that we can find - this
      #      unfortunately looks an awful lot like
      #      https://en.wikipedia.org/wiki/Set_packing - here we just iterate
      #      and naively add as much as possible in one go, but we could also
      #      add the same attestation data twice, as long as there's at least
      #      one new attestation in there
      if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
        attestation.aggregation_bits.combine(v.aggregation_bits)
        attestation.signature.aggregate(v.aggregate_signature)

    result.add(attestation)

    if result.len >= MAX_ATTESTATIONS.int:
      debug "getAttestationsForBlock: returning early after hitting MAX_ATTESTATIONS",
        attestationSlot = newBlockSlot - 1
      return

proc resolve*(pool: var AttestationPool) =
  ## Check attestations in our unresolved deque
  ## if they can be integrated into the fork choice
  logScope: pcs = "atp_resolve"

  var
    done: seq[Eth2Digest]
    resolved: seq[tuple[blck: BlockRef, attestation: Attestation]]

  for k, v in pool.unresolved.mpairs():
    if (let blck = pool.blockPool.getRef(k); not blck.isNil()):
      resolved.add((blck, v.attestation))
      done.add(k)
    elif v.tries > 8:
      done.add(k)
    else:
      inc v.tries

  for k in done:
    pool.unresolved.del(k)

  for a in resolved:
    pool.addResolved(a.blck, a.attestation)
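
# Unresolved attestations are retried on every `resolve()` call and dropped
# once their target block still hasn't appeared after more than 8 attempts.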

# Fork choice v1
# ---------------------------------------------------------------

func latestAttestation(
    pool: AttestationPool, pubKey: ValidatorPubKey): BlockRef =
  pool.latestAttestations.getOrDefault(pubKey)

# https://github.com/ethereum/eth2.0-specs/blob/v0.8.4/specs/core/0_fork-choice.md
# The structure of this code differs from the spec since we use a different
# strategy for storing states and justification points - it should nonetheless
# be close in terms of functionality.
func lmdGhost(
    pool: AttestationPool, start_state: BeaconState,
    start_block: BlockRef): BlockRef =
  # TODO: a Fenwick Tree datastructure to keep track of cumulated votes
  #       in O(log N) complexity
  #       https://en.wikipedia.org/wiki/Fenwick_tree
  #       Nim implementation for cumulative frequencies at
  #       https://github.com/numforge/laser/blob/990e59fffe50779cdef33aa0b8f22da19e1eb328/benchmarks/random_sampling/fenwicktree.nim

  let
    active_validator_indices =
      get_active_validator_indices(
        start_state, compute_epoch_at_slot(start_state.slot))

  var latest_messages: seq[tuple[validator: ValidatorIndex, blck: BlockRef]]
  for i in active_validator_indices:
    let pubKey = start_state.validators[i].pubkey
    if (let vote = pool.latestAttestation(pubKey); not vote.isNil):
      latest_messages.add((i, vote))

  # TODO: update to 0.10.1: https://github.com/ethereum/eth2.0-specs/pull/1589/files#diff-9fc3792aa94456eb29506fa77f77b918R143
  template get_latest_attesting_balance(blck: BlockRef): uint64 =
    var res: uint64
    for validator_index, target in latest_messages.items():
      if get_ancestor(target, blck.slot) == blck:
        res += start_state.validators[validator_index].effective_balance
    res

  var head = start_block
  while true:
    if head.children.len() == 0:
      return head

    if head.children.len() == 1:
      head = head.children[0]
    else:
      var
        winner = head.children[0]
        winCount = get_latest_attesting_balance(winner)

      for i in 1..<head.children.len:
        let
          candidate = head.children[i]
          candCount = get_latest_attesting_balance(candidate)

        if (candCount > winCount) or
            ((candCount == winCount and candidate.root.data < winner.root.data)):
          winner = candidate
          winCount = candCount
      head = winner
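
# The walk above starts from the justified block and, at each fork, descends
# into the child carrying the greatest latest-vote balance (ties going to the
# lexicographically smaller block root), stopping when it reaches a leaf.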

proc selectHead_v1(pool: AttestationPool): BlockRef =
  let
    justifiedHead = pool.blockPool.latestJustifiedBlock()

  let newHead =
    lmdGhost(pool, pool.blockPool.justifiedState.data.data, justifiedHead.blck)

  newHead

# Fork choice v2
# ---------------------------------------------------------------

func getAttesterBalances(state: StateData): seq[Gwei] {.noInit.} =
  ## Get the balances from a state
  result.newSeq(state.data.data.validators.len) # zero-init

  let epoch = state.data.data.slot.compute_epoch_at_slot()

  for i in 0 ..< result.len:
    # All non-active validators have a 0 balance
    template validator: Validator = state.data.data.validators[i]
    if validator.is_active_validator(epoch):
      result[i] = validator.effective_balance

proc selectHead_v2(pool: var AttestationPool): BlockRef =
  let attesterBalances = pool.blockPool.justifiedState.getAttesterBalances()

  let newHead = pool.forkChoice_v2.find_head(
    justified_epoch = pool.blockPool.justifiedState.data.data.slot.compute_epoch_at_slot(),
    justified_root = pool.blockPool.head.justified.blck.root,
    finalized_epoch = pool.blockPool.headState.data.data.finalized_checkpoint.epoch,
    justified_state_balances = attesterBalances
  ).get()

  pool.blockPool.getRef(newHead)

proc pruneBefore*(pool: var AttestationPool, finalizedHead: BlockSlot) =
  pool.forkChoice_v2.maybe_prune(finalizedHead.blck.root).get()

# Dual-Headed Fork choice
# ---------------------------------------------------------------

proc selectHead*(pool: var AttestationPool): BlockRef =
  let head_v1 = pool.selectHead_v1()
  # let head_v2 = pool.selectHead_v2()
  #
  # if head_v1 != head_v2:
  #   error "Fork choice engines in disagreement, using block from v1.",
  #     v1_block = shortlog(head_v1),
  #     v2_block = shortlog(head_v2)

  return head_v1