# beacon_chain
# Copyright (c) 2018-2023 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  # Standard library
  std/[sequtils, tables],
  # Status libraries
  stew/[objects, results], chronicles,
  # Internal
  ../spec/[beaconstate, helpers, state_transition_block],
  ../spec/datatypes/[phase0, altair, bellatrix],
  # Fork choice
  ./fork_choice_types, ./proto_array,
  ../consensus_object_pools/[spec_cache, blockchain_dag]

export results, fork_choice_types
export proto_array.len

# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/fork-choice.md
# This is a port of https://github.com/sigp/lighthouse/pull/804
# which is a port of "Proto-Array": https://github.com/protolambda/lmd-ghost
# See also:
# - Protolambda port of Lighthouse: https://github.com/protolambda/eth2-py-hacks/blob/ae286567/proto_array.py
# - Prysmatic writeup: https://hackmd.io/bABJiht3Q9SyV3Ga4FT9lQ#High-level-concept
# - Gasper Whitepaper: https://arxiv.org/abs/2003.03052

# Forward declarations
# ----------------------------------------------------------------------

type Index = fork_choice_types.Index

func compute_deltas(
       deltas: var openArray[Delta],
       indices: Table[Eth2Digest, Index],
       indices_offset: Index,
       votes: var openArray[VoteTracker],
       old_balances: openArray[Gwei],
       new_balances: openArray[Gwei]
     ): FcResult[void]

# Fork choice routines
# ----------------------------------------------------------------------

logScope: topics = "fork_choice"

func init*(
    T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints): T =
  T(proto_array: ProtoArray.init(checkpoints))

proc init*(
    T: type ForkChoice, epochRef: EpochRef, blck: BlockRef): T =
  ## Initialize a fork choice context for a finalized state - in the finalized
  ## state, the justified and finalized checkpoints are the same, so only one
  ## is used here
  debug "Initializing fork choice",
    epoch = epochRef.epoch, blck = shortLog(blck)

  let checkpoint = Checkpoint(root: blck.root, epoch: epochRef.epoch)
  ForkChoice(
    backend: ForkChoiceBackend.init(
      FinalityCheckpoints(
        justified: checkpoint,
        finalized: checkpoint)),
    checkpoints: Checkpoints(
      justified: BalanceCheckpoint(
        checkpoint: checkpoint,
        balances: epochRef.effective_balances),
      finalized: checkpoint,
      best_justified: checkpoint))

func extend[T](s: var seq[T], minLen: int) =
  ## Extend a sequence so that it can contain at least `minLen` elements.
  ## If it's already bigger, the sequence is unmodified.
  ## The extension is zero-initialized
  if s.len < minLen:
    s.setLen(minLen)
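
# Informal usage sketch for `extend` above (illustrative values only):
#   var balances = @[Gwei(32), Gwei(32)]
#   balances.extend(4)   # -> @[Gwei(32), Gwei(32), Gwei(0), Gwei(0)]
#   balances.extend(1)   # already long enough, left unchanged
# `setLen` zero-initializes only the newly added slots.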

proc update_justified(
    self: var Checkpoints, dag: ChainDAGRef, blck: BlockRef, epoch: Epoch) =
  let
    epochRef = dag.getEpochRef(blck, epoch, false).valueOr:
      # Shouldn't happen for justified data unless out of sync with ChainDAG
      warn "Skipping justified checkpoint update, no EpochRef - report bug",
        blck, epoch, error
      return
    justified = Checkpoint(root: blck.root, epoch: epochRef.epoch)

  trace "Updating justified",
    store = self.justified.checkpoint, state = justified
  self.justified = BalanceCheckpoint(
    checkpoint: Checkpoint(root: blck.root, epoch: epochRef.epoch),
    balances: epochRef.effective_balances)

proc update_justified(
    self: var Checkpoints, dag: ChainDAGRef,
    justified: Checkpoint): FcResult[void] =
  let blck = dag.getBlockRef(justified.root).valueOr:
    return err ForkChoiceError(
      kind: fcJustifiedNodeUnknown,
      blockRoot: justified.root)

  self.update_justified(dag, blck, justified.epoch)
  ok()

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#update_checkpoints
proc update_checkpoints(
    self: var Checkpoints, dag: ChainDAGRef,
    checkpoints: FinalityCheckpoints): FcResult[void] =
  ## Update checkpoints in store if necessary
  # Update justified checkpoint
  if checkpoints.justified.epoch > self.justified.checkpoint.epoch:
    ? self.update_justified(dag, checkpoints.justified)

  # Update finalized checkpoint
  if checkpoints.finalized.epoch > self.finalized.epoch:
    trace "Updating finalized",
      store = self.finalized, state = checkpoints.finalized
    self.finalized = checkpoints.finalized

  ok()
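
# Note on the update rule above: both checkpoints only ever move forward.
# For example (hypothetical epochs), a store whose justified checkpoint sits
# at epoch 10 and finalized at epoch 9 adopts an incoming pair at epochs
# (11, 10), but ignores a stale pair at epochs (10, 8).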

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_tick_per_slot
proc on_tick(
    self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime): FcResult[void] =
  ## Must be called at least once per slot.
  let previous_time = self.checkpoints.time

  # Update store time
  if time < previous_time:
    return err ForkChoiceError(kind: fcInconsistentTick)
  self.checkpoints.time = time

  let
    current_slot = time.slotOrZero
    previous_slot = previous_time.slotOrZero

  # If this is a new slot, reset store.proposer_boost_root
  if current_slot > previous_slot:
    self.checkpoints.proposer_boost_root = ZERO_HASH

  # If a new epoch, pull-up justification and finalization from previous epoch
  if current_slot > previous_slot and current_slot.is_epoch:
    for realized in self.backend.proto_array.realizePendingCheckpoints():
      ? self.checkpoints.update_checkpoints(dag, realized)

  ok()

func process_attestation_queue(self: var ForkChoice) {.gcsafe.}

proc update_time*(self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime):
    FcResult[void] =
  const step_size = seconds(SECONDS_PER_SLOT.int)
  if time > self.checkpoints.time:
    # Call on_tick at least once per slot.
    while time >= self.checkpoints.time + step_size:
      ? self.on_tick(dag, self.checkpoints.time + step_size)

    if time > self.checkpoints.time:
      # Might create two ticks for the last slot.
      ? self.on_tick(dag, time)

    self.process_attestation_queue() # Only run if time changed!

  ok()
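
# Worked example for the ticking loop above (slot numbers are illustrative):
# if the store time lies somewhere in slot 10 and `time` falls in slot 13,
# `on_tick` runs once per `step_size` increment (landing in slots 11, 12, 13)
# and, if `time` lies past the last whole-slot step, once more for `time`
# itself - hence the "might create two ticks for the last slot" note.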

func process_attestation*(
       self: var ForkChoiceBackend,
       validator_index: ValidatorIndex,
       block_root: Eth2Digest,
       target_epoch: Epoch
     ) =
  if block_root.isZero:
    return

  ## Add an attestation to the fork choice context
  self.votes.extend(validator_index.int + 1)

  template vote: untyped = self.votes[validator_index]
  if target_epoch > vote.next_epoch or vote.isZeroMemory:
    vote.next_root = block_root
    vote.next_epoch = target_epoch

    trace "Integrating vote in fork choice",
      validator_index = validator_index,
      new_vote = shortLog(vote)
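
# Note on the vote-tracking rule above: a tracked vote is only replaced when
# the incoming target epoch is strictly newer (or the tracker is still
# zero-initialized). For example, if a validator's tracker already holds a
# vote with `next_epoch = 6`, a late attestation targeting epoch 5 is ignored.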

func process_attestation_queue(self: var ForkChoice) =
  self.queuedAttestations.keepItIf:
    if it.slot < self.checkpoints.time.slotOrZero:
      for validator_index in it.attesting_indices:
        self.backend.process_attestation(
          validator_index, it.block_root, it.slot.epoch())
      false
    else:
      true

func contains*(self: ForkChoiceBackend, block_root: Eth2Digest): bool =
  ## Returns `true` if a block is known to the fork choice
  ## and `false` otherwise.
  ##
  ## In particular, before adding a block, its parent must be known to the fork choice
  self.proto_array.indices.contains(block_root)

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_attestation
proc on_attestation*(
       self: var ForkChoice,
       dag: ChainDAGRef,
       attestation_slot: Slot,
       beacon_block_root: Eth2Digest,
       attesting_indices: openArray[ValidatorIndex],
       wallTime: BeaconTime
     ): FcResult[void] =
  ? self.update_time(dag, max(wallTime, attestation_slot.start_beacon_time))

  if beacon_block_root.isZero:
    return ok()

  if attestation_slot < self.checkpoints.time.slotOrZero:
    for validator_index in attesting_indices:
      # attestation_slot and target epoch must match, per attestation rules
      self.backend.process_attestation(
        validator_index, beacon_block_root, attestation_slot.epoch)
  else:
    # Spec:
    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
    self.queuedAttestations.add(QueuedAttestation(
      slot: attestation_slot,
      attesting_indices: @attesting_indices,
      block_root: beacon_block_root))
  ok()
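
# Note on the queueing branch above: attestations from the current or a future
# slot are parked in `queuedAttestations` and only applied by
# `process_attestation_queue` once `update_time` has advanced the store past
# their slot, matching the spec rule that attestations affect subsequent slots.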

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#on_attester_slashing
func process_equivocation*(
    self: var ForkChoice,
    validator_index: ValidatorIndex
  ) =
  self.backend.votes.extend(validator_index.int + 1)

  # Disallow future votes
  template vote: untyped = self.backend.votes[validator_index]
  if vote.next_epoch != FAR_FUTURE_EPOCH or not vote.next_root.isZero:
    vote.next_epoch = FAR_FUTURE_EPOCH
    vote.next_root.reset()

    trace "Integrating equivocation in fork choice",
      validator_index
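
# Note: pinning `next_epoch` to `FAR_FUTURE_EPOCH` means later calls to
# `process_attestation` can never satisfy `target_epoch > vote.next_epoch`,
# so an equivocating validator's future votes no longer influence fork choice.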

# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/fork-choice.md#on_block
func process_block*(self: var ForkChoiceBackend,
                    bid: BlockId,
                    parent_root: Eth2Digest,
                    checkpoints: FinalityCheckpoints,
                    unrealized = none(FinalityCheckpoints)): FcResult[void] =
  self.proto_array.onBlock(bid, parent_root, checkpoints, unrealized)

proc process_block*(self: var ForkChoice,
                    dag: ChainDAGRef,
                    epochRef: EpochRef,
                    blckRef: BlockRef,
                    unrealized: FinalityCheckpoints,
                    blck: ForkyTrustedBeaconBlock,
                    wallTime: BeaconTime): FcResult[void] =
  ? update_time(self, dag, max(wallTime, blckRef.slot.start_beacon_time))

  for attester_slashing in blck.body.attester_slashings:
    for idx in getValidatorIndices(attester_slashing):
      let i = ValidatorIndex.init(idx).valueOr:
        continue
      self.process_equivocation(i)

  for attestation in blck.body.attestations:
    if attestation.data.beacon_block_root in self.backend:
      for validator_index in dag.get_attesting_indices(attestation):
        self.backend.process_attestation(
          validator_index,
          attestation.data.beacon_block_root,
          attestation.data.target.epoch)

  trace "Integrating block in fork choice",
    block_root = shortLog(blckRef)

  # Add proposer score boost if the block is timely
  let slot = self.checkpoints.time.slotOrZero
  if slot == blck.slot and self.checkpoints.time < slot.attestation_deadline:
    self.checkpoints.proposer_boost_root = blckRef.root

  # Update checkpoints in store if necessary
  ? update_checkpoints(self.checkpoints, dag, epochRef.checkpoints)

  # If block is from a prior epoch, pull up the post-state to next epoch to
  # realize new finality info
  let unrealized_is_better =
    unrealized.justified.epoch > epochRef.checkpoints.justified.epoch or
    unrealized.finalized.epoch > epochRef.checkpoints.finalized.epoch
  if unrealized_is_better:
    if epochRef.epoch < slot.epoch:
      trace "Pulling up chain tip",
        blck = shortLog(blckRef), checkpoints = epochRef.checkpoints, unrealized
      ? update_checkpoints(self.checkpoints, dag, unrealized)
      ? process_block(
        self.backend, blckRef.bid, blck.parent_root, unrealized)
    else:
      ? process_block(
        self.backend, blckRef.bid, blck.parent_root,
        epochRef.checkpoints, some unrealized) # Realized in `on_tick`
  else:
    ? process_block(
      self.backend, blckRef.bid, blck.parent_root, epochRef.checkpoints)

  ok()
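
# Note on the proposer boost condition above: the boost is only granted when
# the block lands in the slot the store currently considers "now" and before
# that slot's attestation deadline, i.e. while honest validators have not yet
# had to attest to a competing head.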

func find_head*(
       self: var ForkChoiceBackend,
       current_epoch: Epoch,
       checkpoints: FinalityCheckpoints,
       justified_state_balances: seq[Gwei],
       proposer_boost_root: Eth2Digest
     ): FcResult[Eth2Digest] =
  ## Returns the new blockchain head

  # Compute deltas with previous call
  #   we might want to reuse the `deltas` buffer across calls
  var deltas = newSeq[Delta](self.proto_array.indices.len)
  ? deltas.compute_deltas(
    indices = self.proto_array.indices,
    indices_offset = self.proto_array.nodes.offset,
    votes = self.votes,
    old_balances = self.balances,
    new_balances = justified_state_balances)

  # Apply score changes
  ? self.proto_array.applyScoreChanges(
    deltas, current_epoch, checkpoints,
    justified_state_balances, proposer_boost_root)

  self.balances = justified_state_balances

  # Find the best block
  var new_head{.noinit.}: Eth2Digest
  ? self.proto_array.findHead(new_head, checkpoints.justified.root)

  trace "Fork choice requested",
    checkpoints, fork_choice_head = shortLog(new_head)

  return ok(new_head)

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/specs/phase0/fork-choice.md#get_head
proc get_head*(self: var ForkChoice,
               dag: ChainDAGRef,
               wallTime: BeaconTime): FcResult[Eth2Digest] =
  ? self.update_time(dag, wallTime)

  self.backend.find_head(
    self.checkpoints.time.slotOrZero.epoch,
    FinalityCheckpoints(
      justified: self.checkpoints.justified.checkpoint,
      finalized: self.checkpoints.finalized),
    self.checkpoints.justified.balances,
    self.checkpoints.proposer_boost_root)
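
# Rough usage sketch (hypothetical call site; the `forkChoice`, `dag` and
# `wallTime` names are assumptions, not part of this module):
#   ? forkChoice.process_block(dag, epochRef, blckRef, unrealized, blck, wallTime)
#   let head = ? forkChoice.get_head(dag, wallTime)
# Callers typically re-run `get_head` after importing blocks or attestations.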

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-alpha.3/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
  # Use most recent justified block as a stopgap
  self.checkpoints.justified.checkpoint.root

func prune*(
    self: var ForkChoiceBackend, checkpoints: FinalityCheckpoints
  ): FcResult[void] =
  ## Prune blocks preceding the finalized root as they are now unneeded.
  self.proto_array.prune(checkpoints)

func prune*(self: var ForkChoice): FcResult[void] =
  self.backend.prune(
    FinalityCheckpoints(
      justified: self.checkpoints.justified.checkpoint,
      finalized: self.checkpoints.finalized))

func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest) =
  try:
    let nodePhysicalIdx =
      self.backend.proto_array.indices[root] -
      self.backend.proto_array.nodes.offset
    if nodePhysicalIdx < self.backend.proto_array.nodes.buf.len:
      self.backend.proto_array.nodes.buf[nodePhysicalIdx].invalid = true
      self.backend.proto_array.propagateInvalidity(nodePhysicalIdx)
  # Best-effort; attempts to mark unknown roots invalid harmlessly ignored
  except KeyError:
    discard

func compute_deltas(
       deltas: var openArray[Delta],
       indices: Table[Eth2Digest, Index],
       indices_offset: Index,
       votes: var openArray[VoteTracker],
       old_balances: openArray[Gwei],
       new_balances: openArray[Gwei]
     ): FcResult[void] =
  ## Update `deltas` with the score changes implied by the difference
  ## between old and new balances and between old and new votes
  ##
  ## `deltas.len` must match `indices.len` (length match)
  ##
  ## Error:
  ## - If a value in indices is greater than `indices.len`
  ## - If an `Eth2Digest` in `votes` does not exist in `indices`
  ##   except for the `default(Eth2Digest)` (i.e. zero hash)

  for val_index, vote in votes.mpairs():
    # No need to create a score change if the validator has never voted
    # or if votes are for the zero hash (alias to the genesis block)
    if vote.current_root.isZero and vote.next_root.isZero:
      continue

    # If the validator was not included in `old_balances` (i.e. did not exist)
    # its balance is zero
    let old_balance = if val_index < old_balances.len: old_balances[val_index]
                      else: 0

    # If the validator is not known in `new_balances` then use a balance of zero
    #
    # It is possible that there is a vote for an unknown validator if we change our
    # justified state to a new state with a higher epoch on a different fork
    # because that fork may have on-boarded fewer validators than the previous fork.
    #
    # Note that attesters are not different as they are activated only under finality
    let new_balance = if val_index < new_balances.len: new_balances[val_index]
                      else: 0

    if vote.current_root != vote.next_root or old_balance != new_balance:
      # Ignore the current or next vote if it is not known in `indices`.
      # We assume that it is outside of our tree (i.e., pre-finalization) and therefore not interesting.
      if vote.current_root in indices:
        let index = indices.unsafeGet(vote.current_root) - indices_offset
        if index >= deltas.len:
          return err ForkChoiceError(
            kind: fcInvalidNodeDelta,
            index: index)
        deltas[index] -= Delta old_balance
          # Note that delta can be negative
          # TODO: is int64 big enough?

      if vote.next_epoch != FAR_FUTURE_EPOCH or not vote.next_root.isZero:
        if vote.next_root in indices:
          let index = indices.unsafeGet(vote.next_root) - indices_offset
          if index >= deltas.len:
            return err ForkChoiceError(
              kind: fcInvalidNodeDelta,
              index: index)
          deltas[index] += Delta new_balance
            # Note that delta can be negative
            # TODO: is int64 big enough?

      vote.current_root = vote.next_root
  return ok()
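
# Worked example for the delta computation above (illustrative numbers):
# a validator whose balance moves from 32 Gwei to 40 Gwei while switching its
# vote from block A to block B contributes -32 to A's delta and +40 to B's,
# so proto-array later subtracts its old weight from A and adds its new one to B.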

# Sanity checks
# ----------------------------------------------------------------------
# Sanity checks on internal private procedures

when isMainModule:
  import stew/endians2

  func fakeHash(index: SomeInteger): Eth2Digest =
    ## Create fake hashes
    ## Those are just the value serialized in big-endian
    ## We add 16x16 to avoid having a zero hash as those are special-cased
    ## We store them in the first 8 bytes
    ## as those are the ones used in hash tables Table[Eth2Digest, T]
    result.data[0 ..< 8] = (16*16+index).uint64.toBytesBE()
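    # For example (illustrative): `fakeHash(1)` stores 16*16 + 1 = 257 in
    # big-endian form, so the first 8 bytes are 00 00 00 00 00 00 01 01 and
    # the remaining bytes stay zero.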

  proc tZeroHash() =
    echo "    fork_choice compute_deltas - test zero votes"

    const validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add default(VoteTracker)
      old_balances.add 0
      new_balances.add 0

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas == newSeq[Delta](validator_count), "deltas should be zeros"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tAll_voted_the_same() =
    echo "    fork_choice compute_deltas - test all same votes"

    const
      Balance = Gwei(42)
      validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        current_root: default(Eth2Digest),
        next_root: fakeHash(0), # Get a non-zero hash
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas:
      if i == 0:
        doAssert delta == Delta(Balance * validator_count), "The 0th root should have a delta"
      else:
        doAssert delta == 0, "The non-0 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tDifferent_votes() =
    echo "    fork_choice compute_deltas - test all different votes"

    const
      Balance = Gwei(42)
      validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        current_root: default(Eth2Digest),
        next_root: fakeHash(i), # Each vote for a different root
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas:
      doAssert delta == Delta(Balance), "Each root should have a delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tMoving_votes() =
    echo "    fork_choice compute_deltas - test moving votes"

    const
      Balance = Gwei(42)
      validator_count = 16
      TotalDeltas = Delta(Balance * validator_count)
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        # Move vote from root 0 to root 1
        current_root: fakeHash(0),
        next_root: fakeHash(1),
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas:
      if i == 0:
        doAssert delta == -TotalDeltas, "0th root should have a negative delta"
      elif i == 1:
        doAssert delta == TotalDeltas, "1st root should have a positive delta"
      else:
        doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tMove_out_of_tree() =
    echo "    fork_choice compute_deltas - test votes for unknown subtree"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add a block
    indices.add fakeHash(1), 0

    # 2 validators
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance, Balance]
    let new_balances = @[Balance, Balance]

    # One validator moves their vote from the block to the zero hash
    votes.add VoteTracker(
      current_root: fakeHash(1),
      next_root: default(Eth2Digest),
      next_epoch: Epoch(0)
    )

    # One validator moves their vote from the block to something outside of the tree
    votes.add VoteTracker(
      current_root: fakeHash(1),
      next_root: fakeHash(1337),
      next_epoch: Epoch(0)
    )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance)*2, "The 0th block should have lost both balances."

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tChanging_balances() =
    echo "    fork_choice compute_deltas - test changing balances"

    const
      OldBalance = Gwei(42)
      NewBalance = OldBalance * 2
      validator_count = 16
      TotalOldDeltas = Delta(OldBalance * validator_count)
      TotalNewDeltas = Delta(NewBalance * validator_count)
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        # Move vote from root 0 to root 1
        current_root: fakeHash(0),
        next_root: fakeHash(1),
        next_epoch: Epoch(0)
      )
      old_balances.add OldBalance
      new_balances.add NewBalance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas:
      if i == 0:
        doAssert delta == -TotalOldDeltas, "0th root should have a negative delta"
      elif i == 1:
        doAssert delta == TotalNewDeltas, "1st root should have a positive delta"
      else:
        doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tValidator_appears() =
    echo "    fork_choice compute_deltas - test validator appears"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add 2 blocks
    indices.add fakeHash(1), 0
    indices.add fakeHash(2), 1

    # 1 validator at the start, 2 at the end
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance]
    let new_balances = @[Balance, Balance]

    # Both move their vote from block 1 to block 2
    for _ in 0 ..< 2:
      votes.add VoteTracker(
        current_root: fakeHash(1),
        next_root: fakeHash(2),
        next_epoch: Epoch(0)
      )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance), "Block 1 should have lost only 1 balance"
    doAssert deltas[1] == Delta(Balance)*2, "Block 2 should have gained 2 balances"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tValidator_disappears() =
    echo "    fork_choice compute_deltas - test validator disappears"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add 2 blocks
    indices.add fakeHash(1), 0
    indices.add fakeHash(2), 1

    # 2 validators at the start, 1 at the end
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance, Balance]
    let new_balances = @[Balance]

    # Both move their vote from block 1 to block 2
    for _ in 0 ..< 2:
      votes.add VoteTracker(
        current_root: fakeHash(1),
        next_root: fakeHash(2),
        next_epoch: Epoch(0)
      )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance)*2, "Block 1 should have lost 2 balances"
    doAssert deltas[1] == Delta(Balance), "Block 2 should have gained 1 balance"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  # ----------------------------------------------------------------------

  echo "fork_choice internal tests for compute_deltas"
  tZeroHash()
  tAll_voted_the_same()
  tDifferent_votes()
  tMoving_votes()
  tChanging_balances()
  tValidator_appears()
  tValidator_disappears()