nimbus-eth2/beacon_chain/fork_choice/fork_choice.nim


# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
when (NimMajor, NimMinor) < (1, 4):
{.push raises: [Defect].}
else:
{.push raises: [].}
import
# Standard library
std/[sequtils, tables],
# Status libraries
stew/[objects, results], chronicles,
# Internal
../spec/[beaconstate, helpers, state_transition_block],
../spec/datatypes/[phase0, altair, bellatrix],
# Fork choice
./fork_choice_types, ./proto_array,
../consensus_object_pools/[spec_cache, blockchain_dag]
export results, fork_choice_types
export proto_array.len
# https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/fork-choice.md
# This is a port of https://github.com/sigp/lighthouse/pull/804
# which is a port of "Proto-Array": https://github.com/protolambda/lmd-ghost
# See also:
# - Protolambda port of Lighthouse: https://github.com/protolambda/eth2-py-hacks/blob/ae286567/proto_array.py
# - Prysmatic writeup: https://hackmd.io/bABJiht3Q9SyV3Ga4FT9lQ#High-level-concept
# - Gasper Whitepaper: https://arxiv.org/abs/2003.03052
# Forward declarations
# ----------------------------------------------------------------------
type Index = fork_choice_types.Index
func compute_deltas(
deltas: var openArray[Delta],
indices: Table[Eth2Digest, Index],
indices_offset: Index,
votes: var openArray[VoteTracker],
old_balances: openArray[Gwei],
new_balances: openArray[Gwei]
): FcResult[void]
# Fork choice routines
# ----------------------------------------------------------------------
logScope: topics = "fork_choice"
func init*(
T: type ForkChoiceBackend, checkpoints: FinalityCheckpoints,
hasLowParticipation = false): T =
T(proto_array: ProtoArray.init(checkpoints, hasLowParticipation))
proc init*(
T: type ForkChoice, epochRef: EpochRef, blck: BlockRef,
hasLowParticipation = false): T =
## Initialize a fork choice context for a finalized state - in the finalized
## state, the justified and finalized checkpoints are the same, so only one
## is used here
debug "Initializing fork choice",
epoch = epochRef.epoch, blck = shortLog(blck)
let checkpoint = Checkpoint(root: blck.root, epoch: epochRef.epoch)
ForkChoice(
backend: ForkChoiceBackend.init(
FinalityCheckpoints(
justified: checkpoint,
finalized: checkpoint),
hasLowParticipation),
checkpoints: Checkpoints(
justified: BalanceCheckpoint(
checkpoint: checkpoint,
balances: epochRef.effective_balances),
finalized: checkpoint,
best_justified: checkpoint))
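# Usage sketch for `ForkChoice.init` above (illustrative, not part of the
# module): the context is typically created from the finalized head when the
# node starts participating. `dag`, `epochRef` and `finalizedBlck` are assumed
# to come from the caller's environment:
#
#   let forkChoice = ForkChoice.init(
#     epochRef, finalizedBlck, hasLowParticipation = false)
#
# Because the state is finalized, a single checkpoint doubles as both the
# justified and finalized starting point.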
func extend[T](s: var seq[T], minLen: int) =
## Extend a sequence so that it can contain at least `minLen` elements.
## If it's already bigger, the sequence is unmodified.
## The extension is zero-initialized
if s.len < minLen:
s.setLen(minLen)
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/fork-choice.md#should_update_justified_checkpoint
func should_update_justified_checkpoint(
self: var Checkpoints, dag: ChainDAGRef,
new_justified_checkpoint: Checkpoint): FcResult[bool] =
## To address the bouncing attack, only update conflicting justified
## checkpoints in the fork choice if in the early slots of the epoch.
## Otherwise, delay incorporation of new justified checkpoint until next epoch
## boundary.
##
## See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for
## more detailed analysis and discussion.
if self.time.slotOrZero.since_epoch_start() < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
return ok true
let
justified_slot = self.justified.checkpoint.epoch.start_slot()
justified_blck = dag.getBlockRef(new_justified_checkpoint.root).valueOr:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: new_justified_checkpoint.root)
let justified_ancestor = justified_blck.atSlot(justified_slot)
if justified_ancestor.blck.root != self.justified.checkpoint.root:
return ok false
ok true
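# Worked example for `should_update_justified_checkpoint` above (sketch,
# assuming the usual SAFE_SLOTS_TO_UPDATE_JUSTIFIED = 8): within the first 8
# slots of an epoch, any newer justified checkpoint is accepted immediately.
# Later in the epoch it is accepted only if its block descends from the
# currently justified checkpoint; a conflicting checkpoint is instead held in
# `best_justified` and reconsidered at the next epoch boundary, which is what
# mitigates the bouncing attack.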
proc update_justified(
self: var Checkpoints, dag: ChainDAGRef, blck: BlockRef, epoch: Epoch) =
let
epochRef = dag.getEpochRef(blck, epoch, false).valueOr:
# Shouldn't happen for justified data unless out of sync with ChainDAG
warn "Skipping justified checkpoint update, no EpochRef - report bug",
blck, epoch, best = self.best_justified.epoch
return
justified = Checkpoint(root: blck.root, epoch: epochRef.epoch)
trace "Updating justified",
store = self.justified.checkpoint, state = justified
self.justified = BalanceCheckpoint(
checkpoint: Checkpoint(root: blck.root, epoch: epochRef.epoch),
balances: epochRef.effective_balances)
proc update_justified(
self: var Checkpoints, dag: ChainDAGRef,
justified: Checkpoint): FcResult[void] =
let blck = dag.getBlockRef(justified.root).valueOr:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: justified.root)
self.update_justified(dag, blck, justified.epoch)
ok()
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/fork-choice.md#on_block
proc update_checkpoints(
self: var Checkpoints, dag: ChainDAGRef,
checkpoints: FinalityCheckpoints): FcResult[void] =
## Update checkpoints in store if necessary
# Update justified checkpoint
if checkpoints.justified.epoch > self.justified.checkpoint.epoch:
if checkpoints.justified.epoch > self.best_justified.epoch:
self.best_justified = checkpoints.justified
if ? should_update_justified_checkpoint(self, dag, checkpoints.justified):
? self.update_justified(dag, checkpoints.justified)
# Update finalized checkpoint
if checkpoints.finalized.epoch > self.finalized.epoch:
trace "Updating finalized",
store = self.finalized, state = checkpoints.finalized
self.finalized = checkpoints.finalized
if checkpoints.justified != self.justified.checkpoint:
? self.update_justified(dag, checkpoints.justified)
ok()
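# Example of the `update_checkpoints` rules above (sketch): with a store at
# justified epoch 10 / finalized epoch 9, a block whose state shows justified
# epoch 11 / finalized epoch 10 moves `best_justified` to epoch 11 right away;
# `justified` follows immediately only if `should_update_justified_checkpoint`
# allows it, otherwise it is picked up at the next epoch boundary in `on_tick`.
# `finalized` advances to epoch 10, and if the justified checkpoint still
# conflicts at that point it is forced to the block's justified checkpoint.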
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/fork-choice.md#on_tick
proc on_tick(
self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime): FcResult[void] =
## Must be called at least once per slot.
let previous_time = self.checkpoints.time
# update store time
if time < previous_time:
return err ForkChoiceError(kind: fcInconsistentTick)
self.checkpoints.time = time
let
current_slot = time.slotOrZero
previous_slot = previous_time.slotOrZero
# Reset store.proposer_boost_root if this is a new slot
if current_slot > previous_slot:
self.checkpoints.proposer_boost_root = ZERO_HASH
# Not a new epoch, return
if not (current_slot > previous_slot and current_slot.is_epoch):
return ok()
# Update store.justified_checkpoint if a better checkpoint on the
# store.finalized_checkpoint chain
let
best_justified_epoch = self.checkpoints.best_justified.epoch
store_justified_epoch = self.checkpoints.justified.checkpoint.epoch
if best_justified_epoch > store_justified_epoch:
let
blck = dag.getBlockRef(self.checkpoints.best_justified.root).valueOr:
return err ForkChoiceError(
kind: fcJustifiedNodeUnknown,
blockRoot: self.checkpoints.best_justified.root)
finalized_ancestor = blck.atEpochStart(self.checkpoints.finalized.epoch)
if finalized_ancestor.blck.root == self.checkpoints.finalized.root:
self.checkpoints.update_justified(dag, blck, best_justified_epoch)
# Pull-up chain tips from previous epoch
for realized in self.backend.proto_array.realizePendingCheckpoints():
? self.checkpoints.update_checkpoints(dag, realized)
ok()
func process_attestation_queue(self: var ForkChoice) {.gcsafe.}
proc update_time*(self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime):
FcResult[void] =
const step_size = seconds(SECONDS_PER_SLOT.int)
if time > self.checkpoints.time:
# Call on_tick at least once per slot.
while time >= self.checkpoints.time + step_size:
? self.on_tick(dag, self.checkpoints.time + step_size)
if time > self.checkpoints.time:
# Might create two ticks for the last slot.
? self.on_tick(dag, time)
self.process_attestation_queue() # Only run if time changed!
ok()
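# Usage sketch for `update_time` above (assumed caller pattern, names are
# illustrative): the wall clock drives the store time, and missed slots are
# caught up one `on_tick` at a time:
#
#   let res = forkChoice.update_time(dag, wallTime)
#   if res.isErr:
#     warn "Fork choice tick failed", err = res.error
#
# Queued attestations whose slot has moved into the past are applied as part
# of the same call, via `process_attestation_queue`.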
func process_attestation*(
self: var ForkChoiceBackend,
validator_index: ValidatorIndex,
block_root: Eth2Digest,
target_epoch: Epoch
) =
if block_root.isZero:
return
## Add an attestation to the fork choice context
self.votes.extend(validator_index.int + 1)
template vote: untyped = self.votes[validator_index]
if target_epoch > vote.next_epoch or vote.isZeroMemory:
vote.next_root = block_root
vote.next_epoch = target_epoch
trace "Integrating vote in fork choice",
validator_index = validator_index,
new_vote = shortLog(vote)
func process_attestation_queue(self: var ForkChoice) =
self.queuedAttestations.keepItIf:
if it.slot < self.checkpoints.time.slotOrZero:
for validator_index in it.attesting_indices:
self.backend.process_attestation(
validator_index, it.block_root, it.slot.epoch())
false
else:
true
func contains*(self: ForkChoiceBackend, block_root: Eth2Digest): bool =
## Returns `true` if a block is known to the fork choice
## and `false` otherwise.
##
## In particular, before adding a block, its parent must be known to the fork choice
self.proto_array.indices.contains(block_root)
# https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/fork-choice.md#on_attestation
proc on_attestation*(
self: var ForkChoice,
dag: ChainDAGRef,
attestation_slot: Slot,
beacon_block_root: Eth2Digest,
attesting_indices: openArray[ValidatorIndex],
wallTime: BeaconTime
): FcResult[void] =
? self.update_time(dag, wallTime)
if beacon_block_root.isZero:
return ok()
if attestation_slot < self.checkpoints.time.slotOrZero:
for validator_index in attesting_indices:
# attestation_slot and target epoch must match, per attestation rules
self.backend.process_attestation(
validator_index, beacon_block_root, attestation_slot.epoch)
else:
# Spec:
# Attestations can only affect the fork choice of subsequent slots.
# Delay consideration in the fork choice until their slot is in the past.
self.queuedAttestations.add(QueuedAttestation(
slot: attestation_slot,
attesting_indices: @attesting_indices,
block_root: beacon_block_root))
ok()
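# Note on `on_attestation` above: per the spec rule quoted in the else-branch,
# an attestation for the current slot is not applied immediately - it sits in
# `queuedAttestations` and is folded into the vote tracking by a later
# `update_time`/`on_tick`, once its slot is in the past. Attestations carrying
# a zero block root are ignored outright.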
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/fork-choice.md#on_attester_slashing
func process_equivocation*(
self: var ForkChoice,
validator_index: ValidatorIndex
) =
self.backend.votes.extend(validator_index.int + 1)
# Disallow future votes
template vote: untyped = self.backend.votes[validator_index]
if vote.next_epoch != FAR_FUTURE_EPOCH or not vote.next_root.isZero:
vote.next_epoch = FAR_FUTURE_EPOCH
vote.next_root.reset()
trace "Integrating equivocation in fork choice",
validator_index
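# Note on `process_equivocation` above: setting `next_epoch` to
# FAR_FUTURE_EPOCH means no later attestation can satisfy
# `target_epoch > vote.next_epoch` in `process_attestation`, so once a
# validator is seen equivocating its vote is cleared and can no longer
# influence fork choice.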
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/specs/phase0/fork-choice.md#on_block
func process_block*(self: var ForkChoiceBackend,
block_root: Eth2Digest,
parent_root: Eth2Digest,
checkpoints: FinalityCheckpoints,
unrealized = none(FinalityCheckpoints)): FcResult[void] =
self.proto_array.onBlock(block_root, parent_root, checkpoints, unrealized)
proc process_block*(self: var ForkChoice,
dag: ChainDAGRef,
epochRef: EpochRef,
blckRef: BlockRef,
unrealized: FinalityCheckpoints,
blck: ForkyTrustedBeaconBlock,
wallTime: BeaconTime): FcResult[void] =
? update_time(self, dag, wallTime)
for attester_slashing in blck.body.attester_slashings:
for idx in getValidatorIndices(attester_slashing):
let i = ValidatorIndex.init(idx).valueOr:
continue
self.process_equivocation(i)
for attestation in blck.body.attestations:
if attestation.data.beacon_block_root in self.backend:
for validator_index in dag.get_attesting_indices(attestation):
self.backend.process_attestation(
validator_index,
attestation.data.beacon_block_root,
attestation.data.target.epoch)
trace "Integrating block in fork choice",
block_root = shortLog(blckRef)
# Add proposer score boost if the block is timely
let slot = self.checkpoints.time.slotOrZero
if slot == blck.slot and self.checkpoints.time < slot.attestation_deadline:
self.checkpoints.proposer_boost_root = blckRef.root
# Update checkpoints in store if necessary
? update_checkpoints(self.checkpoints, dag, epochRef.checkpoints)
# If block is from a prior epoch, pull up the post-state to next epoch to
# realize new finality info
let unrealized_is_better =
unrealized.justified.epoch > epochRef.checkpoints.justified.epoch or
unrealized.finalized.epoch > epochRef.checkpoints.finalized.epoch
if unrealized_is_better:
if epochRef.epoch < slot.epoch:
trace "Pulling up chain tip",
blck = shortLog(blckRef), checkpoints = epochRef.checkpoints, unrealized
? update_checkpoints(self.checkpoints, dag, unrealized)
? process_block(
self.backend, blckRef.root, blck.parent_root, unrealized)
else:
? process_block(
self.backend, blckRef.root, blck.parent_root,
epochRef.checkpoints, some unrealized) # Realized in `on_tick`
else:
? process_block(
self.backend, blckRef.root, blck.parent_root, epochRef.checkpoints)
ok()
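# Timing sketch for the proposer boost applied in `process_block` above
# (assuming the usual attestation deadline a third of the way into the slot):
# a block for slot N processed after slot N starts but before
# `N.attestation_deadline` gets its root recorded as `proposer_boost_root`,
# giving it extra weight in `find_head` until the boost is reset on the next
# slot's tick; a late block gets no boost.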
func find_head*(
self: var ForkChoiceBackend,
current_epoch: Epoch,
checkpoints: FinalityCheckpoints,
justified_state_balances: seq[Gwei],
proposer_boost_root: Eth2Digest
): FcResult[Eth2Digest] =
## Returns the new blockchain head
# Compute deltas relative to the previous call
# (we might want to reuse the `deltas` buffer across calls)
var deltas = newSeq[Delta](self.proto_array.indices.len)
? deltas.compute_deltas(
indices = self.proto_array.indices,
indices_offset = self.proto_array.nodes.offset,
votes = self.votes,
old_balances = self.balances,
new_balances = justified_state_balances)
# Apply score changes
? self.proto_array.applyScoreChanges(
deltas, current_epoch, checkpoints,
justified_state_balances, proposer_boost_root)
self.balances = justified_state_balances
# Find the best block
var new_head{.noinit.}: Eth2Digest
? self.proto_array.findHead(new_head, checkpoints.justified.root)
trace "Fork choice requested",
checkpoints, fork_choice_head = shortLog(new_head)
return ok(new_head)
# https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head
proc get_head*(self: var ForkChoice,
dag: ChainDAGRef,
wallTime: BeaconTime): FcResult[Eth2Digest] =
? self.update_time(dag, wallTime)
self.backend.find_head(
self.checkpoints.time.slotOrZero.epoch,
FinalityCheckpoints(
justified: self.checkpoints.justified.checkpoint,
finalized: self.checkpoints.finalized),
self.checkpoints.justified.balances,
self.checkpoints.proposer_boost_root)
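# Usage sketch for `get_head` above (names assumed from the caller's
# environment): the head is re-evaluated against the wall clock whenever it
# may have changed, e.g.:
#
#   let newHead = forkChoice.get_head(dag, wallTime).valueOr:
#     error "Fork choice failed to find head", err = error
#     return
#
# The returned root is the LMD-GHOST tip reachable from the currently
# justified checkpoint.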
# https://github.com/ethereum/consensus-specs/blob/v1.2.0-rc.3/fork_choice/safe-block.md#get_safe_beacon_block_root
func get_safe_beacon_block_root*(self: ForkChoice): Eth2Digest =
# Use most recent justified block as a stopgap
self.checkpoints.justified.checkpoint.root
func prune*(
self: var ForkChoiceBackend, finalized_root: Eth2Digest
): FcResult[void] =
## Prune blocks preceding the finalized root as they are now unneeded.
self.proto_array.prune(finalized_root)
func prune*(self: var ForkChoice): FcResult[void] =
self.backend.prune(self.checkpoints.finalized.root)
func mark_root_invalid*(self: var ForkChoice, root: Eth2Digest) =
try:
let nodePhysicalIdx =
self.backend.proto_array.indices[root] -
self.backend.proto_array.nodes.offset
if nodePhysicalIdx < self.backend.proto_array.nodes.buf.len:
self.backend.proto_array.nodes.buf[nodePhysicalIdx].invalid = true
# Best-effort; attempts to mark unknown roots invalid are harmlessly ignored
except KeyError:
discard
func compute_deltas(
deltas: var openArray[Delta],
indices: Table[Eth2Digest, Index],
indices_offset: Index,
votes: var openArray[VoteTracker],
old_balances: openArray[Gwei],
new_balances: openArray[Gwei]
): FcResult[void] =
## Update `deltas` with the changes in validator votes and balances
## since the previous call.
##
## `deltas.len` must match `indices.len` (length match)
##
## Error:
## - If a value in `indices` maps outside of `deltas`
##
## Votes whose root is not present in `indices` - including the
## `default(Eth2Digest)` (i.e. zero hash) - are skipped rather than
## treated as an error.
for val_index, vote in votes.mpairs():
# No need to create a score change if the validator has never voted
# or if votes are for the zero hash (alias to the genesis block)
if vote.current_root.isZero and vote.next_root.isZero:
continue
# If the validator was not included in `old_balances` (i.e. did not exist)
# its balance is zero
let old_balance = if val_index < old_balances.len: old_balances[val_index]
else: 0
# If the validator is not known in the `new_balances` then use balance of zero
#
# It is possible that there is a vote for an unknown validator if we change our
# justified state to a new state with a higher epoch on a different fork,
# because that fork may have onboarded fewer validators than the previous fork.
#
# Note that attesters are no different, as they are activated only under finality
let new_balance = if val_index < new_balances.len: new_balances[val_index]
else: 0
if vote.current_root != vote.next_root or old_balance != new_balance:
# Ignore the current or next vote if it is not known in `indices`.
# We assume that it is outside of our tree (i.e., pre-finalization) and therefore not interesting.
if vote.current_root in indices:
let index = indices.unsafeGet(vote.current_root) - indices_offset
if index >= deltas.len:
return err ForkChoiceError(
kind: fcInvalidNodeDelta,
index: index)
deltas[index] -= Delta old_balance
# Note that delta can be negative
# TODO: is int64 big enough?
if vote.next_epoch != FAR_FUTURE_EPOCH or not vote.next_root.isZero:
if vote.next_root in indices:
let index = indices.unsafeGet(vote.next_root) - indices_offset
if index >= deltas.len:
return err ForkChoiceError(
kind: fcInvalidNodeDelta,
index: index)
deltas[index] += Delta new_balance
# Note that delta can be negative
# TODO: is int64 big enough?
vote.current_root = vote.next_root
return ok()
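# Worked example for `compute_deltas` above (sketch): a validator whose
# effective balance drops from 42 Gwei to 41 Gwei while moving its vote from
# root A to root B contributes -42 to A's delta and +41 to B's delta. A
# validator that keeps the same root and the same balance contributes nothing,
# which is why unchanged votes are skipped early in the loop.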
# Sanity checks
# ----------------------------------------------------------------------
# Sanity checks on internal private procedures
when isMainModule:
import stew/endians2
func fakeHash(index: SomeInteger): Eth2Digest =
## Create fake hashes
## Those are just the value serialized in big-endian
## We add 16x16 to avoid having a zero hash, as those are special-cased
## We store them in the first 8 bytes
## as those are the ones used in hash tables Table[Eth2Digest, T]
result.data[0 ..< 8] = (16*16+index).uint64.toBytesBE()
proc tZeroHash() =
echo " fork_choice compute_deltas - test zero votes"
const validator_count = 16
var deltas = newSeqUninitialized[Delta](validator_count)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
var old_balances: seq[Gwei]
var new_balances: seq[Gwei]
for i in 0 ..< validator_count:
indices.add fakeHash(i), i
votes.add default(VoteTracker)
old_balances.add 0
new_balances.add 0
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
doAssert deltas == newSeq[Delta](validator_count), "deltas should be zeros"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tAll_voted_the_same() =
echo " fork_choice compute_deltas - test all same votes"
const
Balance = Gwei(42)
validator_count = 16
var deltas = newSeqUninitialized[Delta](validator_count)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
var old_balances: seq[Gwei]
var new_balances: seq[Gwei]
for i in 0 ..< validator_count:
indices.add fakeHash(i), i
votes.add VoteTracker(
current_root: default(Eth2Digest),
next_root: fakeHash(0), # Get a non-zero hash
next_epoch: Epoch(0)
)
old_balances.add Balance
new_balances.add Balance
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
for i, delta in deltas:
if i == 0:
doAssert delta == Delta(Balance * validator_count), "The 0th root should have a delta"
else:
doAssert delta == 0, "The non-0 indexes should have a zero delta"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tDifferent_votes() =
echo " fork_choice compute_deltas - test all different votes"
const
Balance = Gwei(42)
validator_count = 16
var deltas = newSeqUninitialized[Delta](validator_count)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
var old_balances: seq[Gwei]
var new_balances: seq[Gwei]
for i in 0 ..< validator_count:
indices.add fakeHash(i), i
votes.add VoteTracker(
current_root: default(Eth2Digest),
next_root: fakeHash(i), # Each vote for a different root
next_epoch: Epoch(0)
)
old_balances.add Balance
new_balances.add Balance
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
for i, delta in deltas:
doAssert delta == Delta(Balance), "Each root should have a delta"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tMoving_votes() =
echo " fork_choice compute_deltas - test moving votes"
const
Balance = Gwei(42)
validator_count = 16
TotalDeltas = Delta(Balance * validator_count)
var deltas = newSeqUninitialized[Delta](validator_count)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
var old_balances: seq[Gwei]
var new_balances: seq[Gwei]
for i in 0 ..< validator_count:
indices.add fakeHash(i), i
votes.add VoteTracker(
# Move vote from root 0 to root 1
current_root: fakeHash(0),
next_root: fakeHash(1),
next_epoch: Epoch(0)
)
old_balances.add Balance
new_balances.add Balance
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
for i, delta in deltas:
if i == 0:
doAssert delta == -TotalDeltas, "0th root should have a negative delta"
elif i == 1:
doAssert delta == TotalDeltas, "1st root should have a positive delta"
else:
doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tMove_out_of_tree() =
echo " fork_choice compute_deltas - test votes for unknown subtree"
const Balance = Gwei(42)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
# Add a block
indices.add fakeHash(1), 0
# 2 validators
var deltas = newSeqUninitialized[Delta](2)
let old_balances = @[Balance, Balance]
let new_balances = @[Balance, Balance]
# One validator moves their vote from the block to the zero hash
votes.add VoteTracker(
current_root: fakeHash(1),
next_root: default(Eth2Digest),
next_epoch: Epoch(0)
)
# One validator moves their vote from the block to something outside of the tree
votes.add VoteTracker(
current_root: fakeHash(1),
next_root: fakeHash(1337),
next_epoch: Epoch(0)
)
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
doAssert deltas[0] == -Delta(Balance)*2, "The 0th block should have lost both balances."
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tChanging_balances() =
echo " fork_choice compute_deltas - test changing balances"
const
OldBalance = Gwei(42)
NewBalance = OldBalance * 2
validator_count = 16
TotalOldDeltas = Delta(OldBalance * validator_count)
TotalNewDeltas = Delta(NewBalance * validator_count)
var deltas = newSeqUninitialized[Delta](validator_count)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
var old_balances: seq[Gwei]
var new_balances: seq[Gwei]
for i in 0 ..< validator_count:
indices.add fakeHash(i), i
votes.add VoteTracker(
# Move vote from root 0 to root 1
current_root: fakeHash(0),
next_root: fakeHash(1),
next_epoch: Epoch(0)
)
old_balances.add OldBalance
new_balances.add NewBalance
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
for i, delta in deltas:
if i == 0:
doAssert delta == -TotalOldDeltas, "0th root should have a negative delta"
elif i == 1:
doAssert delta == TotalNewDeltas, "1st root should have a positive delta"
else:
doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tValidator_appears() =
echo " fork_choice compute_deltas - test validator appears"
const Balance = Gwei(42)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
# Add 2 blocks
indices.add fakeHash(1), 0
indices.add fakeHash(2), 1
# 1 validator at the start, 2 at the end
var deltas = newSeqUninitialized[Delta](2)
let old_balances = @[Balance]
let new_balances = @[Balance, Balance]
# Both move their vote from block 1 to block 2
for _ in 0 ..< 2:
votes.add VoteTracker(
current_root: fakeHash(1),
next_root: fakeHash(2),
next_epoch: Epoch(0)
)
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
doAssert deltas[0] == -Delta(Balance), "Block 1 should have lost only 1 balance"
doAssert deltas[1] == Delta(Balance)*2, "Block 2 should have gained 2 balances"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
proc tValidator_disappears() =
echo " fork_choice compute_deltas - test validator disappears"
const Balance = Gwei(42)
var indices: Table[Eth2Digest, Index]
var votes: seq[VoteTracker]
# Add 2 blocks
indices.add fakeHash(1), 0
indices.add fakeHash(2), 1
# 1 validator at the start, 2 at the end
var deltas = newSeqUninitialized[Delta](2)
let old_balances = @[Balance, Balance]
let new_balances = @[Balance]
# Both move their vote from block 1 to block 2
for _ in 0 ..< 2:
votes.add VoteTracker(
current_root: fakeHash(1),
next_root: fakeHash(2),
next_epoch: Epoch(0)
)
let err = deltas.compute_deltas(
indices, indices_offset = 0, votes, old_balances, new_balances
)
doAssert err.isOk, "compute_deltas finished with error: " & $err
doAssert deltas[0] == -Delta(Balance)*2, "Block 1 should have lost 2 balances"
doAssert deltas[1] == Delta(Balance), "Block 2 should have gained 1 balance"
for vote in votes:
doAssert vote.current_root == vote.next_root, "The vote should have been updated"
# ----------------------------------------------------------------------
echo "fork_choice internal tests for compute_deltas"
tZeroHash()
tAll_voted_the_same()
tDifferent_votes()
tMoving_votes()
tChanging_balances()
tValidator_appears()
tValidator_disappears()