# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  # Standard library
  std/[sequtils, tables],
  # Status libraries
  stew/results, chronicles,
  # Internal
  ../spec/[beaconstate, helpers],
  ../spec/datatypes/[phase0, altair, bellatrix],
  # Fork choice
  ./fork_choice_types, ./proto_array,
  ../consensus_object_pools/[spec_cache, blockchain_dag]

export results, fork_choice_types
export proto_array.len

# https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/fork-choice.md
# This is a port of https://github.com/sigp/lighthouse/pull/804
# which is a port of "Proto-Array": https://github.com/protolambda/lmd-ghost
# See also:
# - Protolambda port of Lighthouse: https://github.com/protolambda/eth2-py-hacks/blob/ae286567/proto_array.py
# - Prysmatic writeup: https://hackmd.io/bABJiht3Q9SyV3Ga4FT9lQ#High-level-concept
# - Gasper Whitepaper: https://arxiv.org/abs/2003.03052

# Forward declarations
# ----------------------------------------------------------------------

type Index = fork_choice_types.Index

func compute_deltas(
       deltas: var openArray[Delta],
       indices: Table[Eth2Digest, Index],
       indices_offset: Index,
       votes: var openArray[VoteTracker],
       old_balances: openArray[Gwei],
       new_balances: openArray[Gwei]
     ): FcResult[void]

# Fork choice routines
# ----------------------------------------------------------------------

logScope:
  topics = "fork_choice"

func init*(T: type ForkChoiceBackend,
           justifiedCheckpoint: Checkpoint,
           finalizedCheckpoint: Checkpoint): T =
  T(proto_array: ProtoArray.init(
    justifiedCheckpoint,
    finalizedCheckpoint))

proc init*(T: type ForkChoice,
           epochRef: EpochRef,
           blck: BlockRef): T =
  ## Initialize a fork choice context for a finalized state - in the finalized
  ## state, the justified and finalized checkpoints are the same, so only one
  ## is used here
  debug "Initializing fork choice",
    epoch = epochRef.epoch, blck = shortLog(blck)

  let
    justified = BalanceCheckpoint(
      checkpoint: Checkpoint(root: blck.root, epoch: epochRef.epoch),
      balances: epochRef.effective_balances)
    finalized = Checkpoint(root: blck.root, epoch: epochRef.epoch)
    best_justified = Checkpoint(
      root: blck.root, epoch: epochRef.epoch)

  ForkChoice(
    backend: ForkChoiceBackend.init(
      best_justified, finalized),
    checkpoints: Checkpoints(
      justified: justified,
      finalized: finalized,
      best_justified: best_justified),
  )
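
# Illustrative call site (a sketch only - the surrounding names such as
# `dag.getEpochRef(...)` and `dag.finalizedHead.blck` are hypothetical):
#   let fkChoice = ForkChoice.init(finalizedEpochRef, dag.finalizedHead.blck)
# seeds both the justified and finalized checkpoints from that single finalized
# block, matching the doc comment above.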

func extend[T](s: var seq[T], minLen: int) =
  ## Extend a sequence so that it can contain at least `minLen` elements.
  ## If it's already bigger, the sequence is unmodified.
  ## The extension is zero-initialized
  if s.len < minLen:
    s.setLen(minLen)
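
# For example (plain seq semantics, nothing module-specific): with
# s = @[1'u64, 2], s.extend(4) grows it to @[1'u64, 2, 0, 0], while s.extend(1)
# leaves it untouched - `setLen` zero-initializes the newly added elements.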

func on_tick*(self: var Checkpoints, time: BeaconTime): FcResult[void] =
  if self.time > time:
    return err ForkChoiceError(kind: fcInconsistentTick)

  # Reset store.proposer_boost_root if this is a new slot
  if time.slotOrZero > self.time.slotOrZero:
    self.proposer_boost_root = default(Eth2Digest)

  self.time = time

  ok()
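
# Sketch of the tick contract (illustrative only, `checkpoints` is hypothetical):
#   discard checkpoints.on_tick(Slot(42).start_beacon_time)  # advances time within/into slot 42
#   discard checkpoints.on_tick(Slot(43).start_beacon_time)  # new slot: proposer_boost_root is cleared
#   doAssert checkpoints.on_tick(Slot(41).start_beacon_time).isErr  # going backwards -> fcInconsistentTick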

proc on_tick(self: var Checkpoints, dag: ChainDAGRef, time: BeaconTime):
    FcResult[void] =
  let prev_time = self.time

  ? self.on_tick(time)

  let newEpoch = prev_time.slotOrZero.epoch() != time.slotOrZero.epoch()

  if newEpoch and
      self.best_justified.epoch > self.justified.checkpoint.epoch:
    let blck = dag.getBlockRef(self.best_justified.root).valueOr:
      return err ForkChoiceError(
        kind: fcJustifiedNodeUnknown,
        blockRoot: self.best_justified.root)

    let ancestor = blck.atEpochStart(self.finalized.epoch)
    if ancestor.blck.root == self.finalized.root:
      let epochRef = dag.getEpochRef(blck, self.best_justified.epoch, false)
      if epochRef.isSome():
        self.justified = BalanceCheckpoint(
          checkpoint: Checkpoint(root: blck.root, epoch: epochRef[].epoch),
          balances: epochRef[].effective_balances)
      else:
        # Shouldn't happen for justified data unless fork choice is out of sync
        # with ChainDAG
        warn "No `EpochRef` for justified epoch, skipping update - report bug",
          justified = shortLog(self.justified.checkpoint),
          best = shortLog(self.best_justified.epoch),
          blck = shortLog(blck)
  ok()

func process_attestation_queue(self: var ForkChoice) {.gcsafe.}

proc update_time(self: var ForkChoice, dag: ChainDAGRef, time: BeaconTime):
    FcResult[void] =
  const step_size = seconds(SECONDS_PER_SLOT.int)
  if time > self.checkpoints.time:
    # Call on_tick at least once per slot.
    while time >= self.checkpoints.time + step_size:
      ? on_tick(self.checkpoints, dag, self.checkpoints.time + step_size)

    if self.checkpoints.time < time:
      # Might create two ticks for the last slot.
      ? on_tick(self.checkpoints, dag, time)

    self.process_attestation_queue() # Only run if time changed!

  ok()
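
# Worked example (assuming mainnet SECONDS_PER_SLOT = 12): if `checkpoints.time`
# sits at the start of slot N and `time` is 30 seconds later, the loop above
# ticks at +12 s and +24 s (one tick per slot boundary crossed) and a final tick
# lands exactly on `time`; only then are queued attestations from now-past slots
# applied.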

func process_attestation*(
    self: var ForkChoiceBackend,
    validator_index: ValidatorIndex,
    block_root: Eth2Digest,
    target_epoch: Epoch
  ) =
  if block_root.isZero:
    return

  ## Add an attestation to the fork choice context
  self.votes.extend(validator_index.int + 1)

  template vote: untyped = self.votes[validator_index]
    # alias

  if target_epoch > vote.next_epoch or vote == default(VoteTracker):
    # TODO: the "default" condition is probably unneeded
    vote.next_root = block_root
    vote.next_epoch = target_epoch

    {.noSideEffect.}:
      trace "Integrating vote in fork choice",
        validator_index = validator_index,
        new_vote = shortLog(vote)
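
# Example of the vote bookkeeping above (values illustrative): a first call with
# validator_index = 3, a non-zero block_root R and target_epoch = 5 grows `votes`
# to at least 4 entries and records next_root = R, next_epoch = 5 for that
# validator; a later call with target_epoch = 4 is ignored, since only votes
# with a newer target epoch replace the pending one.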

func process_attestation_queue(self: var ForkChoice) =
  self.queuedAttestations.keepItIf:
    if it.slot < self.checkpoints.time.slotOrZero:
      for validator_index in it.attesting_indices:
        self.backend.process_attestation(
          validator_index, it.block_root, it.slot.epoch())
      false
    else:
      true

func contains*(self: ForkChoiceBackend, block_root: Eth2Digest): bool =
  ## Returns `true` if a block is known to the fork choice
  ## and `false` otherwise.
  ##
  ## In particular, before adding a block, its parent must be known to the fork choice
  self.proto_array.indices.contains(block_root)

# https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/fork-choice.md#on_attestation
proc on_attestation*(
    self: var ForkChoice,
    dag: ChainDAGRef,
    attestation_slot: Slot,
    beacon_block_root: Eth2Digest,
    attesting_indices: openArray[ValidatorIndex],
    wallTime: BeaconTime
  ): FcResult[void] =
  ? self.update_time(dag, wallTime)

  if beacon_block_root.isZero:
    return ok()

  if attestation_slot < self.checkpoints.time.slotOrZero:
    for validator_index in attesting_indices:
      # attestation_slot and target epoch must match, per attestation rules
      self.backend.process_attestation(
        validator_index, beacon_block_root, attestation_slot.epoch)
  else:
    # Spec:
    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
    self.queuedAttestations.add(QueuedAttestation(
      slot: attestation_slot,
      attesting_indices: @attesting_indices,
      block_root: beacon_block_root))
  ok()
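
# Timing sketch (illustrative): an attestation for slot S that arrives while the
# wall clock is still in slot S is parked in `queuedAttestations`; once
# `update_time` has ticked past slot S, `process_attestation_queue` drains it
# into the backend, matching the spec rule quoted above.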

# https://github.com/ethereum/consensus-specs/blob/v1.1.10/specs/phase0/fork-choice.md#should_update_justified_checkpoint
func should_update_justified_checkpoint(
    self: var Checkpoints,
    dag: ChainDAGRef,
    epochRef: EpochRef): FcResult[bool] =
  # To address the bouncing attack, only update conflicting justified
  # checkpoints in the fork choice if in the early slots of the epoch.
  # Otherwise, delay incorporation of new justified checkpoint until next epoch
  # boundary.
  #
  # See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for
  # more detailed analysis and discussion.
  if self.time.slotOrZero.since_epoch_start() < SAFE_SLOTS_TO_UPDATE_JUSTIFIED:
    return ok(true)

  let
    justified_slot = self.justified.checkpoint.epoch.start_slot()
    new_justified_checkpoint = epochRef.current_justified_checkpoint
    justified_blck = dag.getBlockRef(new_justified_checkpoint.root).valueOr:
      return err ForkChoiceError(
        kind: fcJustifiedNodeUnknown,
        blockRoot: new_justified_checkpoint.root)

  let justified_ancestor = justified_blck.atSlot(justified_slot)

  if justified_ancestor.blck.root != self.justified.checkpoint.root:
    return ok(false)

  ok(true)
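
# Worked example (assuming the phase0 mainnet preset, where
# SAFE_SLOTS_TO_UPDATE_JUSTIFIED = 8): a conflicting justified checkpoint seen
# while the store is at slot 3 of an epoch is accepted outright, whereas at
# slot 9 it is only accepted if the newly justified block still descends from
# the currently justified root - otherwise the switch is deferred to the next
# epoch boundary via `best_justified`.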

proc process_state(self: var Checkpoints,
                   dag: ChainDAGRef,
                   epochRef: EpochRef,
                   blck: BlockRef): FcResult[void] =
  let
    state_justified_epoch = epochRef.current_justified_checkpoint.epoch
    state_finalized_epoch = epochRef.finalized_checkpoint.epoch

  trace "Processing epoch",
    epoch = epochRef.epoch,
    state_justified_epoch = state_justified_epoch,
    current_justified = self.justified.checkpoint.epoch,
    state_finalized_epoch = state_finalized_epoch,
    current_finalized = self.finalized.epoch

  if state_justified_epoch > self.justified.checkpoint.epoch:
    if state_justified_epoch > self.best_justified.epoch:
      self.best_justified = epochRef.current_justified_checkpoint

    if ? should_update_justified_checkpoint(self, dag, epochRef):
      let
        justifiedBlck = blck.atEpochStart(state_justified_epoch)
        justifiedEpochRef = dag.getEpochRef(
          justifiedBlck.blck, state_justified_epoch, false)
      if justifiedEpochRef.isOk():
        self.justified =
          BalanceCheckpoint(
            checkpoint: Checkpoint(
              root: justifiedBlck.blck.root,
              epoch: state_justified_epoch
            ),
            balances: justifiedEpochRef[].effective_balances)
      else:
        # Shouldn't happen, unless fork choice is out of sync with ChainDAG
        warn "Skipping justified checkpoint update, no EpochRef - report bug",
          epoch = epochRef.epoch,
          justifiedBlck = shortLog(justifiedBlck),
          state_justified = shortLog(epochRef.current_justified_checkpoint),
          state_finalized = shortLog(epochRef.finalized_checkpoint)

  if state_finalized_epoch > self.finalized.epoch:
    self.finalized = epochRef.finalized_checkpoint

    if self.justified.checkpoint.epoch != state_justified_epoch or
        self.justified.checkpoint.root != epochRef.current_justified_checkpoint.root:

      if (state_justified_epoch > self.justified.checkpoint.epoch) or
          ((? dag.getBlockRef(self.justified.checkpoint.root).orErr(
            ForkChoiceError(
              kind: fcJustifiedNodeUnknown,
              blockRoot: self.justified.checkpoint.root))).atEpochStart(
            self.finalized.epoch).blck.root != self.finalized.root):

        let
          justifiedBlck = blck.atEpochStart(state_justified_epoch)
          justifiedEpochRef = dag.getEpochRef(
            justifiedBlck.blck, state_justified_epoch, false)
        if justifiedEpochRef.isOk():
          self.justified =
            BalanceCheckpoint(
              checkpoint: Checkpoint(
                root: justifiedBlck.blck.root,
                epoch: justifiedEpochRef[].epoch
              ),
              balances: justifiedEpochRef[].effective_balances)
        else:
          warn "Skipping justified checkpoint update, no EpochRef - report bug",
            epoch = epochRef.epoch,
            justifiedBlck = shortLog(justifiedBlck),
            state_justified = shortLog(epochRef.current_justified_checkpoint),
            state_finalized = shortLog(epochRef.finalized_checkpoint)
  ok()

func process_block*(self: var ForkChoiceBackend,
                    block_root: Eth2Digest,
                    parent_root: Eth2Digest,
                    justified_checkpoint: Checkpoint,
                    finalized_checkpoint: Checkpoint): FcResult[void] =
  self.proto_array.onBlock(
    block_root, parent_root, justified_checkpoint, finalized_checkpoint)

proc process_block*(self: var ForkChoice,
                    dag: ChainDAGRef,
                    epochRef: EpochRef,
                    blckRef: BlockRef,
                    blck: ForkyTrustedBeaconBlock,
                    wallTime: BeaconTime): FcResult[void] =
  ? update_time(self, dag, wallTime)
  ? process_state(self.checkpoints, dag, epochRef, blckRef)

  let committees_per_slot = get_committee_count_per_slot(epochRef)

  for attestation in blck.body.attestations:
    let _ = dag.getBlockRef(attestation.data.target.root).valueOr:
      continue

    let committee_index = block:
      let v = CommitteeIndex.init(attestation.data.index, committees_per_slot)
      if v.isErr():
        warn "Unexpected committee index in block attestation",
          blck = shortLog(blck),
          data = shortLog(attestation.data)
        continue
      v.get()

    if attestation.data.beacon_block_root in self.backend:
      for validator in get_attesting_indices(
          epochRef, attestation.data.slot, committee_index,
          attestation.aggregation_bits):
        self.backend.process_attestation(
          validator,
          attestation.data.beacon_block_root,
          attestation.data.target.epoch)

  # Add proposer score boost if the block is timely
  let
    time_into_slot =
      self.checkpoints.time - self.checkpoints.time.slotOrZero.start_beacon_time
    is_before_attesting_interval = time_into_slot < attestationSlotOffset
  if self.checkpoints.time.slotOrZero == blck.slot and
      is_before_attesting_interval:
    self.checkpoints.proposer_boost_root = blckRef.root

  ? process_block(
      self.backend, blckRef.root, blck.parent_root,
      epochRef.current_justified_checkpoint,
      epochRef.finalized_checkpoint
    )

  trace "Integrating block in fork choice",
    block_root = shortLog(blckRef)

  ok()
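
# Proposer boost sketch (assuming mainnet timing, SECONDS_PER_SLOT = 12 with an
# attestation cutoff of one third of a slot, i.e. 4 s): a block for the current
# wall-clock slot processed 3 s into the slot sets `proposer_boost_root`, while
# the same block arriving 5 s in does not and is weighted like any other block
# when `find_head` runs.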

func find_head*(
    self: var ForkChoiceBackend,
    justifiedCheckpoint: Checkpoint,
    finalizedCheckpoint: Checkpoint,
    justified_state_balances: seq[Gwei],
    proposer_boost_root: Eth2Digest
  ): FcResult[Eth2Digest] =
  ## Returns the new blockchain head

  # Compute deltas with previous call
  #   we might want to reuse the `deltas` buffer across calls
  var deltas = newSeq[Delta](self.proto_array.indices.len)
  ? deltas.compute_deltas(
    indices = self.proto_array.indices,
    indices_offset = self.proto_array.nodes.offset,
    votes = self.votes,
    old_balances = self.balances,
    new_balances = justified_state_balances
  )

  # Apply score changes
  ? self.proto_array.applyScoreChanges(
    deltas, justifiedCheckpoint, finalizedCheckpoint,
    justified_state_balances, proposer_boost_root
  )

  self.balances = justified_state_balances

  # Find the best block
  var new_head{.noinit.}: Eth2Digest
  ? self.proto_array.findHead(new_head, justifiedCheckpoint.root)

  {.noSideEffect.}:
    trace "Fork choice requested",
      justifiedCheckpoint = shortLog(justifiedCheckpoint),
      finalizedCheckpoint = shortLog(finalizedCheckpoint),
      fork_choice_head = shortLog(new_head)

  return ok(new_head)
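
# Typical flow (sketch only, names hypothetical): after blocks and votes have
# been registered via `process_block`/`process_attestation`, a call such as
#   let head = backend.find_head(justified, finalized, balances, boost_root)
# re-weights the proto-array with the balance/vote deltas computed above and
# returns the heaviest descendant of the justified root.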

# https://github.com/ethereum/consensus-specs/blob/v0.12.1/specs/phase0/fork-choice.md#get_head
proc get_head*(self: var ForkChoice,
               dag: ChainDAGRef,
               wallTime: BeaconTime): FcResult[Eth2Digest] =
  ? self.update_time(dag, wallTime)

  self.backend.find_head(
    self.checkpoints.justified.checkpoint,
    self.checkpoints.finalized,
    self.checkpoints.justified.balances,
    self.checkpoints.proposer_boost_root
  )

func prune*(
    self: var ForkChoiceBackend, finalized_root: Eth2Digest
  ): FcResult[void] =
  ## Prune blocks preceding the finalized root as they are now unneeded.
  self.proto_array.prune(finalized_root)

func prune*(self: var ForkChoice): FcResult[void] =
  self.backend.prune(self.checkpoints.finalized.root)
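
# Note (names illustrative): pruning only drops proto-array nodes that precede
# the finalized root, so it is typically invoked after finalization advances,
# e.g. `discard forkChoice.prune()` once `checkpoints.finalized` has been
# updated; the choice of head is unaffected.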

func compute_deltas(
       deltas: var openArray[Delta],
       indices: Table[Eth2Digest, Index],
       indices_offset: Index,
       votes: var openArray[VoteTracker],
       old_balances: openArray[Gwei],
       new_balances: openArray[Gwei]
     ): FcResult[void] =
  ## Update `deltas` with the score changes implied by the difference
  ## between old and new balances and between old and new votes
  ##
  ## `deltas.len` must match `indices.len` (length match)
  ##
  ## Error:
  ## - If a value in indices is greater than `indices.len`
  ## - If an `Eth2Digest` in `votes` does not exist in `indices`
  ##   except for the `default(Eth2Digest)` (i.e. zero hash)

  for val_index, vote in votes.mpairs():
    # No need to create a score change if the validator has never voted
    # or if votes are for the zero hash (alias to the genesis block)
    if vote.current_root.isZero and vote.next_root.isZero:
      continue

    # If the validator was not included in `old_balances` (i.e. did not exist)
    # its balance is zero
    let old_balance = if val_index < old_balances.len: old_balances[val_index]
                      else: 0

    # If the validator is not known in the `new_balances` then use balance of zero
    #
    # It is possible that there is a vote for an unknown validator if we change our
    # justified state to a new state with a higher epoch on a different fork
    # because that fork may have on-boarded fewer validators than the previous fork.
    #
    # Note that attesters are not different as they are activated only under finality
    let new_balance = if val_index < new_balances.len: new_balances[val_index]
                      else: 0

    if vote.current_root != vote.next_root or old_balance != new_balance:
      # Ignore the current or next vote if it is not known in `indices`.
      # We assume that it is outside of our tree (i.e., pre-finalization) and therefore not interesting.
      if vote.current_root in indices:
        let index = indices.unsafeGet(vote.current_root) - indices_offset
        if index >= deltas.len:
          return err ForkChoiceError(
            kind: fcInvalidNodeDelta,
            index: index)
        deltas[index] -= Delta old_balance
          # Note that delta can be negative
          # TODO: is int64 big enough?

      if vote.next_root in indices:
        let index = indices.unsafeGet(vote.next_root) - indices_offset
        if index >= deltas.len:
          return err ForkChoiceError(
            kind: fcInvalidNodeDelta,
            index: index)
        deltas[index] += Delta new_balance
          # Note that delta can be negative
          # TODO: is int64 big enough?

      vote.current_root = vote.next_root
  return ok()
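
# Worked example of the loop above (numbers illustrative): a validator whose
# balance stays at 32 Gwei and whose vote moves from root A (index 0) to root B
# (index 1) contributes deltas[0] -= 32 Gwei and deltas[1] += 32 Gwei; if its
# balance also grew to 33 Gwei, the subtraction uses the old 32 and the addition
# the new 33, which is exactly what the `tMoving_votes` and `tChanging_balances`
# sanity checks below exercise.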

# Sanity checks
# ----------------------------------------------------------------------
# Sanity checks on internal private procedures

when isMainModule:
  import stew/endians2

  func fakeHash(index: SomeInteger): Eth2Digest =
    ## Create fake hashes
    ## Those are just the value serialized in big-endian
    ## We add 16x16 to avoid having a zero hash as those are special-cased
    ## We store them in the first 8 bytes
    ## as those are the ones used in hash tables Table[Eth2Digest, T]
    result.data[0 ..< 8] = (16*16+index).uint64.toBytesBE()

  proc tZeroHash() =
    echo "    fork_choice compute_deltas - test zero votes"

    const validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add default(VoteTracker)
      old_balances.add 0
      new_balances.add 0

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas == newSeq[Delta](validator_count), "deltas should be zeros"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tAll_voted_the_same() =
    echo "    fork_choice compute_deltas - test all same votes"

    const
      Balance = Gwei(42)
      validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        current_root: default(Eth2Digest),
        next_root: fakeHash(0), # Get a non-zero hash
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas.pairs:
      if i == 0:
        doAssert delta == Delta(Balance * validator_count), "The 0th root should have a delta"
      else:
        doAssert delta == 0, "The non-0 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tDifferent_votes() =
    echo "    fork_choice compute_deltas - test all different votes"

    const
      Balance = Gwei(42)
      validator_count = 16
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        current_root: default(Eth2Digest),
        next_root: fakeHash(i), # Each vote for a different root
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas.pairs:
      doAssert delta == Delta(Balance), "Each root should have a delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tMoving_votes() =
    echo "    fork_choice compute_deltas - test moving votes"

    const
      Balance = Gwei(42)
      validator_count = 16
      TotalDeltas = Delta(Balance * validator_count)
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        # Move vote from root 0 to root 1
        current_root: fakeHash(0),
        next_root: fakeHash(1),
        next_epoch: Epoch(0)
      )
      old_balances.add Balance
      new_balances.add Balance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas.pairs:
      if i == 0:
        doAssert delta == -TotalDeltas, "0th root should have a negative delta"
      elif i == 1:
        doAssert delta == TotalDeltas, "1st root should have a positive delta"
      else:
        doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tMove_out_of_tree() =
    echo "    fork_choice compute_deltas - test votes for unknown subtree"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add a block
    indices.add fakeHash(1), 0

    # 2 validators
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance, Balance]
    let new_balances = @[Balance, Balance]

    # One validator moves their vote from the block to the zero hash
    votes.add VoteTracker(
      current_root: fakeHash(1),
      next_root: default(Eth2Digest),
      next_epoch: Epoch(0)
    )

    # One validator moves their vote from the block to something outside of the tree
    votes.add VoteTracker(
      current_root: fakeHash(1),
      next_root: fakeHash(1337),
      next_epoch: Epoch(0)
    )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance)*2, "The 0th block should have lost both balances."

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tChanging_balances() =
    echo "    fork_choice compute_deltas - test changing balances"

    const
      OldBalance = Gwei(42)
      NewBalance = OldBalance * 2
      validator_count = 16
      TotalOldDeltas = Delta(OldBalance * validator_count)
      TotalNewDeltas = Delta(NewBalance * validator_count)
    var deltas = newSeqUninitialized[Delta](validator_count)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]
    var old_balances: seq[Gwei]
    var new_balances: seq[Gwei]

    for i in 0 ..< validator_count:
      indices.add fakeHash(i), i
      votes.add VoteTracker(
        # Move vote from root 0 to root 1
        current_root: fakeHash(0),
        next_root: fakeHash(1),
        next_epoch: Epoch(0)
      )
      old_balances.add OldBalance
      new_balances.add NewBalance

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    for i, delta in deltas.pairs:
      if i == 0:
        doAssert delta == -TotalOldDeltas, "0th root should have a negative delta"
      elif i == 1:
        doAssert delta == TotalNewDeltas, "1st root should have a positive delta"
      else:
        doAssert delta == 0, "The non-0 and non-1 indexes should have a zero delta"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tValidator_appears() =
    echo "    fork_choice compute_deltas - test validator appears"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add 2 blocks
    indices.add fakeHash(1), 0
    indices.add fakeHash(2), 1

    # 1 validator at the start, 2 at the end
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance]
    let new_balances = @[Balance, Balance]

    # Both move their vote from block 1 to block 2
    for _ in 0 ..< 2:
      votes.add VoteTracker(
        current_root: fakeHash(1),
        next_root: fakeHash(2),
        next_epoch: Epoch(0)
      )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance), "Block 1 should have lost only 1 balance"
    doAssert deltas[1] == Delta(Balance)*2, "Block 2 should have gained 2 balances"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  proc tValidator_disappears() =
    echo "    fork_choice compute_deltas - test validator disappears"

    const Balance = Gwei(42)

    var indices: Table[Eth2Digest, Index]
    var votes: seq[VoteTracker]

    # Add 2 blocks
    indices.add fakeHash(1), 0
    indices.add fakeHash(2), 1

    # 2 validators at the start, 1 at the end
    var deltas = newSeqUninitialized[Delta](2)
    let old_balances = @[Balance, Balance]
    let new_balances = @[Balance]

    # Both move their vote from block 1 to block 2
    for _ in 0 ..< 2:
      votes.add VoteTracker(
        current_root: fakeHash(1),
        next_root: fakeHash(2),
        next_epoch: Epoch(0)
      )

    let err = deltas.compute_deltas(
      indices, indices_offset = 0, votes, old_balances, new_balances
    )

    doAssert err.isOk, "compute_deltas finished with error: " & $err

    doAssert deltas[0] == -Delta(Balance)*2, "Block 1 should have lost 2 balances"
    doAssert deltas[1] == Delta(Balance), "Block 2 should have gained 1 balance"

    for vote in votes:
      doAssert vote.current_root == vote.next_root, "The vote should have been updated"

  # ----------------------------------------------------------------------

  echo "fork_choice internal tests for compute_deltas"
  tZeroHash()
  tAll_voted_the_same()
  tDifferent_votes()
  tMoving_votes()
  tChanging_balances()
  tValidator_appears()
  tValidator_disappears()