# beacon_chain
# Copyright (c) 2019-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [].}

import
  # Status
  chronicles, chronos, metrics,
  results,
  # Internals
  ../spec/[
    beaconstate, state_transition_block, forks, helpers, network, signatures,
    eip7594_helpers],
  ../consensus_object_pools/[
    attestation_pool, blockchain_dag, blob_quarantine, block_quarantine,
    data_column_quarantine, spec_cache, light_client_pool,
    sync_committee_msg_pool, validator_change_pool],
  ".."/[beacon_clock],
  ./batch_validation

from libp2p/protocols/pubsub/errors import ValidationResult

export results, ValidationResult

logScope:
  topics = "gossip_checks"

declareCounter beacon_attestations_dropped_queue_full,
  "Number of attestations dropped because queue is full"

declareCounter beacon_aggregates_dropped_queue_full,
  "Number of aggregates dropped because queue is full"

declareCounter beacon_sync_messages_dropped_queue_full,
  "Number of sync committee messages dropped because queue is full"

declareCounter beacon_contributions_dropped_queue_full,
  "Number of sync committee contributions dropped because queue is full"

# This result is a little messy in that it returns Result.ok for
# ValidationResult.Accept and an err for the others - this helps transport
# an error message to callers but could arguably be done in a cleaner way.
type
  ValidationError* = (ValidationResult, cstring)

template errIgnore*(msg: cstring): untyped =
  err((ValidationResult.Ignore, cstring msg))

template errReject*(msg: cstring): untyped =
  err((ValidationResult.Reject, cstring msg))

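# Illustrative sketch (not part of the original module): a check built on these
# helpers returns `ok()` on success, `errIgnore` for conditions that may become
# valid later and `errReject` for outright protocol violations, e.g.:
#
#   func check_example(a: phase0.Attestation, wallSlot: Slot):
#       Result[void, ValidationError] =
#     if a.data.slot > wallSlot:
#       return errIgnore("Example: attestation from the future") # retry later
#     if a.aggregation_bits.countOnes() == 0:
#       return errReject("Example: no aggregation bits set")     # never valid
#     ok()
#
# `check_example` and its thresholds are hypothetical and shown only to
# illustrate the Ignore/Reject split used throughout this module.
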
# Internal checks
# ----------------------------------------------------------------

func check_attestation_block(
    pool: AttestationPool, attestationSlot: Slot, blck: BlockRef):
    Result[void, ValidationError] =
  # The voted-for block must be a descendant of the finalized block, thus it
  # must be at least as new as the finalized checkpoint - in theory it could be
  # equal, but then we're voting for an already-finalized block which is pretty
  # useless - other blocks that are not rooted in the finalized chain will be
  # pruned by the chain dag, and thus we can no longer get a BlockRef for them
  if not (blck.slot > pool.dag.finalizedHead.slot):
    return errIgnore("Voting for already-finalized block")

  # The attestation shouldn't be voting for a block that didn't exist at the
  # time - not in spec, but hard to reason about
  if not (attestationSlot >= blck.slot):
    return errIgnore("Voting for block that didn't exist at the time")

  # We'll also cap it at 4 epochs which is somewhat arbitrary, but puts an
  # upper bound on the processing done to validate the attestation
  # TODO revisit with less arbitrary approach
  if not ((attestationSlot - blck.slot) <= uint64(4 * SLOTS_PER_EPOCH)):
    return errIgnore("Voting for very old block")

  ok()

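# Note on the 4-epoch cap above: with mainnet presets (SLOTS_PER_EPOCH = 32,
# SECONDS_PER_SLOT = 12), the attestation may vote for a block at most
# 4 * 32 = 128 slots, i.e. roughly 25.6 minutes, older than the attestation
# slot before it is ignored.
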
func check_propagation_slot_range(
    consensusFork: ConsensusFork, msgSlot: Slot, wallTime: BeaconTime):
    Result[Slot, ValidationError] =
  let futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()

  if not futureSlot.afterGenesis or msgSlot > futureSlot.slot:
    return errIgnore("Attestation slot in the future")

  let pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()

  if not pastSlot.afterGenesis:
    return ok(msgSlot)

  if consensusFork < ConsensusFork.Deneb:
    # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#configuration
    # The spec value of ATTESTATION_PROPAGATION_SLOT_RANGE is 32, but the
    # gossip network can retransmit attestations that are on the cusp of being
    # out of spec and which, by the time they reach their destination, might be
    # out of spec.
    const TIME_IN_FLIGHT_BUFFER = 4
    static: doAssert ATTESTATION_PROPAGATION_SLOT_RANGE > TIME_IN_FLIGHT_BUFFER

    if msgSlot + (ATTESTATION_PROPAGATION_SLOT_RANGE - TIME_IN_FLIGHT_BUFFER) <
        pastSlot.slot:
      return errIgnore("Attestation slot in the past")
  else:
    # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id
    # "[IGNORE] the epoch of attestation.data.slot is either the current or
    # previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
    # compute_epoch_at_slot(attestation.data.slot) in
    # (get_previous_epoch(state), get_current_epoch(state))"
    #
    # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
    # "[IGNORE] the epoch of aggregate.data.slot is either the current or
    # previous epoch (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
    # compute_epoch_at_slot(aggregate.data.slot) in
    # (get_previous_epoch(state), get_current_epoch(state))"
    if msgSlot.epoch < pastSlot.slot.epoch.get_previous_epoch:
      return errIgnore("Attestation slot in the past")

  ok(msgSlot)

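# Worked example for the propagation window above, assuming the spec values
# ATTESTATION_PROPAGATION_SLOT_RANGE = 32 and SLOTS_PER_EPOCH = 32: pre-Deneb,
# an attestation for slot S is ignored once the clock-disparity-adjusted wall
# slot exceeds S + 28 (32 - TIME_IN_FLIGHT_BUFFER); from Deneb onwards it is
# ignored only once its epoch is older than the wall slot's previous epoch,
# giving an effective window of roughly 32-64 slots depending on where in the
# epoch the wall slot falls.
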
func check_slot_exact(msgSlot: Slot, wallTime: BeaconTime):
    Result[Slot, ValidationError] =
  let futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()

  if not futureSlot.afterGenesis or msgSlot > futureSlot.slot:
    return errIgnore("Sync committee slot in the future")

  let pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()

  if pastSlot.afterGenesis and msgSlot < pastSlot.slot:
    return errIgnore("Sync committee slot in the past")

  ok(msgSlot)

func check_beacon_and_target_block(
    pool: var AttestationPool, data: AttestationData):
    Result[BlockSlot, ValidationError] =
  # The block being voted for (data.beacon_block_root) passes validation - by
  # extension, the target block must at that point also pass validation.
  # The target block is returned.
  # We rely on the chain DAG to have been validated, so check for the existence
  # of the block in the pool.
  let blck = pool.dag.getBlockRef(data.beacon_block_root).valueOr:
    pool.quarantine[].addMissing(data.beacon_block_root)
    return errIgnore("Attestation block unknown")

  # Not in spec - check that rewinding to the state is sane
  ? check_attestation_block(pool, data.slot, blck)

  # [REJECT] The attestation's target block is an ancestor of the block named
  # in the LMD vote -- i.e.
  # get_checkpoint_block(store, attestation.data.beacon_block_root,
  # attestation.data.target.epoch) == attestation.data.target.root
  # the sanity of target.epoch has been checked by check_attestation_slot_target
  let target = blck.atCheckpoint(data.target).valueOr:
    return errReject("Attestation target is not ancestor of LMD vote block")

  ok(target)

func check_aggregation_count(
    attestation: phase0.Attestation, singular: bool):
    Result[void, ValidationError] =
  let ones = attestation.aggregation_bits.countOnes()
  if singular and ones != 1:
    return errReject("Attestation must have a single attestation bit set")
  elif not singular and ones < 1:
    return errReject("Attestation must have at least one attestation bit set")

  ok()

func check_aggregation_count(
    attestation: electra.Attestation, singular: bool):
    Result[void, ValidationError] =
  block:
    let ones = attestation.committee_bits.countOnes()
    if singular and ones != 1:
      return errReject("Attestation must have a single committee bit set")
    elif not singular and ones < 1:
      return errReject("Attestation must have at least one committee bit set")

  block:
    let ones = attestation.aggregation_bits.countOnes()
    if singular and ones != 1:
      return errReject("Attestation must have a single attestation bit set")
    elif not singular and ones < 1:
      return errReject("Attestation must have at least one attestation bit set")

  ok()

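# Note: the Electra overload above additionally checks `committee_bits`
# because, with EIP-7549, the committee index is carried as a bitvector on the
# attestation rather than in `attestation.data`; an unaggregated attestation
# must therefore name exactly one committee as well as exactly one validator.
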
func check_attestation_subnet(
    shufflingRef: ShufflingRef, slot: Slot, committee_index: CommitteeIndex,
    subnet_id: SubnetId): Result[void, ValidationError] =
  let
    expectedSubnet = compute_subnet_for_attestation(
      get_committee_count_per_slot(shufflingRef), slot, committee_index)

  if expectedSubnet != subnet_id:
    return errReject("Attestation not on the correct subnet")

  ok()

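# For reference (phase0 p2p spec, paraphrased): compute_subnet_for_attestation
# works out to roughly
#
#   (committees_per_slot * (slot mod SLOTS_PER_EPOCH) + committee_index)
#     mod ATTESTATION_SUBNET_COUNT
#
# so the REJECT above fires whenever the advertised subnet_id does not match
# this value for the attestation's slot and committee.
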
func check_blob_sidecar_inclusion_proof(
    blob_sidecar: deneb.BlobSidecar): Result[void, ValidationError] =
  let res = blob_sidecar.verify_blob_sidecar_inclusion_proof()
  if res.isErr:
    return errReject(res.error)

  ok()

func check_data_column_sidecar_inclusion_proof(
    data_column_sidecar: DataColumnSidecar): Result[void, ValidationError] =
  let res = data_column_sidecar.verify_data_column_sidecar_inclusion_proof()
  if res.isErr:
    return errReject(res.error)

  ok()

proc check_data_column_sidecar_kzg_proofs(
    data_column_sidecar: DataColumnSidecar): Result[void, ValidationError] =
  let res = data_column_sidecar.verify_data_column_sidecar_kzg_proofs()
  if res.isErr:
    return errReject(res.error)

  ok()

# Gossip Validation
# ----------------------------------------------------------------

# Generally, the following rules apply for gossip validation:
#
# [REJECT]
# This doesn't depend on the wall clock or the exact state of the DAG; it's
# an internal consistency/correctness check only, and effectively never has
# false positives. These don't, for example, arise from timeouts.
#
# [IGNORE]
# This may be intermittent, depend on timing or the current state of the DAG.

template checkedReject(
    msg: cstring, strictVerification: bool): untyped =
  if strictVerification:
    raiseAssert $msg
  errReject(msg)

template checkedReject(
    error: ValidationError, strictVerification: bool): untyped =
  doAssert error[0] == ValidationResult.Reject
  if strictVerification:
    raiseAssert $error[1]
  err(error)

template checkedResult*(
    error: ValidationError, strictVerification: bool): untyped =
  if error[0] == ValidationResult.Reject and strictVerification:
    raiseAssert $error[1]
  err(error)

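# The wrappers below bind `strictVerification` to the update flags of the
# ChainDAGRef (or of the pool's DAG), so call sites can simply write
# `dag.checkedReject(...)`. When the flag is set - typically only in testing -
# a REJECT raises via `raiseAssert` instead of being returned, turning protocol
# violations into hard failures.
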
# ChainDAGRef
template checkedReject(
    dag: ChainDAGRef, msg: cstring): untyped =
  checkedReject(msg, strictVerification in dag.updateFlags)

template checkedReject(
    dag: ChainDAGRef, error: ValidationError): untyped =
  checkedReject(error, strictVerification in dag.updateFlags)

template checkedResult(
    dag: ChainDAGRef, error: ValidationError): untyped =
  checkedResult(error, strictVerification in dag.updateFlags)

# AttestationPool
template checkedReject(
    pool: ref AttestationPool, msg: cstring): untyped =
  pool[].dag.checkedReject(msg)

template checkedReject(
    pool: ref AttestationPool, error: ValidationError): untyped =
  pool[].dag.checkedReject(error)

template checkedResult(
    pool: ref AttestationPool, error: ValidationError): untyped =
  pool[].dag.checkedResult(error)

# ValidatorChangePool
template checkedReject(
    pool: ValidatorChangePool, msg: cstring): untyped =
  pool.dag.checkedReject(msg)

template checkedReject(
    pool: ValidatorChangePool, error: ValidationError): untyped =
  pool.dag.checkedReject(error)

template checkedResult(
    pool: ValidatorChangePool, error: ValidationError): untyped =
  pool.dag.checkedResult(error)

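# Phase0 and Altair blocks carry no execution payload, so there is nothing
# Bellatrix-specific to validate for them; the overload below is deliberately
# a no-op.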
template validateBeaconBlockBellatrix(
    signed_beacon_block: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
    parent: BlockRef): untyped =
  discard

# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
template validateBeaconBlockBellatrix(
    signed_beacon_block:
      bellatrix.SignedBeaconBlock |
      capella.SignedBeaconBlock |
      deneb.SignedBeaconBlock |
      electra.SignedBeaconBlock,
    parent: BlockRef): untyped =
  # If execution is enabled for the block -- i.e.
  # is_execution_enabled(state, block.body) -- then validate the following:
  #
  # `is_execution_enabled(state, block.body)` is
  # `is_merge_transition_block(state, block.body) or is_merge_transition_complete(state)` is
  # `(not is_merge_transition_complete(state) and block.body.execution_payload != ExecutionPayload()) or is_merge_transition_complete(state)` is
  # `is_merge_transition_complete(state) or block.body.execution_payload != ExecutionPayload()` is
  # `is_merge_transition_complete(state) or is_execution_block(block)`
  #
  # `is_merge_transition_complete(state)` tests for
  # `state.latest_execution_payload_header != ExecutionPayloadHeader()`, while
  # https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/bellatrix/beacon-chain.md#block-processing
  # shows that `state.latest_execution_payload_header` being default or not is
  # exactly equivalent to whether that block's execution payload is default or
  # not, so test cached block information rather than reconstructing a state.
  let isExecutionEnabled =
    if signed_beacon_block.message.is_execution_block:
      true
    else:
      # If we don't know whether the parent block had execution enabled,
      # assume it didn't. This way, we don't reject here if the timestamp
      # is invalid, and let state transition check the timestamp.
      # This is an edge case, and may be hit in a pathological scenario with
      # checkpoint sync, because the checkpoint block may be unavailable
      # and it could already be the parent of the new block before backfill.
      not dag.loadExecutionBlockHash(parent).get(ZERO_HASH).isZero

  if isExecutionEnabled:
    # [REJECT] The block's execution payload timestamp is correct with respect
    # to the slot -- i.e. execution_payload.timestamp ==
    # compute_timestamp_at_slot(state, block.slot).
    let timestampAtSlot =
      withState(dag.headState):
        compute_timestamp_at_slot(
          forkyState.data, signed_beacon_block.message.slot)
    if not (signed_beacon_block.message.body.execution_payload.timestamp ==
        timestampAtSlot):
      quarantine[].addUnviable(signed_beacon_block.root)
      return dag.checkedReject(
        "BeaconBlock: mismatched execution payload timestamp")

  # The condition:
  # [REJECT] The block's parent (defined by `block.parent_root`) passes all
  # validation (excluding execution node verification of the
  # `block.body.execution_payload`).
  # cannot occur here, because Nimbus's optimistic sync waits for either
  # `ACCEPTED` or `SYNCING` from the EL to get this far.

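# For reference, the timestamp check above reduces (per the Bellatrix spec's
# compute_timestamp_at_slot) to comparing the payload timestamp against
# genesis_time + (slot - GENESIS_SLOT) * SECONDS_PER_SLOT.
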
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
proc validateBlobSidecar*(
    dag: ChainDAGRef, quarantine: ref Quarantine,
    blobQuarantine: ref BlobQuarantine, blob_sidecar: BlobSidecar,
    wallTime: BeaconTime, subnet_id: BlobId): Result[void, ValidationError] =
  # Some of the checks below have been reordered compared to the spec, to
  # perform the cheap checks first - in particular, we want to avoid loading
  # an `EpochRef` and checking signatures. This reordering might lead to
  # different IGNORE/REJECT results in turn affecting gossip scores.
  template block_header: untyped = blob_sidecar.signed_block_header.message

  # [REJECT] The sidecar's index is consistent with `MAX_BLOBS_PER_BLOCK`
  # -- i.e. `blob_sidecar.index < MAX_BLOBS_PER_BLOCK`
  if not (blob_sidecar.index < MAX_BLOBS_PER_BLOCK):
    return dag.checkedReject("BlobSidecar: index inconsistent")

  # [REJECT] The sidecar is for the correct subnet -- i.e.
  # `compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id`.
  if not (compute_subnet_for_blob_sidecar(blob_sidecar.index) == subnet_id):
    return dag.checkedReject("BlobSidecar: subnet incorrect")

  # [IGNORE] The sidecar is not from a future slot (with a
  # `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that
  # `block_header.slot <= current_slot` (a client MAY queue future sidecars
  # for processing at the appropriate slot).
  if not (block_header.slot <=
      (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
    return errIgnore("BlobSidecar: slot too high")

  # [IGNORE] The sidecar is from a slot greater than the latest
  # finalized slot -- i.e. validate that `block_header.slot >
  # compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
  if not (block_header.slot > dag.finalizedHead.slot):
    return errIgnore("BlobSidecar: slot already finalized")

  # [IGNORE] The sidecar is the first sidecar for the tuple
  # (block_header.slot, block_header.proposer_index, blob_sidecar.index)
  # with valid header signature, sidecar inclusion proof, and kzg proof.
  let block_root = hash_tree_root(block_header)
  if dag.getBlockRef(block_root).isSome():
    return errIgnore("BlobSidecar: already have block")
  if blobQuarantine[].hasBlob(
      block_header.slot, block_header.proposer_index, blob_sidecar.index):
    return errIgnore("BlobSidecar: already have valid blob from same proposer")

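  # For reference, the Deneb p2p spec defines compute_subnet_for_blob_sidecar
  # as essentially `blob_sidecar.index mod BLOB_SIDECAR_SUBNET_COUNT`, which is
  # what the subnet check above compares against.
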
  # [REJECT] The sidecar's inclusion proof is valid as verified by
  # `verify_blob_sidecar_inclusion_proof(blob_sidecar)`.
  block:
    let v = check_blob_sidecar_inclusion_proof(blob_sidecar)
    if v.isErr:
      return dag.checkedReject(v.error)

  # [IGNORE] The sidecar's block's parent (defined by
  # `block_header.parent_root`) has been seen (via both gossip and
  # non-gossip sources) (a client MAY queue sidecars for processing
  # once the parent block is retrieved).
  #
  # [REJECT] The sidecar's block's parent (defined by
  # `block_header.parent_root`) passes validation.
  let parent = dag.getBlockRef(block_header.parent_root).valueOr:
    if block_header.parent_root in quarantine[].unviable:
      quarantine[].addUnviable(block_root)
      return dag.checkedReject("BlobSidecar: parent not validated")
    else:
      quarantine[].addMissing(block_header.parent_root)
      return errIgnore("BlobSidecar: parent not found")

  # [REJECT] The sidecar is from a higher slot than the sidecar's
  # block's parent (defined by `block_header.parent_root`).
  if not (block_header.slot > parent.bid.slot):
    return dag.checkedReject("BlobSidecar: slot lower than parents'")

  # [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's
  # block -- i.e. `get_checkpoint_block(store, block_header.parent_root,
  # store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
  let
    finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
    ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)

  if ancestor.isNil:
    # This shouldn't happen: we should always be able to trace the parent back
    # to the finalized checkpoint (else it wouldn't be in the DAG)
    return errIgnore("BlobSidecar: Can't find ancestor")

  if not (
      finalized_checkpoint.root == ancestor.root or
      finalized_checkpoint.root.isZero):
    quarantine[].addUnviable(block_root)
    return dag.checkedReject(
      "BlobSidecar: Finalized checkpoint not an ancestor")

  # [REJECT] The sidecar is proposed by the expected `proposer_index`
  # for the block's slot in the context of the current shuffling
  # (defined by `block_header.parent_root`/`block_header.slot`).
  # If the proposer_index cannot immediately be verified against the expected
  # shuffling, the sidecar MAY be queued for later processing while proposers
  # for the block's branch are calculated -- in such a case do not
  # REJECT, instead IGNORE this message.
  let proposer = getProposer(dag, parent, block_header.slot).valueOr:
    warn "cannot compute proposer for blob"
    return errIgnore("BlobSidecar: Cannot compute proposer") # internal issue

  if uint64(proposer) != block_header.proposer_index:
    return dag.checkedReject("BlobSidecar: Unexpected proposer")

  # [REJECT] The proposer signature of `blob_sidecar.signed_block_header`
  # is valid with respect to the `block_header.proposer_index` pubkey.
  if not verify_block_signature(
      dag.forkAtEpoch(block_header.slot.epoch),
      getStateField(dag.headState, genesis_validators_root),
      block_header.slot,
      block_root,
      dag.validatorKey(proposer).get(),
      blob_sidecar.signed_block_header.signature):
    return dag.checkedReject("BlobSidecar: Invalid proposer signature")

  # [REJECT] The sidecar's blob is valid as verified by `verify_blob_kzg_proof(
  # blob_sidecar.blob, blob_sidecar.kzg_commitment, blob_sidecar.kzg_proof)`.
  block:
    let ok = verifyProof(
      blob_sidecar.blob,
      blob_sidecar.kzg_commitment,
      blob_sidecar.kzg_proof).valueOr:
        return dag.checkedReject("BlobSidecar: blob verify failed")
    if not ok:
      return dag.checkedReject("BlobSidecar: blob invalid")

  # Send notification about new blob sidecar via callback
  if not(isNil(blobQuarantine.onBlobSidecarCallback)):
    blobQuarantine.onBlobSidecarCallback(blob_sidecar)

  ok()

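# Illustrative (hypothetical) caller sketch, not part of the original module:
# a gossip handler typically maps the Result above onto libp2p's
# ValidationResult, e.g.:
#
#   let v = validateBlobSidecar(
#     dag, quarantine, blobQuarantine, blob_sidecar, wallTime, subnet_id)
#   if v.isOk:
#     ValidationResult.Accept
#   else:
#     v.error[0]  # Ignore or Reject, with v.error[1] as the log message
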
# https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/p2p-interface.md#the-gossip-domain-gossipsub
proc validateDataColumnSidecar*(
    dag: ChainDAGRef, quarantine: ref Quarantine,
    dataColumnQuarantine: ref DataColumnQuarantine,
    data_column_sidecar: DataColumnSidecar,
    wallTime: BeaconTime, subnet_id: uint64): Result[void, ValidationError] =
  template block_header: untyped =
    data_column_sidecar.signed_block_header.message

  # [REJECT] The sidecar's index is consistent with `NUMBER_OF_COLUMNS`
  # -- i.e. `data_column_sidecar.index < NUMBER_OF_COLUMNS`
  if not (data_column_sidecar.index < NUMBER_OF_COLUMNS):
    return dag.checkedReject(
      "DataColumnSidecar: The sidecar's index should be consistent with NUMBER_OF_COLUMNS")

  debugEcho "1"

  # [REJECT] The sidecar is for the correct subnet -- i.e.
  # `compute_subnet_for_data_column_sidecar(data_column_sidecar.index) == subnet_id`.
  if not (compute_subnet_for_data_column_sidecar(data_column_sidecar.index) ==
      subnet_id):
    return dag.checkedReject(
      "DataColumnSidecar: The sidecar is not for the correct subnet")

  debugEcho "2"

  # [IGNORE] The sidecar is not from a future slot (with a
  # `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that
  # `block_header.slot <= current_slot` (a client MAY queue future sidecars
  # for processing at the appropriate slot).
  if not (block_header.slot <=
      (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
    return errIgnore("DataColumnSidecar: slot too high")

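  # For reference, the EIP-7594 p2p spec defines
  # compute_subnet_for_data_column_sidecar as essentially
  # `data_column_sidecar.index mod DATA_COLUMN_SIDECAR_SUBNET_COUNT`, which is
  # what the subnet check above compares against.
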
  # [IGNORE] The sidecar is from a slot greater than the latest
  # finalized slot -- i.e. validate that `block_header.slot >
  # compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
  if not (block_header.slot > dag.finalizedHead.slot):
    return errIgnore("DataColumnSidecar: slot already finalized")

  # [IGNORE] The sidecar is the first sidecar for the tuple
  # (block_header.slot, block_header.proposer_index, data_column_sidecar.index)
  # with valid header signature, sidecar inclusion proof, and kzg proof.
  let block_root = hash_tree_root(block_header)
  if dag.getBlockRef(block_root).isSome():
    return errIgnore("DataColumnSidecar: already have block")
  if dataColumnQuarantine[].hasDataColumn(
      block_header.slot, block_header.proposer_index, data_column_sidecar.index):
    return errIgnore(
      "DataColumnSidecar: already have valid data column from same proposer")

  debugEcho "3"

  # [REJECT] The sidecar's `kzg_commitments` inclusion proof is valid as
  # verified by `verify_data_column_sidecar_inclusion_proof(sidecar)`.
  block:
    let v = check_data_column_sidecar_inclusion_proof(data_column_sidecar)
    if v.isErr:
      return dag.checkedReject(v.error)

  debugEcho "4"

  # [IGNORE] The sidecar's block's parent (defined by
  # `block_header.parent_root`) has been seen (via both gossip and
  # non-gossip sources) (a client MAY queue sidecars for processing
  # once the parent block is retrieved).
  #
  # [REJECT] The sidecar's block's parent (defined by
  # `block_header.parent_root`) passes validation.
  let parent = dag.getBlockRef(block_header.parent_root).valueOr:
    if block_header.parent_root in quarantine[].unviable:
      quarantine[].addUnviable(block_root)
      return dag.checkedReject("DataColumnSidecar: parent not validated")
    else:
      quarantine[].addMissing(block_header.parent_root)
      return errIgnore("DataColumnSidecar: parent not found")

  # [REJECT] The sidecar is from a higher slot than the sidecar's
  # block's parent (defined by `block_header.parent_root`).
  if not (block_header.slot > parent.bid.slot):
    return dag.checkedReject("DataColumnSidecar: slot lower than parents'")

  # [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's
  # block -- i.e. `get_checkpoint_block(store, block_header.parent_root,
  # store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
  let
    finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
    ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)

  if ancestor.isNil:
    # This shouldn't happen: we should always be able to trace the parent back
    # to the finalized checkpoint (else it wouldn't be in the DAG)
    return errIgnore("DataColumnSidecar: Can't find ancestor")

  if not (
      finalized_checkpoint.root == ancestor.root or
      finalized_checkpoint.root.isZero):
    quarantine[].addUnviable(block_root)
    return dag.checkedReject(
      "DataColumnSidecar: Finalized checkpoint not an ancestor")

  # [REJECT] The sidecar is proposed by the expected `proposer_index`
  # for the block's slot in the context of the current shuffling
  # (defined by `block_header.parent_root`/`block_header.slot`).
  # If the proposer_index cannot immediately be verified against the expected
  # shuffling, the sidecar MAY be queued for later processing while proposers
  # for the block's branch are calculated -- in such a case do not
  # REJECT, instead IGNORE this message.
  let proposer = getProposer(dag, parent, block_header.slot).valueOr:
    warn "cannot compute proposer for data column"
    return errIgnore("DataColumnSidecar: Cannot compute proposer") # internal issue

  if uint64(proposer) != block_header.proposer_index:
    return dag.checkedReject("DataColumnSidecar: Unexpected proposer")

  # [REJECT] The proposer signature of
  # `data_column_sidecar.signed_block_header` is valid with respect to the
  # `block_header.proposer_index` pubkey.
  # if not verify_block_signature(
  #     dag.forkAtEpoch(block_header.slot.epoch),
  #     getStateField(dag.headState, genesis_validators_root),
  #     block_header.slot,
  #     block_root,
  #     dag.validatorKey(proposer).get(),
  #     data_column_sidecar.signed_block_header.signature):
  #   return dag.checkedReject("DataColumnSidecar: Invalid proposer signature")

  # [REJECT] The sidecar's column data is valid as
  # verified by `verify_data_column_kzg_proofs(sidecar)`
  block:
    let r = check_data_column_sidecar_kzg_proofs(data_column_sidecar)
    if r.isErr:
      return dag.checkedReject(r.error)

  debugEcho "5"

  # Send notification about new data column sidecar via callback
  if not(isNil(dataColumnQuarantine.onDataColumnSidecarCallback)):
    dataColumnQuarantine.onDataColumnSidecarCallback(data_column_sidecar)

  debugEcho "6"

  ok()

# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_block
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
proc validateBeaconBlock*(
    dag: ChainDAGRef, quarantine: ref Quarantine,
    signed_beacon_block:
      phase0.SignedBeaconBlock | altair.SignedBeaconBlock |
      bellatrix.SignedBeaconBlock | capella.SignedBeaconBlock |
      deneb.SignedBeaconBlock,
    wallTime: BeaconTime, flags: UpdateFlags): Result[void, ValidationError] =
  # In general, checks are ordered from cheap to expensive. Especially, crypto
  # verification could be quite a bit more expensive than the rest. This
  # function is easy to invoke externally simply by tossing network packets at
  # the node, so keeping the cheap checks first limits the work an attacker can
  # force on us.
  # [IGNORE] The block is not from a future slot (with a
  # MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. validate that
  # signed_beacon_block.message.slot <= current_slot (a client MAY queue future
  # blocks for processing at the appropriate slot).
  if not (signed_beacon_block.message.slot <=
      (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
    return errIgnore("BeaconBlock: slot too high")

  # [IGNORE] The block is from a slot greater than the latest finalized slot --
  # i.e. validate that signed_beacon_block.message.slot >
  # compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
  if not (signed_beacon_block.message.slot > dag.finalizedHead.slot):
    return errIgnore("BeaconBlock: slot already finalized")

  # [IGNORE] The block is the first block with valid signature received for the
  # proposer for the slot, signed_beacon_block.message.slot.
  #
  # While this condition is similar to the proposer slashing condition at
  # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#proposer-slashing
  # it's not identical, and this check does not address slashing:
  #
  # (1) The beacon blocks must be conflicting, i.e. different, for the same
  #     slot and proposer. This check also catches identical blocks.
  #
  # (2) By this point in the function, it's not been checked whether they're
  #     signed yet. As in general, expensive checks should be deferred, this
  #     would add complexity not directly relevant to this function.
  #
  # (3) As evidenced by point (1), the validation condition and the slashing
  #     condition, while similar and not coincidentally so, aren't similar
  #     enough to combine, as one or the other might drift.
  #
  # (4) Furthermore, this function, as much as possible, simply returns a yes
  #     or no answer, without modifying other state for p2p network interface
  #     validation. Complicating this interface, for the sake of sharing only
  #     a couple of lines of code, wouldn't be worthwhile.
  #
  # TODO might check unresolved/orphaned blocks too, and this might not see all
  # blocks at a given slot (though, in theory, those get checked elsewhere), or
  # adding metrics that count how often these conditions occur.
  if dag.containsForkBlock(signed_beacon_block.root):
    # The gossip algorithm itself already does one round of hashing to find
    # already-seen data, but it is fairly aggressive about forgetting about
    # what it has seen already
    # "[IGNORE] The block is the first block ..."
    return errIgnore("BeaconBlock: already seen")

  let
    slotBlock = getBlockIdAtSlot(dag, signed_beacon_block.message.slot)

  if slotBlock.isSome() and slotBlock.get().isProposed() and
      slotBlock.get().bid.slot == signed_beacon_block.message.slot:
    let curBlock = dag.getForkedBlock(slotBlock.get().bid)
    if curBlock.isOk():
      let data = curBlock.get()
      if getForkedBlockField(data, proposer_index) ==
            signed_beacon_block.message.proposer_index and
          data.signature.toRaw() != signed_beacon_block.signature.toRaw():
        return errIgnore("BeaconBlock: already proposed in the same slot")

  # [IGNORE] The block's parent (defined by block.parent_root) has been seen
  # (via both gossip and non-gossip sources) (a client MAY queue blocks for
  # processing once the parent block is retrieved).
  #
  # [REJECT] The block's parent (defined by block.parent_root)
  # passes validation.
  let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr:
    if signed_beacon_block.message.parent_root in quarantine[].unviable:
      quarantine[].addUnviable(signed_beacon_block.root)

      # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
      # `is_execution_enabled(state, block.body)` check, but unlike in
      # validateBeaconBlockBellatrix() don't have parent BlockRef.
      if signed_beacon_block.message.is_execution_block:
        # Blocks with execution enabled will be permitted to propagate
        # regardless of the validity of the execution payload. This prevents
        # network segregation between optimistic and non-optimistic nodes.
        #
        # If execution_payload verification of block's parent by an execution
        # node is not complete:
        #
        # - [REJECT] The block's parent (defined by `block.parent_root`) passes
        #   all validation (excluding execution node verification of the
        #   `block.body.execution_payload`).
        #
        # otherwise:
        #
        # - [IGNORE] The block's parent (defined by `block.parent_root`) passes
        #   all validation (including execution node verification of the
        #   `block.body.execution_payload`).

        # Implementation restrictions:
        #
        # - We don't know if the parent state had execution enabled.
        #   If it had, and the block doesn't have it enabled anymore,
        #   we end up in the pre-Merge path below (`else`) and REJECT.
        #   Such a block is clearly invalid even without asking the EL, though.
        #
        # - We know that the parent was marked unviable, but don't know
        #   whether it was marked unviable due to consensus (REJECT) or
        #   execution (IGNORE) verification failure. We err on the IGNORE side.
        return errIgnore("BeaconBlock: ignored, parent from unviable fork")
      else:
        # [REJECT] The block's parent (defined by `block.parent_root`) passes
        # validation.
        return dag.checkedReject(
          "BeaconBlock: rejected, parent from unviable fork")

    # When the parent is missing, we can't validate the block - we'll queue it
    # in the quarantine for later processing
    if (let r = quarantine[].addOrphan(
        dag.finalizedHead.slot,
        ForkedSignedBeaconBlock.init(signed_beacon_block)); r.isErr):
      debug "validateBeaconBlock: could not add orphan",
        blockRoot = shortLog(signed_beacon_block.root),
        blck = shortLog(signed_beacon_block.message),
        err = r.error()
    else:
      debug "Block quarantined",
        blockRoot = shortLog(signed_beacon_block.root),
        blck = shortLog(signed_beacon_block.message),
        signature = shortLog(signed_beacon_block.signature)
    return errIgnore("BeaconBlock: parent not found")

  # Continues block parent validity checking in optimistic case, where it does
  # appear as a `BlockRef` (and not handled above) but isn't usable for gossip
  # validation.
  validateBeaconBlockBellatrix(signed_beacon_block, parent)

  # [REJECT] The block is from a higher slot than its parent.
  if not (signed_beacon_block.message.slot > parent.bid.slot):
    return dag.checkedReject(
      "BeaconBlock: block not from higher slot than its parent")

  # [REJECT] The current finalized_checkpoint is an ancestor of block -- i.e.
  # get_ancestor(store, block.parent_root,
  # compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) ==
  # store.finalized_checkpoint.root
  let
    finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
if ancestor.isNil:
|
|
|
|
# This shouldn't happen: we should always be able to trace the parent back
|
|
|
|
# to the finalized checkpoint (else it wouldn't be in the DAG)
|
|
|
|
return errIgnore("BeaconBlock: Can't find ancestor")
|
|
|
|
|
2022-02-14 05:26:19 +00:00
|
|
|
if not (
|
|
|
|
finalized_checkpoint.root == ancestor.root or
|
|
|
|
finalized_checkpoint.root.isZero):
|
2022-01-26 12:20:08 +00:00
|
|
|
quarantine[].addUnviable(signed_beacon_block.root)
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject(
|
|
|
|
"BeaconBlock: Finalized checkpoint not an ancestor")
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
# [REJECT] The block is proposed by the expected proposer_index for the
|
|
|
|
# block's slot in the context of the current shuffling (defined by
|
|
|
|
# parent_root/slot). If the proposer_index cannot immediately be verified
|
|
|
|
# against the expected shuffling, the block MAY be queued for later
|
|
|
|
# processing while proposers for the block's branch are calculated -- in such
|
|
|
|
# a case do not REJECT, instead IGNORE this message.
|
|
|
|
let
|
2023-01-11 12:29:21 +00:00
|
|
|
proposer = getProposer(
|
|
|
|
dag, parent, signed_beacon_block.message.slot).valueOr:
|
2023-11-05 08:13:57 +00:00
|
|
|
warn "cannot compute proposer for block"
|
2023-01-11 12:29:21 +00:00
|
|
|
return errIgnore("BeaconBlock: Cannot compute proposer") # internal issue
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2023-01-11 12:29:21 +00:00
|
|
|
if uint64(proposer) != signed_beacon_block.message.proposer_index:
|
2022-01-26 12:20:08 +00:00
|
|
|
quarantine[].addUnviable(signed_beacon_block.root)
|
2023-11-05 08:13:57 +00:00
|
|
|
return dag.checkedReject("BeaconBlock: Unexpected proposer")
|
2021-11-05 15:39:47 +00:00
|
|
|
|
|
|
|
# [REJECT] The proposer signature, signed_beacon_block.signature, is valid
|
|
|
|
# with respect to the proposer_index pubkey.
|
|
|
|
if not verify_block_signature(
|
|
|
|
dag.forkAtEpoch(signed_beacon_block.message.slot.epoch),
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(dag.headState, genesis_validators_root),
|
2021-11-05 15:39:47 +00:00
|
|
|
signed_beacon_block.message.slot,
|
|
|
|
signed_beacon_block.root,
|
2023-01-11 12:29:21 +00:00
|
|
|
dag.validatorKey(proposer).get(),
|
2021-11-05 15:39:47 +00:00
|
|
|
signed_beacon_block.signature):
|
2022-01-26 12:20:08 +00:00
|
|
|
quarantine[].addUnviable(signed_beacon_block.root)
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject("BeaconBlock: Invalid proposer signature")
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2023-01-16 16:26:48 +00:00
|
|
|
ok()
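# Illustrative sketch (compiled only when this file is built as a main
# module) of the finalized-checkpoint ancestry rule applied above: walk the
# parent chain back to the first slot of the finalized epoch and compare the
# root found there with the finalized root. `ToyBlock`, the fixed 32-slot
# epoch and the parent-index encoding are assumptions made for this example
# only - the real check uses `get_ancestor` over the DAG's `BlockRef`s.
when isMainModule:
  type ToyBlock = object
    root: string
    slot: uint64
    parent: int      # index of the parent in the chain below, -1 for none

  func toyStartSlot(epoch: uint64): uint64 =
    epoch * 32'u64   # SLOTS_PER_EPOCH on mainnet

  func toyAncestorRoot(
      chain: openArray[ToyBlock], tip: int, slot: uint64): string =
    # step from `tip` towards genesis until at or below `slot`
    var i = tip
    while chain[i].slot > slot and chain[i].parent >= 0:
      i = chain[i].parent
    chain[i].root

  let toyChain = [
    ToyBlock(root: "finalized", slot: 64, parent: -1),
    ToyBlock(root: "a", slot: 65, parent: 0),
    ToyBlock(root: "b", slot: 70, parent: 1)]
  # a block whose parent is "b" satisfies the rule iff the walk back to the
  # start slot of finalized epoch 2 (slot 64) ends at the finalized root
  doAssert toyAncestorRoot(toyChain, 2, toyStartSlot(2)) == "finalized"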
|
2022-12-14 17:30:56 +00:00
|
|
|
|
2024-05-17 07:13:30 +00:00
|
|
|
proc validateBeaconBlock*(
|
|
|
|
dag: ChainDAGRef, quarantine: ref Quarantine,
|
|
|
|
signed_beacon_block: electra.SignedBeaconBlock,
|
|
|
|
wallTime: BeaconTime, flags: UpdateFlags): Result[void, ValidationError] =
|
|
|
|
debugComment "it's sometimes not"
|
|
|
|
ok()
|
|
|
|
|
2023-08-09 03:58:47 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
|
2023-12-06 22:16:55 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
|
2020-08-27 07:34:12 +00:00
|
|
|
proc validateAttestation*(
|
2021-04-02 14:36:43 +00:00
|
|
|
pool: ref AttestationPool,
|
|
|
|
batchCrypto: ref BatchCrypto,
|
2024-04-17 20:44:29 +00:00
|
|
|
attestation: phase0.Attestation,
|
2021-04-02 14:36:43 +00:00
|
|
|
wallTime: BeaconTime,
|
2021-05-10 07:13:36 +00:00
|
|
|
subnet_id: SubnetId, checkSignature: bool):
|
2021-11-05 15:39:47 +00:00
|
|
|
Future[Result[
|
|
|
|
tuple[attesting_index: ValidatorIndex, sig: CookedSig],
|
2024-01-22 16:34:54 +00:00
|
|
|
ValidationError]] {.async: (raises: [CancelledError]).} =
|
2021-03-01 19:50:43 +00:00
|
|
|
# Some of the checks below have been reordered compared to the spec, to
|
|
|
|
# perform the cheap checks first - in particular, we want to avoid loading
|
|
|
|
# an `EpochRef` and checking signatures. This reordering might lead to
|
|
|
|
# different IGNORE/REJECT results in turn affecting gossip scores.
|
|
|
|
|
2020-10-19 09:25:06 +00:00
|
|
|
# [REJECT] The attestation's epoch matches its target -- i.e.
|
|
|
|
# attestation.data.target.epoch ==
|
|
|
|
# compute_epoch_at_slot(attestation.data.slot)
|
2022-01-08 23:28:49 +00:00
|
|
|
let slot = block:
|
2020-10-19 09:25:06 +00:00
|
|
|
let v = check_attestation_slot_target(attestation.data)
|
2020-09-18 11:53:09 +00:00
|
|
|
if v.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(v.error())
|
2022-01-08 23:28:49 +00:00
|
|
|
v.get()
|
2020-08-27 07:34:12 +00:00
|
|
|
|
|
|
|
# attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE
|
|
|
|
# slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
|
|
|
|
# attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot
|
2020-10-19 09:25:06 +00:00
|
|
|
# >= attestation.data.slot (a client MAY queue future attestations for
|
2020-08-27 07:34:12 +00:00
|
|
|
# processing at the appropriate slot).
|
2023-06-29 08:34:21 +00:00
|
|
|
#
|
2023-09-21 18:06:51 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_attestation_subnet_id
|
2023-06-29 08:34:21 +00:00
|
|
|
# modifies this for Deneb and newer forks.
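#
# For example, pre-Deneb an attestation for slot 100 is only propagated while
# the wall clock (within MAXIMUM_GOSSIP_CLOCK_DISPARITY) lies in slots
# 100..100+ATTESTATION_PROPAGATION_SLOT_RANGE; from Deneb onwards (EIP-7045)
# the age limit is relaxed so that attestations from the current and previous
# epoch remain propagatable.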
|
2021-04-02 14:36:43 +00:00
|
|
|
block:
|
2023-06-29 08:34:21 +00:00
|
|
|
let v = check_propagation_slot_range(
|
|
|
|
pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot,
|
|
|
|
wallTime)
|
2023-05-02 11:06:02 +00:00
|
|
|
if v.isErr(): # [IGNORE]
|
2021-11-05 15:39:47 +00:00
|
|
|
return err(v.error())
|
2020-08-27 07:34:12 +00:00
|
|
|
|
|
|
|
# The attestation is unaggregated -- that is, it has exactly one
|
|
|
|
# participating validator (len([bit for bit in attestation.aggregation_bits
|
|
|
|
# if bit == 0b1]) == 1).
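#
# For example, `aggregation_bits` with exactly one bit set (0b00010000) is
# acceptable on the attestation subnet, whereas 0b00110000 carries two
# participants and must instead be distributed as an aggregate.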
|
2021-04-02 14:36:43 +00:00
|
|
|
block:
|
2023-05-02 11:06:02 +00:00
|
|
|
let v = check_aggregation_count(attestation, singular = true)
|
|
|
|
if v.isErr(): # [REJECT]
|
|
|
|
return pool.checkedReject(v.error)
|
2020-08-27 07:34:12 +00:00
|
|
|
|
2021-03-01 19:50:43 +00:00
|
|
|
# The block being voted for (attestation.data.beacon_block_root) has been seen
|
2023-05-02 11:06:02 +00:00
|
|
|
# (via both gossip and non-gossip sources) (a client MAY queue attestations
|
|
|
|
# for processing once block is retrieved).
|
|
|
|
# [REJECT] The block being voted for (attestation.data.beacon_block_root)
|
|
|
|
# passes validation.
|
2021-03-01 19:50:43 +00:00
|
|
|
# [IGNORE] if block is unseen so far and enqueue it in missing blocks
|
2021-04-02 14:36:43 +00:00
|
|
|
let target = block:
|
2023-05-02 11:06:02 +00:00
|
|
|
let v = check_beacon_and_target_block(pool[], attestation.data)
|
|
|
|
if v.isErr(): # [IGNORE/REJECT]
|
|
|
|
return pool.checkedResult(v.error)
|
2021-04-02 14:36:43 +00:00
|
|
|
v.get()
|
2020-08-03 19:47:42 +00:00
|
|
|
|
2020-08-04 15:52:46 +00:00
|
|
|
# The following rule follows implicitly from that we clear out any
|
|
|
|
# unviable blocks from the chain dag:
|
|
|
|
#
|
2023-06-29 08:34:21 +00:00
|
|
|
# [IGNORE] The current finalized_checkpoint is an ancestor of the block
|
|
|
|
# defined by attestation.data.beacon_block_root -- i.e.
|
|
|
|
# get_checkpoint_block(store, attestation.data.beacon_block_root,
|
|
|
|
# store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root
|
2021-03-01 19:50:43 +00:00
|
|
|
let
|
2022-08-18 18:07:01 +00:00
|
|
|
shufflingRef =
|
|
|
|
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
|
|
|
|
# Target is verified - shouldn't happen
|
|
|
|
warn "No shuffling for attestation - report bug",
|
2022-01-05 18:38:04 +00:00
|
|
|
attestation = shortLog(attestation), target = shortLog(target)
|
2022-08-18 18:07:01 +00:00
|
|
|
return errIgnore("Attestation: no shuffling")
|
2020-08-06 19:48:47 +00:00
|
|
|
|
2020-10-19 09:25:06 +00:00
|
|
|
# [REJECT] The committee index is within the expected range -- i.e.
|
|
|
|
# data.index < get_committee_count_per_slot(state, data.target.epoch).
|
2022-01-08 23:28:49 +00:00
|
|
|
let committee_index = block:
|
2022-08-18 18:07:01 +00:00
|
|
|
let idx = shufflingRef.get_committee_index(attestation.data.index)
|
2022-01-08 23:28:49 +00:00
|
|
|
if idx.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(
|
|
|
|
"Attestation: committee index not within expected range")
|
2022-01-08 23:28:49 +00:00
|
|
|
idx.get()
|
2020-10-19 09:25:06 +00:00
|
|
|
|
2020-08-06 19:48:47 +00:00
|
|
|
# [REJECT] The attestation is for the correct subnet -- i.e.
|
|
|
|
# compute_subnet_for_attestation(committees_per_slot,
|
|
|
|
# attestation.data.slot, attestation.data.index) == subnet_id, where
|
|
|
|
# committees_per_slot = get_committee_count_per_slot(state,
|
|
|
|
# attestation.data.target.epoch), which may be pre-computed along with the
|
|
|
|
# committee information for the signature check.
|
2021-04-02 14:36:43 +00:00
|
|
|
block:
|
2022-01-08 23:28:49 +00:00
|
|
|
let v = check_attestation_subnet(
|
2023-05-02 11:06:02 +00:00
|
|
|
shufflingRef, attestation.data.slot, committee_index, subnet_id)
|
|
|
|
if v.isErr(): # [REJECT]
|
|
|
|
return pool.checkedReject(v.error)
|
2020-08-06 19:48:47 +00:00
|
|
|
|
2020-10-19 09:25:06 +00:00
|
|
|
# [REJECT] The number of aggregation bits matches the committee size -- i.e.
|
|
|
|
# len(attestation.aggregation_bits) == len(get_beacon_committee(state,
|
|
|
|
# data.slot, data.index)).
|
|
|
|
#
|
|
|
|
# This uses the same epochRef as data.target.epoch, because the attestation's
|
|
|
|
# epoch matches its target and attestation.data.target.root is an ancestor of
|
|
|
|
# attestation.data.beacon_block_root.
|
2023-11-20 14:42:29 +00:00
|
|
|
if not attestation.aggregation_bits.compatible_with_shuffling(
|
|
|
|
shufflingRef, slot, committee_index):
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(
|
2021-08-24 19:49:51 +00:00
|
|
|
"Attestation: number of aggregation bits and committee size mismatch")
|
2020-10-19 09:25:06 +00:00
|
|
|
|
2020-08-06 19:48:47 +00:00
|
|
|
let
|
2021-08-10 20:46:35 +00:00
|
|
|
fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
|
2021-04-26 20:39:44 +00:00
|
|
|
attesting_index = get_attesting_indices_one(
|
2022-08-18 18:07:01 +00:00
|
|
|
shufflingRef, slot, committee_index, attestation.aggregation_bits)
|
2020-06-23 10:38:59 +00:00
|
|
|
|
2020-10-19 09:25:06 +00:00
|
|
|
# The number of aggregation bits matches the committee size, which ensures
|
|
|
|
# this condition holds.
|
2023-05-02 11:06:02 +00:00
|
|
|
doAssert attesting_index.isSome(),
|
|
|
|
"We've checked bits length and one count already"
|
2021-04-26 20:39:44 +00:00
|
|
|
let validator_index = attesting_index.get()
|
2020-09-25 17:51:44 +00:00
|
|
|
|
|
|
|
# There has been no other valid attestation seen on an attestation subnet
|
|
|
|
# that has an identical `attestation.data.target.epoch` and participating
|
|
|
|
# validator index.
|
|
|
|
# Slightly modified to allow only newer attestations than were previously
|
|
|
|
# seen (no point in propagating older votes)
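#
# For example, once an attestation targeting epoch N is accepted from a
# validator, the pool records N + 1 for it and any further subnet attestation
# from that validator targeting an epoch at or below N is ignored rather than
# re-propagated.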
|
2021-02-08 07:27:30 +00:00
|
|
|
if (pool.nextAttestationEpoch.lenu64 > validator_index.uint64) and
|
2020-12-14 20:58:32 +00:00
|
|
|
pool.nextAttestationEpoch[validator_index].subnet >
|
|
|
|
attestation.data.target.epoch:
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore("Attestation: Validator has already voted in epoch")
|
2020-09-25 17:51:44 +00:00
|
|
|
|
2023-01-11 12:29:21 +00:00
|
|
|
let pubkey = pool.dag.validatorKey(validator_index).valueOr:
|
2021-12-09 12:56:54 +00:00
|
|
|
# can't happen, in theory, because the attesting index was just derived from
# the shuffling above
|
|
|
|
return errIgnore("Attestation: cannot find validator pubkey")
|
2020-06-10 06:58:12 +00:00
|
|
|
|
2023-05-05 20:48:33 +00:00
|
|
|
# [REJECT] The signature of `attestation` is valid.
|
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
# In the spec, is_valid_indexed_attestation is used to verify the signature -
|
|
|
|
# here, we do a batch verification instead
|
2021-05-10 07:13:36 +00:00
|
|
|
let sig =
|
|
|
|
if checkSignature:
|
|
|
|
# Attestation signatures are batch-verified
|
|
|
|
let deferredCrypto = batchCrypto
|
2021-12-09 12:56:54 +00:00
|
|
|
.scheduleAttestationCheck(
|
2023-02-20 08:26:22 +00:00
|
|
|
fork, attestation.data, pubkey,
|
|
|
|
attestation.signature)
|
2021-05-25 14:17:47 +00:00
|
|
|
if deferredCrypto.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(deferredCrypto.error)
|
2021-05-10 07:13:36 +00:00
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
let (cryptoFut, sig) = deferredCrypto.get()
|
2021-05-10 07:13:36 +00:00
|
|
|
# Await the crypto check
|
2022-12-05 21:36:53 +00:00
|
|
|
let x = (await cryptoFut)
|
2021-05-10 07:13:36 +00:00
|
|
|
case x
|
|
|
|
of BatchResult.Invalid:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Attestation: invalid signature")
|
2021-05-10 07:13:36 +00:00
|
|
|
of BatchResult.Timeout:
|
|
|
|
beacon_attestations_dropped_queue_full.inc()
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore("Attestation: timeout checking signature")
|
2021-05-10 07:13:36 +00:00
|
|
|
of BatchResult.Valid:
|
|
|
|
sig # keep going only in this case
|
|
|
|
else:
|
2023-01-11 12:29:21 +00:00
|
|
|
attestation.signature.load().valueOr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Attestation: unable to load signature")
|
2021-04-02 14:36:43 +00:00
|
|
|
|
2021-02-08 07:27:30 +00:00
|
|
|
# Only valid attestations go in the list, which keeps validator_index
|
|
|
|
# in range
|
|
|
|
if not (pool.nextAttestationEpoch.lenu64 > validator_index.uint64):
|
|
|
|
pool.nextAttestationEpoch.setLen(validator_index.int + 1)
|
2020-12-14 20:58:32 +00:00
|
|
|
pool.nextAttestationEpoch[validator_index].subnet =
|
|
|
|
attestation.data.target.epoch + 1
|
2020-09-25 17:51:44 +00:00
|
|
|
|
2021-04-26 20:39:44 +00:00
|
|
|
return ok((validator_index, sig))
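# Illustrative sketch (compiled only when this file is built as a main
# module) of the subnet computation referenced in the checks above: the
# spec's `compute_subnet_for_attestation` maps (committees_per_slot, slot,
# committee_index) onto one of ATTESTATION_SUBNET_COUNT subnets. The mainnet
# constants are restated locally as an assumption of this example.
when isMainModule:
  func exampleComputeSubnet(
      committeesPerSlot, slot, committeeIndex: uint64): uint64 =
    const
      slotsPerEpoch = 32'u64        # SLOTS_PER_EPOCH on mainnet
      attSubnetCount = 64'u64       # ATTESTATION_SUBNET_COUNT
    let
      slotsSinceEpochStart = slot mod slotsPerEpoch
      committeesSinceEpochStart = committeesPerSlot * slotsSinceEpochStart
    (committeesSinceEpochStart + committeeIndex) mod attSubnetCount

  # 4 committees per slot, slot 100 (4 slots into its epoch), committee 2
  doAssert exampleComputeSubnet(4, 100, 2) == 18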
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2024-05-14 16:01:26 +00:00
|
|
|
proc validateAttestation*(
|
|
|
|
pool: ref AttestationPool,
|
|
|
|
batchCrypto: ref BatchCrypto,
|
|
|
|
attestation: electra.Attestation,
|
|
|
|
wallTime: BeaconTime,
|
|
|
|
subnet_id: SubnetId, checkSignature: bool):
|
|
|
|
Future[Result[
|
|
|
|
tuple[attesting_index: ValidatorIndex, sig: CookedSig],
|
|
|
|
ValidationError]] {.async: (raises: [CancelledError]).} =
|
|
|
|
debugComment "should reject a bunch"
|
|
|
|
# [REJECT] The attestation's epoch matches its target -- i.e.
|
|
|
|
# attestation.data.target.epoch ==
|
|
|
|
# compute_epoch_at_slot(attestation.data.slot)
|
|
|
|
let slot = block:
|
|
|
|
let v = check_attestation_slot_target(attestation.data)
|
|
|
|
if v.isErr():
|
|
|
|
return pool.checkedReject(v.error())
|
|
|
|
v.get()
|
|
|
|
|
|
|
|
# The block being voted for (attestation.data.beacon_block_root) has been seen
|
|
|
|
# (via both gossip and non-gossip sources) (a client MAY queue attestations
|
|
|
|
# for processing once block is retrieved).
|
|
|
|
# [REJECT] The block being voted for (attestation.data.beacon_block_root)
|
|
|
|
# passes validation.
|
|
|
|
# [IGNORE] if block is unseen so far and enqueue it in missing blocks
|
|
|
|
let target = block:
|
|
|
|
let v = check_beacon_and_target_block(pool[], attestation.data)
|
|
|
|
if v.isErr(): # [IGNORE/REJECT]
|
|
|
|
return pool.checkedResult(v.error)
|
|
|
|
v.get()
|
|
|
|
|
|
|
|
# The following rule follows implicitly from that we clear out any
|
|
|
|
# unviable blocks from the chain dag:
|
|
|
|
#
|
|
|
|
# [IGNORE] The current finalized_checkpoint is an ancestor of the block
|
|
|
|
# defined by attestation.data.beacon_block_root -- i.e.
|
|
|
|
# get_checkpoint_block(store, attestation.data.beacon_block_root,
|
|
|
|
# store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root
|
|
|
|
let
|
|
|
|
shufflingRef =
|
|
|
|
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
|
|
|
|
# Target is verified - shouldn't happen
|
|
|
|
warn "No shuffling for attestation - report bug",
|
|
|
|
attestation = shortLog(attestation), target = shortLog(target)
|
|
|
|
return errIgnore("Attestation: no shuffling")
|
|
|
|
|
|
|
|
let
|
|
|
|
fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
|
|
|
|
attesting_index = get_attesting_indices_one(
|
2024-05-17 12:37:41 +00:00
|
|
|
shufflingRef, slot, attestation.committee_bits,
|
|
|
|
attestation.aggregation_bits, false)
|
2024-05-14 16:01:26 +00:00
|
|
|
|
|
|
|
# The number of aggregation bits matches the committee size, which ensures
|
|
|
|
# this condition holds.
|
|
|
|
doAssert attesting_index.isSome(),
|
|
|
|
"We've checked bits length and one count already"
|
|
|
|
let validator_index = attesting_index.get()
|
|
|
|
|
|
|
|
# In the spec, is_valid_indexed_attestation is used to verify the signature -
|
|
|
|
# here, we do a batch verification instead
|
|
|
|
let sig =
|
|
|
|
attestation.signature.load().valueOr:
|
|
|
|
return pool.checkedReject("Attestation: unable to load signature")
|
|
|
|
|
|
|
|
return ok((validator_index, sig))
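# Illustrative sketch (compiled only when this file is built as a main
# module) of the Electra attestation shape handled above: with EIP-7549 the
# committee is no longer named by `attestation.data.index` but by a
# `committee_bits` bitvector, and an unaggregated gossip attestation is
# expected to have exactly one committee bit set. The `openArray[bool]`
# stand-in for the SSZ bitvector is an assumption of this example.
when isMainModule:
  func exampleSingleCommitteeIndex(committeeBits: openArray[bool]): int =
    ## index of the single set committee bit, or -1 when not exactly one is set
    result = -1
    for i, bit in committeeBits:
      if bit:
        if result >= 0:
          return -1    # more than one committee bit set
        result = i

  doAssert exampleSingleCommitteeIndex([false, false, true, false]) == 2
  doAssert exampleSingleCommitteeIndex([true, true, false, false]) == -1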
|
|
|
|
|
2023-08-09 03:58:47 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
|
2023-12-06 22:16:55 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
|
2020-08-27 07:34:12 +00:00
|
|
|
proc validateAggregate*(
|
2021-04-02 14:36:43 +00:00
|
|
|
pool: ref AttestationPool,
|
|
|
|
batchCrypto: ref BatchCrypto,
|
2024-05-14 04:12:35 +00:00
|
|
|
signedAggregateAndProof: phase0.SignedAggregateAndProof,
|
2022-07-06 16:11:44 +00:00
|
|
|
wallTime: BeaconTime,
|
|
|
|
checkSignature = true, checkCover = true):
|
2021-11-05 15:39:47 +00:00
|
|
|
Future[Result[
|
|
|
|
tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
|
2024-01-22 16:34:54 +00:00
|
|
|
ValidationError]] {.async: (raises: [CancelledError]).} =
|
2021-03-01 19:50:43 +00:00
|
|
|
# Some of the checks below have been reordered compared to the spec, to
|
|
|
|
# perform the cheap checks first - in particular, we want to avoid loading
|
|
|
|
# an `EpochRef` and checking signatures. This reordering might lead to
|
|
|
|
# different IGNORE/REJECT results in turn affecting gossip scores.
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2021-03-01 19:50:43 +00:00
|
|
|
template aggregate_and_proof: untyped = signedAggregateAndProof.message
|
|
|
|
template aggregate: untyped = aggregate_and_proof.aggregate
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2020-11-12 15:29:32 +00:00
|
|
|
# [REJECT] The aggregate attestation's epoch matches its target -- i.e.
|
|
|
|
# `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)`
|
2022-01-08 23:28:49 +00:00
|
|
|
let slot = block:
|
2020-11-12 15:29:32 +00:00
|
|
|
let v = check_attestation_slot_target(aggregate.data)
|
|
|
|
if v.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(v.error)
|
2022-01-08 23:28:49 +00:00
|
|
|
v.get()
|
2020-11-12 15:29:32 +00:00
|
|
|
|
2021-03-01 19:50:43 +00:00
|
|
|
# [IGNORE] aggregate.data.slot is within the last
|
|
|
|
# ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
|
|
|
|
# MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
|
|
|
|
# ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot
|
2023-06-29 08:34:21 +00:00
|
|
|
#
|
2023-09-21 18:06:51 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/deneb/p2p-interface.md#beacon_aggregate_and_proof
|
2023-06-29 08:34:21 +00:00
|
|
|
# modifies this for Deneb and newer forks.
|
2021-04-02 14:36:43 +00:00
|
|
|
block:
|
2023-06-29 08:34:21 +00:00
|
|
|
let v = check_propagation_slot_range(
|
|
|
|
pool.dag.cfg.consensusForkAtEpoch(wallTime.slotOrZero.epoch), slot,
|
|
|
|
wallTime)
|
2023-05-02 11:06:02 +00:00
|
|
|
if v.isErr(): # [IGNORE]
|
2021-11-05 15:39:47 +00:00
|
|
|
return err(v.error())
|
2021-03-01 19:50:43 +00:00
|
|
|
|
2020-07-02 16:15:27 +00:00
|
|
|
# [IGNORE] The aggregate is the first valid aggregate received for the
|
|
|
|
# aggregator with index aggregate_and_proof.aggregator_index for the epoch
|
|
|
|
# aggregate.data.target.epoch.
|
2020-12-14 20:58:32 +00:00
|
|
|
# Slightly modified to allow only newer attestations than were previously
|
|
|
|
# seen (no point in propagating older votes)
|
|
|
|
if (pool.nextAttestationEpoch.lenu64 >
|
|
|
|
aggregate_and_proof.aggregator_index) and
|
|
|
|
pool.nextAttestationEpoch[
|
|
|
|
aggregate_and_proof.aggregator_index].aggregate >
|
|
|
|
aggregate.data.target.epoch:
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore("Aggregate: validator has already aggregated in epoch")
|
2020-07-02 16:15:27 +00:00
|
|
|
|
|
|
|
# [REJECT] The attestation has participants -- that is,
|
|
|
|
# len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1.
|
|
|
|
#
|
|
|
|
# get_attesting_indices() is:
|
|
|
|
# committee = get_beacon_committee(state, data.slot, data.index)
|
|
|
|
# return set(index for i, index in enumerate(committee) if bits[i])
|
|
|
|
#
|
|
|
|
# the attestation has no participants iff either:
|
|
|
|
# (1) the aggregation bits are all 0; or
|
|
|
|
# (2) the non-zero aggregation bits don't overlap with extant committee
|
|
|
|
# members, i.e. the counts don't match.
|
|
|
|
# But (2) would reflect an invalid aggregation in other ways, so reject it
|
|
|
|
# either way.
|
2021-04-02 14:36:43 +00:00
|
|
|
block:
|
2023-05-02 11:06:02 +00:00
|
|
|
let v = check_aggregation_count(aggregate, singular = false)
|
|
|
|
if v.isErr(): # [REJECT]
|
|
|
|
return pool.checkedReject(v.error)
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2020-08-27 07:34:12 +00:00
|
|
|
# [REJECT] The block being voted for (aggregate.data.beacon_block_root)
|
|
|
|
# passes validation.
|
2021-03-01 19:50:43 +00:00
|
|
|
# [IGNORE] if block is unseen so far and enqueue it in missing blocks
|
2021-04-02 14:36:43 +00:00
|
|
|
let target = block:
|
2023-05-02 11:06:02 +00:00
|
|
|
let v = check_beacon_and_target_block(pool[], aggregate.data)
|
|
|
|
if v.isErr(): # [IGNORE/REJECT]
|
|
|
|
return pool.checkedResult(v.error)
|
2021-04-02 14:36:43 +00:00
|
|
|
v.get()
|
2020-07-13 14:58:38 +00:00
|
|
|
|
2021-03-01 19:50:43 +00:00
|
|
|
let
|
2022-08-18 18:07:01 +00:00
|
|
|
shufflingRef =
|
|
|
|
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
|
|
|
|
# Target is verified - shouldn't happen
|
|
|
|
warn "No shuffling for attestation - report bug",
|
2022-01-05 18:38:04 +00:00
|
|
|
aggregate = shortLog(aggregate), target = shortLog(target)
|
2022-08-18 18:07:01 +00:00
|
|
|
return errIgnore("Aggregate: no shuffling")
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2021-08-24 19:49:51 +00:00
|
|
|
# [REJECT] The committee index is within the expected range -- i.e.
|
|
|
|
# data.index < get_committee_count_per_slot(state, data.target.epoch).
|
2022-01-08 23:28:49 +00:00
|
|
|
let committee_index = block:
|
2022-08-18 18:07:01 +00:00
|
|
|
let idx = shufflingRef.get_committee_index(aggregate.data.index)
|
2022-01-08 23:28:49 +00:00
|
|
|
if idx.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(
|
|
|
|
"Attestation: committee index not within expected range")
|
2022-01-08 23:28:49 +00:00
|
|
|
idx.get()
|
2023-11-20 14:42:29 +00:00
|
|
|
if not aggregate.aggregation_bits.compatible_with_shuffling(
|
|
|
|
shufflingRef, slot, committee_index):
|
|
|
|
return pool.checkedReject(
|
|
|
|
"Aggregate: number of aggregation bits and committee size mismatch")
|
|
|
|
|
|
|
|
if checkCover and
|
|
|
|
pool[].covers(aggregate.data, aggregate.aggregation_bits):
|
|
|
|
# [IGNORE] A valid aggregate attestation defined by
|
|
|
|
# `hash_tree_root(aggregate.data)` whose `aggregation_bits` is a non-strict
|
|
|
|
# superset has _not_ already been seen.
|
|
|
|
# https://github.com/ethereum/consensus-specs/pull/2847
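#
# For example, if an aggregate for this `AttestationData` with bits 1110 has
# already been forwarded, an incoming aggregate with bits 0110 adds no new
# information and is ignored, whereas bits 0111 are not covered and continue
# through validation.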
|
2024-01-17 14:46:54 +00:00
|
|
|
return errIgnore("Aggregate: already covered")
|
2021-08-24 19:49:51 +00:00
|
|
|
|
2022-02-25 16:15:39 +00:00
|
|
|
# [REJECT] aggregate_and_proof.selection_proof selects the validator as an
|
|
|
|
# aggregator for the slot -- i.e. is_aggregator(state, aggregate.data.slot,
|
|
|
|
# aggregate.data.index, aggregate_and_proof.selection_proof) returns True.
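#
# Per the phase0 honest-validator spec, `is_aggregator` hashes the selection
# proof and checks
# `bytes_to_uint64(hash(selection_proof)[0:8]) mod
#  max(1, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE) == 0`,
# so on average TARGET_AGGREGATORS_PER_COMMITTEE (16) validators per
# committee self-select as aggregators.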
|
2020-08-06 19:48:47 +00:00
|
|
|
if not is_aggregator(
|
2022-08-18 18:07:01 +00:00
|
|
|
shufflingRef, slot, committee_index, aggregate_and_proof.selection_proof):
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: incorrect aggregator")
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2020-08-06 19:48:47 +00:00
|
|
|
# [REJECT] The aggregator's validator index is within the committee -- i.e.
|
|
|
|
# aggregate_and_proof.aggregator_index in get_beacon_committee(state,
|
|
|
|
# aggregate.data.slot, aggregate.data.index).
|
2022-05-23 23:39:08 +00:00
|
|
|
|
|
|
|
let aggregator_index =
|
|
|
|
ValidatorIndex.init(aggregate_and_proof.aggregator_index).valueOr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: invalid aggregator index")
|
2022-05-23 23:39:08 +00:00
|
|
|
|
|
|
|
if aggregator_index notin
|
2022-08-18 18:07:01 +00:00
|
|
|
get_beacon_committee(shufflingRef, slot, committee_index):
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(
|
|
|
|
"Aggregate: aggregator's validator index not in committee")
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2023-05-02 11:06:02 +00:00
|
|
|
# 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature
|
|
|
|
# of the aggregate.data.slot by the validator with index
|
2021-06-10 07:37:02 +00:00
|
|
|
# aggregate_and_proof.aggregator_index.
|
|
|
|
# get_slot_signature(state, aggregate.data.slot, privkey)
|
2023-05-02 11:06:02 +00:00
|
|
|
# 2. [REJECT] The aggregator signature,
|
|
|
|
# signed_aggregate_and_proof.signature, is valid.
|
2021-06-10 07:37:02 +00:00
|
|
|
# 3. [REJECT] The signature of aggregate is valid.
|
2021-04-02 14:36:43 +00:00
|
|
|
|
2021-04-09 12:59:24 +00:00
|
|
|
let
|
2021-08-10 20:46:35 +00:00
|
|
|
fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch)
|
2022-01-08 23:28:49 +00:00
|
|
|
attesting_indices = get_attesting_indices(
|
2022-08-18 18:07:01 +00:00
|
|
|
shufflingRef, slot, committee_index, aggregate.aggregation_bits)
|
2021-12-09 12:56:54 +00:00
|
|
|
|
2021-04-09 12:59:24 +00:00
|
|
|
let
|
2022-07-06 16:11:44 +00:00
|
|
|
sig = if checkSignature:
|
|
|
|
let deferredCrypto = batchCrypto
|
|
|
|
.scheduleAggregateChecks(
|
2023-02-20 08:26:22 +00:00
|
|
|
fork, signedAggregateAndProof, pool.dag,
|
|
|
|
attesting_indices
|
2022-07-06 16:11:44 +00:00
|
|
|
)
|
|
|
|
if deferredCrypto.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(deferredCrypto.error)
|
2021-04-02 14:36:43 +00:00
|
|
|
|
2022-07-06 16:11:44 +00:00
|
|
|
let
|
|
|
|
(aggregatorFut, slotFut, aggregateFut, sig) = deferredCrypto.get()
|
|
|
|
|
|
|
|
block:
|
2023-05-02 11:06:02 +00:00
|
|
|
# [REJECT] The aggregator signature,
|
|
|
|
# signed_aggregate_and_proof.signature, is valid.
|
2022-12-05 21:36:53 +00:00
|
|
|
let x = await aggregatorFut
|
2022-07-06 16:11:44 +00:00
|
|
|
case x
|
|
|
|
of BatchResult.Invalid:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: invalid aggregator signature")
|
2022-07-06 16:11:44 +00:00
|
|
|
of BatchResult.Timeout:
|
|
|
|
beacon_aggregates_dropped_queue_full.inc()
|
|
|
|
return errIgnore("Aggregate: timeout checking aggregator signature")
|
|
|
|
of BatchResult.Valid:
|
|
|
|
discard
|
|
|
|
|
|
|
|
block:
|
|
|
|
# [REJECT] aggregate_and_proof.selection_proof
|
2022-12-05 21:36:53 +00:00
|
|
|
let x = await slotFut
|
2022-07-06 16:11:44 +00:00
|
|
|
case x
|
|
|
|
of BatchResult.Invalid:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: invalid slot signature")
|
2022-07-06 16:11:44 +00:00
|
|
|
of BatchResult.Timeout:
|
|
|
|
beacon_aggregates_dropped_queue_full.inc()
|
|
|
|
return errIgnore("Aggregate: timeout checking slot signature")
|
|
|
|
of BatchResult.Valid:
|
|
|
|
discard
|
|
|
|
|
|
|
|
block:
|
2023-05-02 11:06:02 +00:00
|
|
|
# [REJECT] The signature of the aggregate,
|
|
|
|
# aggregate.signature, is valid.
|
2022-12-05 21:36:53 +00:00
|
|
|
let x = await aggregateFut
|
2022-07-06 16:11:44 +00:00
|
|
|
case x
|
|
|
|
of BatchResult.Invalid:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: invalid aggregate signature")
|
2022-07-06 16:11:44 +00:00
|
|
|
of BatchResult.Timeout:
|
|
|
|
beacon_aggregates_dropped_queue_full.inc()
|
|
|
|
return errIgnore("Aggregate: timeout checking aggregate signature")
|
|
|
|
of BatchResult.Valid:
|
|
|
|
discard
|
|
|
|
sig
|
|
|
|
else:
|
2023-01-11 12:29:21 +00:00
|
|
|
aggregate.signature.load().valueOr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject("Aggregate: unable to load signature")
|
2020-07-02 16:15:27 +00:00
|
|
|
|
2020-08-10 14:49:18 +00:00
|
|
|
# The following rule follows implicitly from that we clear out any
|
|
|
|
# unviable blocks from the chain dag:
|
|
|
|
#
|
2023-06-29 08:34:21 +00:00
|
|
|
# [IGNORE] The current finalized_checkpoint is an ancestor of the block
|
|
|
|
# defined by aggregate.data.beacon_block_root -- i.e.
|
|
|
|
# get_checkpoint_block(store, aggregate.data.beacon_block_root,
|
|
|
|
# finalized_checkpoint.epoch) == store.finalized_checkpoint.root
|
2020-08-10 14:49:18 +00:00
|
|
|
|
2020-12-14 20:58:32 +00:00
|
|
|
# Only valid aggregates go in the list
|
|
|
|
if pool.nextAttestationEpoch.lenu64 <= aggregate_and_proof.aggregator_index:
|
|
|
|
pool.nextAttestationEpoch.setLen(
|
|
|
|
aggregate_and_proof.aggregator_index.int + 1)
|
|
|
|
pool.nextAttestationEpoch[aggregate_and_proof.aggregator_index].aggregate =
|
|
|
|
aggregate.data.target.epoch + 1
|
|
|
|
|
2021-04-09 12:59:24 +00:00
|
|
|
return ok((attesting_indices, sig))
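# Illustrative sketch (compiled only when this file is built as a main
# module) of the participation rule applied above: the spec's
# `get_attesting_indices` pairs every committee member with its aggregation
# bit and keeps those whose bit is set; an aggregate whose bits select nobody
# is rejected. The plain `seq`/`openArray` types are assumptions of this
# example - the real code works on the shuffling cache and SSZ bitlists.
when isMainModule:
  func exampleAttestingIndices(
      committee: openArray[int], bits: openArray[bool]): seq[int] =
    doAssert committee.len == bits.len   # bitlist length must match committee
    for i, member in committee:
      if bits[i]:
        result.add member

  doAssert exampleAttestingIndices(
    [10, 11, 12], [true, false, true]) == @[10, 12]
  doAssert exampleAttestingIndices(
    [10, 11, 12], [false, false, false]).len == 0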
|
2021-04-02 14:36:43 +00:00
|
|
|
|
2024-05-14 16:01:26 +00:00
|
|
|
proc validateAggregate*(
|
|
|
|
pool: ref AttestationPool,
|
|
|
|
batchCrypto: ref BatchCrypto,
|
|
|
|
signedAggregateAndProof: electra.SignedAggregateAndProof,
|
|
|
|
wallTime: BeaconTime,
|
|
|
|
checkSignature = true, checkCover = true):
|
|
|
|
Future[Result[
|
|
|
|
tuple[attestingIndices: seq[ValidatorIndex], sig: CookedSig],
|
|
|
|
ValidationError]] {.async: (raises: [CancelledError]).} =
|
|
|
|
debugComment "is not"
|
|
|
|
template aggregate_and_proof: untyped = signedAggregateAndProof.message
|
|
|
|
template aggregate: untyped = aggregate_and_proof.aggregate
|
|
|
|
|
|
|
|
# [REJECT] The aggregate attestation's epoch matches its target -- i.e.
|
|
|
|
# `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)`
|
|
|
|
let slot = block:
|
|
|
|
let v = check_attestation_slot_target(aggregate.data)
|
|
|
|
if v.isErr():
|
|
|
|
return pool.checkedReject(v.error)
|
|
|
|
v.get()
|
|
|
|
|
|
|
|
# [REJECT] The block being voted for (aggregate.data.beacon_block_root)
|
|
|
|
# passes validation.
|
|
|
|
# [IGNORE] if block is unseen so far and enqueue it in missing blocks
|
|
|
|
let target = block:
|
|
|
|
let v = check_beacon_and_target_block(pool[], aggregate.data)
|
|
|
|
if v.isErr(): # [IGNORE/REJECT]
|
|
|
|
return pool.checkedResult(v.error)
|
|
|
|
v.get()
|
|
|
|
|
|
|
|
let
|
|
|
|
shufflingRef =
|
|
|
|
pool.dag.getShufflingRef(target.blck, target.slot.epoch, false).valueOr:
|
|
|
|
# Target is verified - shouldn't happen
|
|
|
|
warn "No shuffling for attestation - report bug",
|
|
|
|
aggregate = shortLog(aggregate), target = shortLog(target)
|
|
|
|
return errIgnore("Aggregate: no shuffling")
|
|
|
|
|
|
|
|
# [REJECT] The committee index is within the expected range -- i.e.
|
|
|
|
# data.index < get_committee_count_per_slot(state, data.target.epoch).
|
|
|
|
let committee_index = block:
|
|
|
|
let idx = shufflingRef.get_committee_index(aggregate.data.index)
|
|
|
|
if idx.isErr():
|
|
|
|
return pool.checkedReject(
|
|
|
|
"Attestation: committee index not within expected range")
|
|
|
|
idx.get()
|
|
|
|
let
|
|
|
|
fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch)
|
|
|
|
attesting_indices = get_attesting_indices(
|
2024-05-17 12:37:41 +00:00
|
|
|
shufflingRef, slot, committee_index, aggregate.aggregation_bits, false)
|
2024-05-14 16:01:26 +00:00
|
|
|
|
|
|
|
let
|
|
|
|
sig =
|
|
|
|
aggregate.signature.load().valueOr:
|
|
|
|
return pool.checkedReject("Aggregate: unable to load signature")
|
|
|
|
|
|
|
|
ok((attesting_indices, sig))
|
|
|
|
|
2024-05-09 05:03:10 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/capella/p2p-interface.md#bls_to_execution_change
|
2023-01-19 22:00:40 +00:00
|
|
|
proc validateBlsToExecutionChange*(
|
2023-02-17 13:35:12 +00:00
|
|
|
pool: ValidatorChangePool, batchCrypto: ref BatchCrypto,
|
|
|
|
signed_address_change: SignedBLSToExecutionChange,
|
2024-01-22 16:34:54 +00:00
|
|
|
wallEpoch: Epoch): Future[Result[void, ValidationError]] {.async: (raises: [CancelledError]).} =
|
2023-01-19 22:00:40 +00:00
|
|
|
# [IGNORE] `current_epoch >= CAPELLA_FORK_EPOCH`, where `current_epoch` is
|
|
|
|
# defined by the current wall-clock time.
|
|
|
|
if not (wallEpoch >= pool.dag.cfg.CAPELLA_FORK_EPOCH):
|
2023-05-02 11:06:02 +00:00
|
|
|
return errIgnore(
|
|
|
|
"SignedBLSToExecutionChange: not accepting gossip until Capella")
|
2023-01-19 22:00:40 +00:00
|
|
|
|
|
|
|
# [IGNORE] The `signed_bls_to_execution_change` is the first valid signed bls
|
|
|
|
# to execution change received for the validator with index
|
|
|
|
# `signed_bls_to_execution_change.message.validator_index`.
|
|
|
|
if pool.isSeen(signed_address_change):
|
2023-05-02 11:06:02 +00:00
|
|
|
return errIgnore(
|
|
|
|
"SignedBLSToExecutionChange: not first valid change for validator index")
|
2023-01-19 22:00:40 +00:00
|
|
|
|
|
|
|
# [REJECT] All of the conditions within `process_bls_to_execution_change`
|
|
|
|
# pass validation.
|
|
|
|
withState(pool.dag.headState):
|
2023-03-11 00:35:52 +00:00
|
|
|
when consensusFork < ConsensusFork.Capella:
|
2023-05-02 11:06:02 +00:00
|
|
|
return errIgnore(
|
|
|
|
"SignedBLSToExecutionChange: can't validate against pre-Capella state")
|
2023-01-19 22:00:40 +00:00
|
|
|
else:
|
|
|
|
let res = check_bls_to_execution_change(
|
2023-02-17 13:35:12 +00:00
|
|
|
pool.dag.cfg.genesisFork, forkyState.data, signed_address_change,
|
|
|
|
{skipBlsValidation})
|
2023-01-19 22:00:40 +00:00
|
|
|
if res.isErr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(res.error)
|
2023-01-19 22:00:40 +00:00
|
|
|
|
2023-06-30 19:34:04 +00:00
|
|
|
# BLS to execution change signatures are batch-verified
|
|
|
|
let deferredCrypto = batchCrypto.scheduleBlsToExecutionChangeCheck(
|
2023-12-04 21:10:13 +00:00
|
|
|
pool.dag.cfg.genesisFork, signed_address_change)
|
2023-06-30 19:34:04 +00:00
|
|
|
if deferredCrypto.isErr():
|
|
|
|
return pool.checkedReject(deferredCrypto.error)
|
|
|
|
|
2023-11-11 08:49:34 +00:00
|
|
|
let (cryptoFut, _) = deferredCrypto.get()
|
2023-06-30 19:34:04 +00:00
|
|
|
case await cryptoFut
|
|
|
|
of BatchResult.Invalid:
|
|
|
|
return pool.checkedReject(
|
|
|
|
"SignedBLSToExecutionChange: invalid signature")
|
|
|
|
of BatchResult.Timeout:
|
|
|
|
return errIgnore(
|
|
|
|
"SignedBLSToExecutionChange: timeout checking signature")
|
|
|
|
of BatchResult.Valid:
|
|
|
|
discard # keep going only in this case
|
2023-02-17 13:35:12 +00:00
|
|
|
|
2023-12-22 13:52:43 +00:00
|
|
|
# Send notification about new BLS to execution change via callback
|
|
|
|
if not(isNil(pool.onBLSToExecutionChangeReceived)):
|
|
|
|
pool.onBLSToExecutionChangeReceived(signed_address_change)
|
|
|
|
|
2023-02-17 13:35:12 +00:00
|
|
|
return ok()
|
2023-01-19 22:00:40 +00:00
|
|
|
|
2023-04-21 18:52:43 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attester_slashing
|
2021-03-06 07:32:55 +00:00
|
|
|
proc validateAttesterSlashing*(
|
2024-04-21 05:49:11 +00:00
|
|
|
pool: ValidatorChangePool, attester_slashing: phase0.AttesterSlashing):
|
2021-11-05 15:39:47 +00:00
|
|
|
Result[void, ValidationError] =
|
2021-03-06 07:32:55 +00:00
|
|
|
# [IGNORE] At least one index in the intersection of the attesting indices of
|
|
|
|
# each attestation has not yet been seen in any prior attester_slashing (i.e.
|
|
|
|
# attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices),
|
|
|
|
# verify if any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))).
|
2021-11-05 15:39:47 +00:00
|
|
|
if pool.isSeen(attester_slashing):
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore(
|
|
|
|
"AttesterSlashing: attester-slashed index already attester-slashed")
|
2021-03-06 07:32:55 +00:00
|
|
|
|
|
|
|
# [REJECT] All of the conditions within process_attester_slashing pass
|
|
|
|
# validation.
|
|
|
|
let attester_slashing_validity =
|
2022-03-16 07:20:40 +00:00
|
|
|
check_attester_slashing(pool.dag.headState, attester_slashing, {})
|
2021-03-06 07:32:55 +00:00
|
|
|
if attester_slashing_validity.isErr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(attester_slashing_validity.error)
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2023-12-22 17:54:55 +00:00
|
|
|
# Send notification about new attester slashing via callback
|
|
|
|
if not(isNil(pool.onAttesterSlashingReceived)):
|
|
|
|
pool.onAttesterSlashingReceived(attester_slashing)
|
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
ok()
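# Illustrative sketch (compiled only when this file is built as a main
# module) of the "new slashed index" rule above: intersect the attesting
# indices of the two conflicting attestations and forward the slashing only
# if that intersection contains an index not covered by previously seen
# slashings. The `HashSet` representation is an assumption of this example -
# the pool tracks this via `isSeen`.
when isMainModule:
  import std/sets

  func exampleAddsNewSlashedIndex(
      indices1, indices2, priorSeen: HashSet[uint64]): bool =
    let slashed = indices1 * indices2   # intersection of the two attestations
    (slashed - priorSeen).len > 0       # does it slash anyone new?

  doAssert exampleAddsNewSlashedIndex(
    [1'u64, 2, 3].toHashSet, [2'u64, 3, 4].toHashSet, [3'u64].toHashSet)
  doAssert not exampleAddsNewSlashedIndex(
    [1'u64, 2].toHashSet, [2'u64].toHashSet, [2'u64, 5].toHashSet)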
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2023-11-08 05:28:03 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.4/specs/phase0/p2p-interface.md#proposer_slashing
|
2021-03-06 07:32:55 +00:00
|
|
|
proc validateProposerSlashing*(
|
2023-01-19 22:00:40 +00:00
|
|
|
pool: ValidatorChangePool, proposer_slashing: ProposerSlashing):
|
2021-11-05 15:39:47 +00:00
|
|
|
Result[void, ValidationError] =
|
2021-03-06 07:32:55 +00:00
|
|
|
# Not from spec; the rest of NBC wouldn't have correctly processed it either.
|
2023-05-02 11:06:02 +00:00
|
|
|
if proposer_slashing.signed_header_1.message.proposer_index > int.high.uint64:
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore("ProposerSlashing: proposer-slashed index too high")
|
2021-03-06 07:32:55 +00:00
|
|
|
|
|
|
|
# [IGNORE] The proposer slashing is the first valid proposer slashing
|
|
|
|
# received for the proposer with index
|
|
|
|
# proposer_slashing.signed_header_1.message.proposer_index.
|
2021-11-05 15:39:47 +00:00
|
|
|
if pool.isSeen(proposer_slashing):
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore(
|
|
|
|
"ProposerSlashing: proposer-slashed index already proposer-slashed")
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2023-05-02 11:06:02 +00:00
|
|
|
# [REJECT] All of the conditions within process_proposer_slashing
|
|
|
|
# pass validation.
|
2021-03-06 07:32:55 +00:00
|
|
|
let proposer_slashing_validity =
|
2022-03-16 07:20:40 +00:00
|
|
|
check_proposer_slashing(pool.dag.headState, proposer_slashing, {})
|
2021-03-06 07:32:55 +00:00
|
|
|
if proposer_slashing_validity.isErr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(proposer_slashing_validity.error)
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2023-12-22 17:54:55 +00:00
|
|
|
# Send notification about new proposer slashing via callback
|
|
|
|
if not(isNil(pool.onProposerSlashingReceived)):
|
|
|
|
pool.onProposerSlashingReceived(proposer_slashing)
|
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
ok()
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2023-12-06 22:16:55 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#voluntary_exit
|
2021-03-06 07:32:55 +00:00
|
|
|
proc validateVoluntaryExit*(
|
2023-01-19 22:00:40 +00:00
|
|
|
pool: ValidatorChangePool, signed_voluntary_exit: SignedVoluntaryExit):
|
2021-11-05 15:39:47 +00:00
|
|
|
Result[void, ValidationError] =
|
2021-03-06 07:32:55 +00:00
|
|
|
# [IGNORE] The voluntary exit is the first valid voluntary exit received for
|
|
|
|
# the validator with index signed_voluntary_exit.message.validator_index.
|
|
|
|
if signed_voluntary_exit.message.validator_index >=
|
2022-03-16 07:20:40 +00:00
|
|
|
getStateField(pool.dag.headState, validators).lenu64:
|
2021-08-24 19:49:51 +00:00
|
|
|
return errIgnore("VoluntaryExit: validator index too high")
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2021-06-01 11:13:40 +00:00
|
|
|
# Given that getStateField(pool.dag.headState, validators) is a seq,
|
2021-03-06 07:32:55 +00:00
|
|
|
# signed_voluntary_exit.message.validator_index.int is already valid, but
|
|
|
|
# check explicitly if one changes that data structure.
|
2021-11-05 15:39:47 +00:00
|
|
|
if pool.isSeen(signed_voluntary_exit):
|
2023-05-02 11:06:02 +00:00
|
|
|
return errIgnore(
|
|
|
|
"VoluntaryExit: validator index already voluntarily exited")
|
2021-03-06 07:32:55 +00:00
|
|
|
|
|
|
|
# [REJECT] All of the conditions within process_voluntary_exit pass
|
|
|
|
# validation.
|
|
|
|
let voluntary_exit_validity =
|
2021-07-12 13:01:38 +00:00
|
|
|
check_voluntary_exit(
|
2022-03-16 07:20:40 +00:00
|
|
|
pool.dag.cfg, pool.dag.headState, signed_voluntary_exit, {})
|
2021-03-06 07:32:55 +00:00
|
|
|
if voluntary_exit_validity.isErr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return pool.checkedReject(voluntary_exit_validity.error)
|
2021-03-06 07:32:55 +00:00
|
|
|
|
2021-09-22 12:17:15 +00:00
|
|
|
# Send notification about new voluntary exit via callback
|
|
|
|
if not(isNil(pool.onVoluntaryExitReceived)):
|
|
|
|
pool.onVoluntaryExitReceived(signed_voluntary_exit)
|
|
|
|
|
2021-03-06 07:32:55 +00:00
|
|
|
ok()
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2023-08-09 03:58:47 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/altair/p2p-interface.md#sync_committee_subnet_id
|
2021-08-28 10:40:01 +00:00
|
|
|
proc validateSyncCommitteeMessage*(
|
|
|
|
dag: ChainDAGRef,
|
2023-05-17 04:55:55 +00:00
|
|
|
quarantine: ref Quarantine,
|
2021-12-09 12:56:54 +00:00
|
|
|
batchCrypto: ref BatchCrypto,
|
2021-12-11 15:39:24 +00:00
|
|
|
syncCommitteeMsgPool: ref SyncCommitteeMsgPool,
|
2021-08-28 10:40:01 +00:00
|
|
|
msg: SyncCommitteeMessage,
|
2021-11-05 15:39:47 +00:00
|
|
|
subcommitteeIdx: SyncSubcommitteeIndex,
|
2021-08-28 10:40:01 +00:00
|
|
|
wallTime: BeaconTime,
|
|
|
|
checkSignature: bool):
|
2023-05-17 04:55:55 +00:00
|
|
|
Future[Result[
|
2024-01-22 16:34:54 +00:00
|
|
|
(BlockId, CookedSig, seq[uint64]), ValidationError]] {.async: (raises: [CancelledError]).} =
|
2021-08-28 10:40:01 +00:00
|
|
|
block:
|
2022-01-13 13:46:08 +00:00
|
|
|
# [IGNORE] The message's slot is for the current slot (with a
|
|
|
|
# `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e.
|
|
|
|
# `sync_committee_message.slot == current_slot`.
|
2023-06-29 08:34:21 +00:00
|
|
|
let v = check_slot_exact(msg.slot, wallTime)
|
2021-12-09 12:56:54 +00:00
|
|
|
if v.isErr():
|
|
|
|
return err(v.error())
|
2021-08-28 10:40:01 +00:00
|
|
|
|
|
|
|
# [REJECT] The subnet_id is valid for the given validator
|
|
|
|
# i.e. subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index).
|
|
|
|
# Note this validation implies the validator is part of the broader
|
|
|
|
# current sync committee along with the correct subcommittee.
|
|
|
|
# This check also ensures that the validator index is in range
|
2021-09-28 07:44:20 +00:00
|
|
|
let positionsInSubcommittee = dag.getSubcommitteePositions(
|
2021-11-05 15:39:47 +00:00
|
|
|
msg.slot + 1, subcommitteeIdx, msg.validator_index)
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2021-09-28 07:44:20 +00:00
|
|
|
if positionsInSubcommittee.len == 0:
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject(
|
2021-08-28 10:40:01 +00:00
|
|
|
"SyncCommitteeMessage: originator not part of sync committee")
|
|
|
|
|
2023-05-17 04:55:55 +00:00
|
|
|
# [IGNORE] The block being signed (`sync_committee_message.beacon_block_root`)
|
|
|
|
# has been seen (via both gossip and non-gossip sources) (a client MAY queue
|
|
|
|
# sync committee messages for processing once block is received)
|
|
|
|
# [REJECT] The block being signed (`sync_committee_message.beacon_block_root`)
|
|
|
|
# passes validation.
|
|
|
|
let
|
|
|
|
blockRoot = msg.beacon_block_root
|
|
|
|
blck = dag.getBlockRef(blockRoot).valueOr:
|
|
|
|
if blockRoot in quarantine[].unviable:
|
|
|
|
return dag.checkedReject("SyncCommitteeMessage: target invalid")
|
|
|
|
quarantine[].addMissing(blockRoot)
|
|
|
|
return errIgnore("SyncCommitteeMessage: target not found")
|
|
|
|
|
2021-08-28 10:40:01 +00:00
|
|
|
block:
|
2022-01-13 13:46:08 +00:00
|
|
|
# [IGNORE] There has been no other valid sync committee message for the
|
|
|
|
# declared `slot` for the validator referenced by
|
|
|
|
# `sync_committee_message.validator_index`
|
2021-08-28 10:40:01 +00:00
|
|
|
#
|
|
|
|
# Note this validation is per topic so that for a given slot, multiple
|
|
|
|
# messages could be forwarded with the same validator_index as long as
|
|
|
|
# the subnet_ids are distinct.
|
2023-05-17 04:55:55 +00:00
|
|
|
if syncCommitteeMsgPool[].isSeen(msg, subcommitteeIdx, dag.head.bid):
|
2021-09-27 14:36:28 +00:00
|
|
|
return errIgnore("SyncCommitteeMessage: duplicate message")
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2021-11-05 15:39:47 +00:00
|
|
|
# [REJECT] The signature is valid for the message beacon_block_root for the
|
|
|
|
# validator referenced by validator_index.
|
|
|
|
let
|
2023-01-11 12:29:21 +00:00
|
|
|
senderPubKey = dag.validatorKey(msg.validator_index).valueOr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject("SyncCommitteeMessage: invalid validator index")
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
let sig =
|
|
|
|
if checkSignature:
|
|
|
|
# Sync committee message signatures are batch-verified
|
|
|
|
let deferredCrypto = batchCrypto
|
|
|
|
.scheduleSyncCommitteeMessageCheck(
|
2023-05-17 04:55:55 +00:00
|
|
|
dag.forkAtEpoch(msg.slot.epoch),
|
|
|
|
msg.slot, msg.beacon_block_root,
|
2023-01-11 12:29:21 +00:00
|
|
|
senderPubKey, msg.signature)
|
2021-12-09 12:56:54 +00:00
|
|
|
if deferredCrypto.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject(deferredCrypto.error)
|
2021-12-09 12:56:54 +00:00
|
|
|
|
|
|
|
# Await the crypto check
|
|
|
|
let
|
|
|
|
(cryptoFut, sig) = deferredCrypto.get()
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2022-12-05 21:36:53 +00:00
|
|
|
let x = (await cryptoFut)
|
2021-12-09 12:56:54 +00:00
|
|
|
case x
|
|
|
|
of BatchResult.Invalid:
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject("SyncCommitteeMessage: invalid signature")
|
2021-12-09 12:56:54 +00:00
|
|
|
of BatchResult.Timeout:
|
|
|
|
beacon_sync_messages_dropped_queue_full.inc()
|
|
|
|
return errIgnore("SyncCommitteeMessage: timeout checking signature")
|
|
|
|
of BatchResult.Valid:
|
|
|
|
sig # keep going only in this case
|
|
|
|
else:
|
2023-01-11 12:29:21 +00:00
|
|
|
msg.signature.load().valueOr:
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject(
|
|
|
|
"SyncCommitteeMessage: unable to load signature")
|
2021-11-05 15:39:47 +00:00
|
|
|
|
2023-05-17 04:55:55 +00:00
|
|
|
return ok((blck.bid, sig, positionsInSubcommittee))
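# Illustrative sketch (compiled only when this file is built as a main
# module) of the subcommittee-membership lookup above: the sync committee is
# split into SYNC_COMMITTEE_SUBNET_COUNT contiguous subcommittees and, since
# it is sampled with replacement, a validator may hold several positions
# inside one of them. The tiny committee and the plain `int` indices are
# assumptions of this example (mainnet uses 512 members over 4 subnets).
when isMainModule:
  func examplePositionsInSubcommittee(
      syncCommittee: openArray[int],
      subcommitteeIdx, subnetCount, validator: int): seq[int] =
    let subSize = syncCommittee.len div subnetCount
    for pos in 0 ..< subSize:
      if syncCommittee[subcommitteeIdx * subSize + pos] == validator:
        result.add pos

  # validator 7 holds positions 0 and 1 of subcommittee 0 ...
  doAssert examplePositionsInSubcommittee([7, 7, 9, 2], 0, 2, 7) == @[0, 1]
  # ... and is absent from subcommittee 1, so its messages there are rejected
  doAssert examplePositionsInSubcommittee([7, 7, 9, 2], 1, 2, 7).len == 0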
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2023-05-05 20:48:33 +00:00
|
|
|
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
|
2021-11-25 12:20:36 +00:00
|
|
|
proc validateContribution*(
|
2021-08-28 10:40:01 +00:00
|
|
|
dag: ChainDAGRef,
|
2023-05-17 04:55:55 +00:00
|
|
|
quarantine: ref Quarantine,
|
2021-12-09 12:56:54 +00:00
|
|
|
batchCrypto: ref BatchCrypto,
|
|
|
|
syncCommitteeMsgPool: ref SyncCommitteeMsgPool,
|
2021-08-28 10:40:01 +00:00
|
|
|
msg: SignedContributionAndProof,
|
|
|
|
wallTime: BeaconTime,
|
2023-05-02 11:06:02 +00:00
|
|
|
checkSignature: bool
|
2023-05-17 04:55:55 +00:00
|
|
|
): Future[Result[
|
2024-01-22 16:34:54 +00:00
|
|
|
(BlockId, CookedSig, seq[ValidatorIndex]), ValidationError]] {.async: (raises: [CancelledError]).} =
|
2021-12-09 12:56:54 +00:00
|
|
|
block:
|
2023-09-13 01:23:18 +00:00
|
|
|
# [IGNORE] The contribution's slot is for the current slot
|
|
|
|
# (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
|
|
|
|
# i.e. contribution.slot == current_slot.
|
|
|
|
let v = check_slot_exact(msg.message.contribution.slot, wallTime)
|
2023-05-02 11:06:02 +00:00
|
|
|
if v.isErr(): # [IGNORE]
|
2021-12-09 12:56:54 +00:00
|
|
|
return err(v.error())
|
2021-08-28 10:40:01 +00:00
|
|
|
|
|
|
|
# [REJECT] The subcommittee index is in the allowed range
|
|
|
|
# i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT.
|
2023-01-12 14:08:08 +00:00
|
|
|
let subcommitteeIdx = SyncSubcommitteeIndex.init(
|
|
|
|
msg.message.contribution.subcommittee_index).valueOr:
|
2023-05-17 04:55:55 +00:00
|
|
|
return dag.checkedReject("Contribution: subcommittee index too high")
|
|
|
|
|
|
|
|
# [REJECT] The contribution has participants
|
|
|
|
# that is, any(contribution.aggregation_bits).
|
|
|
|
if msg.message.contribution.aggregation_bits.isZeros:
|
|
|
|
return dag.checkedReject("Contribution: aggregation bits empty")
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2023-05-02 11:06:02 +00:00
|
|
|
# [REJECT] contribution_and_proof.selection_proof selects the validator
|
|
|
|
# as an aggregator for the slot
|
|
|
|
# i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof)
|
|
|
|
# returns True.
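#
# Per the Altair honest-validator spec, `is_sync_committee_aggregator` hashes
# the selection proof and checks `bytes_to_uint64(hash(proof)[0:8]) mod
# max(1, SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT div
# TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE) == 0`; with mainnet constants the
# modulo is 8, i.e. roughly 16 aggregators per subcommittee per slot.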
|
2021-08-28 10:40:01 +00:00
|
|
|
if not is_sync_committee_aggregator(msg.message.selection_proof):
|
2023-05-17 04:55:55 +00:00
|
|
|
return dag.checkedReject("Contribution: invalid selection_proof")
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2023-05-02 11:06:02 +00:00
|
|
|
# [IGNORE] The sync committee contribution is the first valid
|
|
|
|
# contribution received for the aggregator with index
|
|
|
|
# contribution_and_proof.aggregator_index for the slot contribution.slot
|
|
|
|
# and subcommittee index contribution.subcommittee_index
|
2021-12-09 12:56:54 +00:00
|
|
|
# (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this
|
|
|
|
# topic that can be flushed after each slot).
|
|
|
|
if syncCommitteeMsgPool[].isSeen(msg.message):
|
2024-01-17 14:46:54 +00:00
|
|
|
return errIgnore("Contribution: validator has already aggregated in slot")
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2021-10-19 15:20:55 +00:00
|
|
|
# [REJECT] The aggregator's validator index is in the declared subcommittee
|
|
|
|
# of the current sync committee.
|
|
|
|
# i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in
|
|
|
|
# get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index).
|
|
|
|
let
|
2023-05-17 04:55:55 +00:00
|
|
|
aggregator_index =
|
|
|
|
ValidatorIndex.init(msg.message.aggregator_index).valueOr:
|
|
|
|
return dag.checkedReject("Contribution: invalid aggregator index")
|
|
|
|
# TODO we take a copy of the participants to avoid the data going stale
|
|
|
|
# between validation and use - nonetheless, a design that avoids it and
|
|
|
|
# stays safe would be nice
|
|
|
|
participants = dag.syncCommitteeParticipants(
|
2023-09-13 01:23:18 +00:00
|
|
|
msg.message.contribution.slot + 1, subcommitteeIdx)
|
2023-05-17 04:55:55 +00:00
|
|
|
if aggregator_index notin participants:
|
|
|
|
return dag.checkedReject("Contribution: aggregator not in subcommittee")
|
|
|
|
|
|
|
|
# [IGNORE] The block being signed
|
|
|
|
# (`contribution_and_proof.contribution.beacon_block_root`) has been seen
|
|
|
|
# (via both gossip and non-gossip sources) (a client MAY queue sync committee
|
|
|
|
# contributions for processing once block is received)
|
|
|
|
# [REJECT] The block being signed
|
|
|
|
# (`contribution_and_proof.contribution.beacon_block_root`) passes validation.
|
|
|
|
let
|
|
|
|
blockRoot = msg.message.contribution.beacon_block_root
|
|
|
|
blck = dag.getBlockRef(blockRoot).valueOr:
|
|
|
|
if blockRoot in quarantine[].unviable:
|
|
|
|
return dag.checkedReject("Contribution: target invalid")
|
|
|
|
quarantine[].addMissing(blockRoot)
|
|
|
|
return errIgnore("Contribution: target not found")
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2023-05-02 11:06:02 +00:00
|
|
|
# [IGNORE] A valid sync committee contribution with equal `slot`,
|
|
|
|
# `beacon_block_root` and `subcommittee_index` whose `aggregation_bits`
|
|
|
|
# is non-strict superset has _not_ already been seen.
|
2023-05-17 04:55:55 +00:00
|
|
|
if syncCommitteeMsgPool[].covers(msg.message.contribution, blck.bid):
|
2024-01-17 14:46:54 +00:00
|
|
|
return errIgnore("Contribution: already covered")
|
2021-12-20 19:20:31 +00:00
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
let sig = if checkSignature:
|
|
|
|
let deferredCrypto = batchCrypto.scheduleContributionChecks(
|
2023-05-17 04:55:55 +00:00
|
|
|
dag.forkAtEpoch(msg.message.contribution.slot.epoch),
|
|
|
|
msg, subcommitteeIdx, dag)
|
2021-12-09 12:56:54 +00:00
|
|
|
if deferredCrypto.isErr():
|
2023-05-02 11:06:02 +00:00
|
|
|
return dag.checkedReject(deferredCrypto.error)
|
2021-08-28 10:40:01 +00:00
|
|
|
|
2021-12-09 12:56:54 +00:00
|
|
|
let
|
|
|
|
(aggregatorFut, proofFut, contributionFut, sig) = deferredCrypto.get()
|
|
|
|
|
|
|
|
    block:
      # [REJECT] The aggregator signature,
      # `signed_contribution_and_proof.signature`, is valid.
      let x = await aggregatorFut
      case x
      of BatchResult.Invalid:
        return dag.checkedReject(
          "Contribution: invalid aggregator signature")
      of BatchResult.Timeout:
        beacon_contributions_dropped_queue_full.inc()
        return errIgnore(
          "Contribution: timeout checking aggregator signature")
      of BatchResult.Valid:
        discard

    block:
      # [REJECT] The `contribution_and_proof.selection_proof`
      # is a valid signature of the `SyncAggregatorSelectionData`
      # derived from the `contribution` by the validator with index
      # `contribution_and_proof.aggregator_index`.
      let x = await proofFut
      case x
      of BatchResult.Invalid:
        return dag.checkedReject("Contribution: invalid proof")
      of BatchResult.Timeout:
        beacon_contributions_dropped_queue_full.inc()
        return errIgnore("Contribution: timeout checking proof")
      of BatchResult.Valid:
        discard

    block:
      # [REJECT] The aggregate signature is valid for the message
      # `beacon_block_root` and aggregate pubkey derived from the
      # participation info in `aggregation_bits` for the subcommittee
      # specified by the `contribution.subcommittee_index`.
      let x = await contributionFut
      case x
      of BatchResult.Invalid:
        return dag.checkedReject(
          "Contribution: invalid contribution signature")
      of BatchResult.Timeout:
        beacon_contributions_dropped_queue_full.inc()
        return errIgnore(
          "Contribution: timeout checking contribution signature")
      of BatchResult.Valid:
        discard

    sig
  else:
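    # Signature checking was skipped by the caller - the aggregate signature is
    # still deserialised here so that a cooked signature can be returned.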
    msg.message.contribution.signature.load().valueOr:
      return dag.checkedReject("Contribution: unable to load signature")

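  # Hand the block id, cooked signature and participant set back to the caller,
  # which presumably uses them when adding the contribution to the message pool.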
  return ok((blck.bid, sig, participants))

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_finality_update
proc validateLightClientFinalityUpdate*(
    pool: var LightClientPool, dag: ChainDAGRef,
    finality_update: ForkedLightClientFinalityUpdate,
    wallTime: BeaconTime): Result[void, ValidationError] =
  # [IGNORE] The `finalized_header.beacon.slot` is greater than that of all
  # previously forwarded `finality_update`s, or it matches the highest
  # previously forwarded slot and also has a `sync_aggregate` indicating
  # supermajority (> 2/3) sync committee participation while the previously
  # forwarded `finality_update` for that slot did not indicate supermajority
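  # For forks without light client data (`lcDataFork == None`) the accessors
  # below fall back to GENESIS_SLOT, which appears to act as a sentinel that
  # causes such updates to be ignored by the slot comparisons that follow.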
  let finalized_slot = withForkyFinalityUpdate(finality_update):
    when lcDataFork > LightClientDataFork.None:
      forkyFinalityUpdate.finalized_header.beacon.slot
    else:
      GENESIS_SLOT
  if finalized_slot < pool.latestForwardedFinalitySlot:
    return errIgnore("LightClientFinalityUpdate: slot already forwarded")
  let has_supermajority = withForkyFinalityUpdate(finality_update):
    when lcDataFork > LightClientDataFork.None:
      forkyFinalityUpdate.sync_aggregate.hasSupermajoritySyncParticipation
    else:
      false
  if finalized_slot == pool.latestForwardedFinalitySlot:
    if pool.latestForwardedFinalityHasSupermajority:
      return errIgnore("LightClientFinalityUpdate: already have supermajority")
    if not has_supermajority:
      return errIgnore("LightClientFinalityUpdate: no new supermajority")

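  # MAXIMUM_GOSSIP_CLOCK_DISPARITY is added to the wall clock below so that
  # updates from peers whose clocks run slightly ahead of ours are not ignored
  # unnecessarily.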
  let
    signature_slot = withForkyFinalityUpdate(finality_update):
      when lcDataFork > LightClientDataFork.None:
        forkyFinalityUpdate.signature_slot
      else:
        GENESIS_SLOT
    currentTime = wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY
    forwardTime = signature_slot.light_client_finality_update_time
  if currentTime < forwardTime:
    # [IGNORE] The `finality_update` is received after the block at
    # `signature_slot` was given enough time to propagate through the network.
    return errIgnore("LightClientFinalityUpdate: received too early")

  if not finality_update.matches(dag.lcDataStore.cache.latest):
    # [IGNORE] The received `finality_update` matches the locally computed one
    # exactly.
    return errIgnore("LightClientFinalityUpdate: not matching local")

  pool.latestForwardedFinalitySlot = finalized_slot
  pool.latestForwardedFinalityHasSupermajority = has_supermajority
  ok()

# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
proc validateLightClientOptimisticUpdate*(
    pool: var LightClientPool, dag: ChainDAGRef,
    optimistic_update: ForkedLightClientOptimisticUpdate,
    wallTime: BeaconTime): Result[void, ValidationError] =
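  # This mirrors the finality-update flow above, but keys the dedup check on
  # `attested_header.beacon.slot` and has no supermajority special case.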
  let attested_slot = withForkyOptimisticUpdate(optimistic_update):
    when lcDataFork > LightClientDataFork.None:
      forkyOptimisticUpdate.attested_header.beacon.slot
    else:
      GENESIS_SLOT
  if attested_slot <= pool.latestForwardedOptimisticSlot:
    # [IGNORE] The `attested_header.beacon.slot` is greater than that of all
    # previously forwarded `optimistic_update`s
    return errIgnore("LightClientOptimisticUpdate: slot already forwarded")

  let
    signature_slot = withForkyOptimisticUpdate(optimistic_update):
      when lcDataFork > LightClientDataFork.None:
        forkyOptimisticUpdate.signature_slot
      else:
        GENESIS_SLOT
    currentTime = wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY
    forwardTime = signature_slot.light_client_optimistic_update_time
  if currentTime < forwardTime:
    # [IGNORE] The `optimistic_update` is received after the block at
    # `signature_slot` was given enough time to propagate through the network.
    return errIgnore("LightClientOptimisticUpdate: received too early")

  if not optimistic_update.matches(dag.lcDataStore.cache.latest):
    # [IGNORE] The received `optimistic_update` matches the locally computed one
    # exactly.
    return errIgnore("LightClientOptimisticUpdate: not matching local")

  pool.latestForwardedOptimisticSlot = attested_slot
  ok()