harden gossip `strictVerification` in local testnets (#4880)
Fail local testnets on any gossip REJECT, instead of only asserting on some of the attestation-related checks. This also ensures that blocks, BLS to execution changes, blob sidecars and light client (LC) messages are checked when running in a local testnet environment (`--verify-finalization`). See https://github.com/status-im/nimbus-eth2/pull/2904#discussion_r719603935
This commit is contained in: parent dc32add555 · commit ecaf6c8e92
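The mechanism, in miniature: a gossip check classified as [REJECT] never depends on timing or on the exact state of the DAG, so under strict verification it is escalated to an assertion failure that halts the testnet, while [IGNORE] results pass through untouched. A self-contained sketch of that pattern with stand-in types (not the real Nimbus definitions):

```nim
type
  ValidationResult = enum
    Accept, Ignore, Reject
  ValidationError = tuple[res: ValidationResult, msg: string]

proc checkedReject(msg: string, strictVerification: bool): ValidationError =
  ## A [REJECT] is an internal consistency failure, never a timing artifact,
  ## so strict verification treats it as a fatal bug.
  if strictVerification:
    raiseAssert msg
  (Reject, msg)

proc checkedResult(error: ValidationError,
                   strictVerification: bool): ValidationError =
  ## Mixed [IGNORE]/[REJECT] paths escalate only the REJECT case: an IGNORE
  ## may be a mere timing effect and must pass through unchanged.
  if error.res == Reject and strictVerification:
    raiseAssert error.msg
  error

when isMainModule:
  # Relaxed mode: the rejection is returned to the gossip layer as usual.
  doAssert checkedReject("bad signature", false).res == Reject
  # An IGNORE survives strict mode unchanged.
  doAssert checkedResult((Ignore, "early message"), true).res == Ignore
```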
```diff
@@ -132,7 +132,6 @@ func check_beacon_and_target_block(
 func check_aggregation_count(
     attestation: Attestation, singular: bool): Result[void, ValidationError] =
-
   let ones = attestation.aggregation_bits.countOnes()
   if singular and ones != 1:
     return errReject("Attestation must have a single attestation bit set")
```
```diff
@@ -156,23 +155,74 @@ func check_attestation_subnet(
 # Gossip Validation
 # ----------------------------------------------------------------
 
-template checkedReject(msg: cstring): untyped =
-  if strictVerification in pool.dag.updateFlags:
-    # This doesn't depend on the wall clock or the exact state of the DAG; it's
-    # an internal consistency/correctness check only, and effectively never has
-    # false positives. These don't, for example, arise from timeouts.
+# Generally, the following rules apply for gossip validation:
+#
+# [REJECT]
+# This doesn't depend on the wall clock or the exact state of the DAG; it's
+# an internal consistency/correctness check only, and effectively never has
+# false positives. These don't, for example, arise from timeouts.
+#
+# [IGNORE]
+# This may be intermittent, depend on timing or the current state of the DAG.
+
+template checkedReject(
+    msg: cstring, strictVerification: bool): untyped =
+  if strictVerification:
     raiseAssert $msg
   errReject(msg)
 
-template checkedReject(error: ValidationError): untyped =
+template checkedReject(
+    error: ValidationError, strictVerification: bool): untyped =
   doAssert error[0] == ValidationResult.Reject
-  if strictVerification in pool.dag.updateFlags:
-    # This doesn't depend on the wall clock or the exact state of the DAG; it's
-    # an internal consistency/correctness check only, and effectively never has
-    # false positives. These don't, for example, arise from timeouts.
+  if strictVerification:
     raiseAssert $error[1]
   err(error)
 
+template checkedResult*(
+    error: ValidationError, strictVerification: bool): untyped =
+  if error[0] == ValidationResult.Reject and strictVerification:
+    raiseAssert $error[1]
+  err(error)
+
+# ChainDAGRef
+template checkedReject(
+    dag: ChainDAGRef, msg: cstring): untyped =
+  checkedReject(msg, strictVerification in dag.updateFlags)
+
+template checkedReject(
+    dag: ChainDAGRef, error: ValidationError): untyped =
+  checkedReject(error, strictVerification in dag.updateFlags)
+
+template checkedResult(
+    dag: ChainDAGRef, error: ValidationError): untyped =
+  checkedResult(error, strictVerification in dag.updateFlags)
+
+# AttestationPool
+template checkedReject(
+    pool: ref AttestationPool, msg: cstring): untyped =
+  pool[].dag.checkedReject(msg)
+
+template checkedReject(
+    pool: ref AttestationPool, error: ValidationError): untyped =
+  pool[].dag.checkedReject(error)
+
+template checkedResult(
+    pool: ref AttestationPool, error: ValidationError): untyped =
+  pool[].dag.checkedResult(error)
+
+# ValidatorChangePool
+template checkedReject(
+    pool: ValidatorChangePool, msg: cstring): untyped =
+  pool.dag.checkedReject(msg)
+
+template checkedReject(
+    pool: ValidatorChangePool, error: ValidationError): untyped =
+  pool.dag.checkedReject(error)
+
+template checkedResult(
+    pool: ValidatorChangePool, error: ValidationError): untyped =
+  pool.dag.checkedResult(error)
+
 template validateBeaconBlockBellatrix(
     signed_beacon_block: phase0.SignedBeaconBlock | altair.SignedBeaconBlock,
     parent: BlockRef): untyped =
```
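The per-type wrappers above keep call sites terse: a validation proc holding a `pool` or `dag` just writes `pool.checkedReject(...)`, and Nim's dot-call syntax selects the overload that knows where to find the strict-verification flag. A mock sketch of that dispatch (stand-in types, not the real `ChainDAGRef`/`AttestationPool`):

```nim
type
  MockDag = object
    strict: bool   # stand-in for `strictVerification in dag.updateFlags`
  MockPool = object
    dag: MockDag

proc checkedReject(dag: MockDag, msg: string): string =
  if dag.strict:
    raiseAssert msg
  "REJECT: " & msg

proc checkedReject(pool: MockPool, msg: string): string =
  # The pool overload just forwards to its DAG, as the real templates do.
  pool.dag.checkedReject(msg)

when isMainModule:
  let pool = MockPool(dag: MockDag(strict: false))
  echo pool.checkedReject("Attestation: committee index not within expected range")
```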
```diff
@@ -180,9 +230,11 @@ template validateBeaconBlockBellatrix(
 
 # https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
 template validateBeaconBlockBellatrix(
-    signed_beacon_block: bellatrix.SignedBeaconBlock |
-    capella.SignedBeaconBlock | deneb.SignedBeaconBlock,
-    parent: BlockRef): untyped =
+    signed_beacon_block:
+      bellatrix.SignedBeaconBlock |
+      capella.SignedBeaconBlock |
+      deneb.SignedBeaconBlock,
+    parent: BlockRef): untyped =
   # If the execution is enabled for the block -- i.e.
   # is_execution_enabled(state, block.body) then validate the following:
   #
@@ -210,7 +262,8 @@ template validateBeaconBlockBellatrix(
     if not (signed_beacon_block.message.body.execution_payload.timestamp ==
         timestampAtSlot):
       quarantine[].addUnviable(signed_beacon_block.root)
-      return errReject("BeaconBlock: mismatched execution payload timestamp")
+      return dag.checkedReject(
+        "BeaconBlock: mismatched execution payload timestamp")
 
   # The condition:
   # [REJECT] The block's parent (defined by `block.parent_root`) passes all
```
```diff
@@ -229,7 +282,7 @@ proc validateBlobSidecar*(
   # [REJECT] The sidecar is for the correct topic --
   # i.e. sidecar.index matches the topic {index}.
   if sbs.message.index != idx:
-    return errReject("SignedBlobSidecar: mismatched gossip topic index")
+    return dag.checkedReject("SignedBlobSidecar: mismatched gossip topic index")
 
   if dag.getBlockRef(sbs.message.block_root).isSome():
     return errIgnore("SignedBlobSidecar: already have block")
@@ -252,12 +305,12 @@ proc validateBlobSidecar*(
   # [REJECT] The sidecar's block's parent (defined by sidecar.block_parent_root)
   # passes validation.
   let parent = dag.getBlockRef(sbs.message.block_parent_root).valueOr:
-    return errReject("SignedBlobSidecar: parent not validated")
+    return dag.checkedReject("SignedBlobSidecar: parent not validated")
 
   # [REJECT] The sidecar is from a higher slot than the sidecar's
   # block's parent (defined by sidecar.block_parent_root).
   if sbs.message.slot <= parent.bid.slot:
-    return errReject("SignedBlobSidecar: slot lower than parents'")
+    return dag.checkedReject("SignedBlobSidecar: slot lower than parents'")
 
   # [REJECT] The sidecar is proposed by the expected proposer_index
   # for the block's slot in the context of the current shuffling
@@ -273,7 +326,7 @@ proc validateBlobSidecar*(
     return errIgnore("SignedBlobSidecar: Cannot compute proposer")
 
   if uint64(proposer) != sbs.message.proposer_index:
-    return errReject("SignedBlobSidecar: Unexpected proposer")
+    return dag.checkedReject("SignedBlobSidecar: Unexpected proposer")
 
   # [REJECT] The proposer signature, signed_blob_sidecar.signature,
   # is valid as verified by verify_sidecar_signature.
@@ -284,7 +337,7 @@ proc validateBlobSidecar*(
       sbs.message,
       dag.validatorKey(proposer).get(),
       sbs.signature):
-    return errReject("SignedBlobSidecar: invalid blob signature")
+    return dag.checkedReject("SignedBlobSidecar: invalid blob signature")
 
   # [IGNORE] The sidecar is the only sidecar with valid signature
   # received for the tuple (sidecar.block_root, sidecar.index).
```
```diff
@@ -369,7 +422,8 @@ proc validateBeaconBlock*(
   # (via both gossip and non-gossip sources) (a client MAY queue blocks for
   # processing once the parent block is retrieved).
   #
-  # [REJECT] The block's parent (defined by block.parent_root) passes validation.
+  # [REJECT] The block's parent (defined by block.parent_root)
+  # passes validation.
   let parent = dag.getBlockRef(signed_beacon_block.message.parent_root).valueOr:
     if signed_beacon_block.message.parent_root in quarantine[].unviable:
       quarantine[].addUnviable(signed_beacon_block.root)
@@ -383,14 +437,15 @@ proc validateBeaconBlock*(
       # regardless of the validity of the execution payload. This prevents
       # network segregation between optimistic and non-optimistic nodes.
       #
-      # [IGNORE] The block's parent (defined by `block.parent_root`) passes all
-      # validation (including execution node verification of the
+      # [IGNORE] The block's parent (defined by `block.parent_root`)
+      # passes all validation (including execution node verification of the
       # `block.body.execution_payload`).
       return errIgnore("BeaconBlock: ignored, parent from unviable fork")
     else:
       # [REJECT] The block's parent (defined by `block.parent_root`) passes
       # validation.
-      return errReject("BeaconBlock: rejected, parent from unviable fork")
+      return dag.checkedReject(
+        "BeaconBlock: rejected, parent from unviable fork")
 
   # When the parent is missing, we can't validate the block - we'll queue it
   # in the quarantine for later processing
@@ -408,7 +463,8 @@ proc validateBeaconBlock*(
 
   # [REJECT] The block is from a higher slot than its parent.
   if not (signed_beacon_block.message.slot > parent.bid.slot):
-    return errReject("BeaconBlock: block not from higher slot than its parent")
+    return dag.checkedReject(
+      "BeaconBlock: block not from higher slot than its parent")
 
   # [REJECT] The current finalized_checkpoint is an ancestor of block -- i.e.
   # get_ancestor(store, block.parent_root,
@@ -427,7 +483,8 @@ proc validateBeaconBlock*(
       finalized_checkpoint.root == ancestor.root or
       finalized_checkpoint.root.isZero):
     quarantine[].addUnviable(signed_beacon_block.root)
-    return errReject("BeaconBlock: Finalized checkpoint not an ancestor")
+    return dag.checkedReject(
+      "BeaconBlock: Finalized checkpoint not an ancestor")
 
   # [REJECT] The block is proposed by the expected proposer_index for the
   # block's slot in the context of the current shuffling (defined by
@@ -443,7 +500,7 @@ proc validateBeaconBlock*(
 
   if uint64(proposer) != signed_beacon_block.message.proposer_index:
     quarantine[].addUnviable(signed_beacon_block.root)
-    return errReject("BeaconBlock: Unexpected proposer proposer")
+    return dag.checkedReject("BeaconBlock: Unexpected proposer proposer")
 
   # [REJECT] The proposer signature, signed_beacon_block.signature, is valid
   # with respect to the proposer_index pubkey.
@@ -455,7 +512,7 @@ proc validateBeaconBlock*(
       dag.validatorKey(proposer).get(),
       signed_beacon_block.signature):
     quarantine[].addUnviable(signed_beacon_block.root)
-    return errReject("BeaconBlock: Invalid proposer signature")
+    return dag.checkedReject("BeaconBlock: Invalid proposer signature")
 
   ok()
 
```
```diff
@@ -480,7 +537,7 @@ proc validateAttestation*(
   let slot = block:
     let v = check_attestation_slot_target(attestation.data)
     if v.isErr():
-      return errReject(v.error())
+      return pool.checkedReject(v.error())
     v.get()
 
   # attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE
@@ -489,28 +546,28 @@ proc validateAttestation*(
   # >= attestation.data.slot (a client MAY queue future attestations for
   # processing at the appropriate slot).
   block:
-    let v = check_propagation_slot_range(slot, wallTime) # [IGNORE]
-    if v.isErr():
+    let v = check_propagation_slot_range(slot, wallTime)
+    if v.isErr(): # [IGNORE]
       return err(v.error())
 
   # The attestation is unaggregated -- that is, it has exactly one
   # participating validator (len([bit for bit in attestation.aggregation_bits
   # if bit == 0b1]) == 1).
   block:
-    let v = check_aggregation_count(attestation, singular = true) # [REJECT]
-    if v.isErr():
-      return checkedReject(v.error)
+    let v = check_aggregation_count(attestation, singular = true)
+    if v.isErr(): # [REJECT]
+      return pool.checkedReject(v.error)
 
   # The block being voted for (attestation.data.beacon_block_root) has been seen
-  # (via both gossip and non-gossip sources) (a client MAY queue attestations for
-  # processing once block is retrieved).
-  # The block being voted for (attestation.data.beacon_block_root) passes
-  # validation.
+  # (via both gossip and non-gossip sources) (a client MAY queue attestations
+  # for processing once block is retrieved).
+  # [REJECT] The block being voted for (attestation.data.beacon_block_root)
+  # passes validation.
   # [IGNORE] if block is unseen so far and enqueue it in missing blocks
   let target = block:
-    let v = check_beacon_and_target_block(pool[], attestation.data) # [IGNORE/REJECT]
-    if v.isErr():
-      return err(v.error)
+    let v = check_beacon_and_target_block(pool[], attestation.data)
+    if v.isErr(): # [IGNORE/REJECT]
+      return pool.checkedResult(v.error)
     v.get()
 
   # The following rule follows implicitly from that we clear out any
@@ -534,7 +591,8 @@ proc validateAttestation*(
   let committee_index = block:
     let idx = shufflingRef.get_committee_index(attestation.data.index)
     if idx.isErr():
-      return checkedReject("Attestation: committee index not within expected range")
+      return pool.checkedReject(
+        "Attestation: committee index not within expected range")
     idx.get()
 
   # [REJECT] The attestation is for the correct subnet -- i.e.
@@ -545,9 +603,9 @@ proc validateAttestation*(
   # committee information for the signature check.
   block:
     let v = check_attestation_subnet(
-      shufflingRef, attestation.data.slot, committee_index, subnet_id) # [REJECT]
-    if v.isErr():
-      return err(v.error)
+      shufflingRef, attestation.data.slot, committee_index, subnet_id)
+    if v.isErr(): # [REJECT]
+      return pool.checkedReject(v.error)
 
   # [REJECT] The number of aggregation bits matches the committee size -- i.e.
   # len(attestation.aggregation_bits) == len(get_beacon_committee(state,
@@ -558,7 +616,7 @@ proc validateAttestation*(
   # attestation.data.beacon_block_root.
   if not (attestation.aggregation_bits.lenu64 == get_beacon_committee_len(
       shufflingRef, attestation.data.slot, committee_index)):
-    return checkedReject(
+    return pool.checkedReject(
       "Attestation: number of aggregation bits and committee size mismatch")
 
   let
@@ -568,7 +626,8 @@ proc validateAttestation*(
 
   # The number of aggregation bits matches the committee size, which ensures
   # this condition holds.
-  doAssert attesting_index.isSome(), "We've checked bits length and one count already"
+  doAssert attesting_index.isSome(),
+    "We've checked bits length and one count already"
   let validator_index = attesting_index.get()
 
   # There has been no other valid attestation seen on an attestation subnet
@@ -595,14 +654,14 @@ proc validateAttestation*(
         fork, attestation.data, pubkey,
         attestation.signature)
       if deferredCrypto.isErr():
-        return checkedReject(deferredCrypto.error)
+        return pool.checkedReject(deferredCrypto.error)
 
       let (cryptoFut, sig) = deferredCrypto.get()
       # Await the crypto check
       let x = (await cryptoFut)
       case x
       of BatchResult.Invalid:
-        return checkedReject("Attestation: invalid signature")
+        return pool.checkedReject("Attestation: invalid signature")
       of BatchResult.Timeout:
         beacon_attestations_dropped_queue_full.inc()
         return errIgnore("Attestation: timeout checking signature")
@@ -610,7 +669,7 @@ proc validateAttestation*(
       sig # keep going only in this case
     else:
      attestation.signature.load().valueOr:
-        return checkedReject("Attestation: unable to load signature")
+        return pool.checkedReject("Attestation: unable to load signature")
 
   # Only valid attestations go in the list, which keeps validator_index
   # in range
```
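A recurring mechanical change in the hunks above: the `# [IGNORE]` / `# [REJECT]` spec tags move from the line that performs a check onto the `if v.isErr():` line, since the classification describes what a failure means, not the check itself. In sketch form, with a hypothetical mock check standing in for helpers like check_propagation_slot_range:

```nim
import std/options

# Mock check: returns an error message on failure, nothing on success.
proc checkSlotRange(slot: int): Option[string] =
  if slot < 0:
    some("slot out of range")
  else:
    none(string)

proc validate(slot: int): string =
  let v = checkSlotRange(slot)
  if v.isSome: # [IGNORE] -- the tag documents the failure path
    return "IGNORE: " & v.get()
  "ACCEPT"

when isMainModule:
  doAssert validate(-1) == "IGNORE: slot out of range"
  doAssert validate(7) == "ACCEPT"
```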
```diff
@@ -644,7 +703,7 @@ proc validateAggregate*(
   let slot = block:
     let v = check_attestation_slot_target(aggregate.data)
     if v.isErr():
-      return checkedReject(v.error)
+      return pool.checkedReject(v.error)
     v.get()
 
   # [IGNORE] aggregate.data.slot is within the last
@@ -652,8 +711,8 @@ proc validateAggregate*(
   # MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
   # ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot
   block:
-    let v = check_propagation_slot_range(slot, wallTime) # [IGNORE]
-    if v.isErr():
+    let v = check_propagation_slot_range(slot, wallTime)
+    if v.isErr(): # [IGNORE]
       return err(v.error())
 
   # [IGNORE] The aggregate is the first valid aggregate received for the
@@ -682,17 +741,17 @@ proc validateAggregate*(
   # But (2) would reflect an invalid aggregation in other ways, so reject it
   # either way.
   block:
-    let v = check_aggregation_count(aggregate, singular = false) # [REJECT]
-    if v.isErr():
-      return err(v.error)
+    let v = check_aggregation_count(aggregate, singular = false)
+    if v.isErr(): # [REJECT]
+      return pool.checkedReject(v.error)
 
   # [REJECT] The block being voted for (aggregate.data.beacon_block_root)
   # passes validation.
   # [IGNORE] if block is unseen so far and enqueue it in missing blocks
   let target = block:
-    let v = check_beacon_and_target_block(pool[], aggregate.data) # [IGNORE/REJECT]
-    if v.isErr():
-      return err(v.error)
+    let v = check_beacon_and_target_block(pool[], aggregate.data)
+    if v.isErr(): # [IGNORE/REJECT]
+      return pool.checkedResult(v.error)
     v.get()
 
   if checkCover and
@@ -716,7 +775,8 @@ proc validateAggregate*(
   let committee_index = block:
     let idx = shufflingRef.get_committee_index(aggregate.data.index)
     if idx.isErr():
-      return checkedReject("Attestation: committee index not within expected range")
+      return pool.checkedReject(
+        "Attestation: committee index not within expected range")
     idx.get()
 
   # [REJECT] aggregate_and_proof.selection_proof selects the validator as an
@@ -724,7 +784,7 @@ proc validateAggregate*(
   # aggregate.data.index, aggregate_and_proof.selection_proof) returns True.
   if not is_aggregator(
       shufflingRef, slot, committee_index, aggregate_and_proof.selection_proof):
-    return checkedReject("Aggregate: incorrect aggregator")
+    return pool.checkedReject("Aggregate: incorrect aggregator")
 
   # [REJECT] The aggregator's validator index is within the committee -- i.e.
   # aggregate_and_proof.aggregator_index in get_beacon_committee(state,
@@ -732,17 +792,19 @@ proc validateAggregate*(
 
   let aggregator_index =
     ValidatorIndex.init(aggregate_and_proof.aggregator_index).valueOr:
-      return checkedReject("Aggregate: invalid aggregator index")
+      return pool.checkedReject("Aggregate: invalid aggregator index")
 
   if aggregator_index notin
       get_beacon_committee(shufflingRef, slot, committee_index):
-    return checkedReject("Aggregate: aggregator's validator index not in committee")
+    return pool.checkedReject(
+      "Aggregate: aggregator's validator index not in committee")
 
-  # 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the
-  #    aggregate.data.slot by the validator with index
+  # 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature
+  #    of the aggregate.data.slot by the validator with index
   #    aggregate_and_proof.aggregator_index.
   #    get_slot_signature(state, aggregate.data.slot, privkey)
-  # 2. [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
+  # 2. [REJECT] The aggregator signature,
+  #    signed_aggregate_and_proof.signature, is valid.
   # 3. [REJECT] The signature of aggregate is valid.
 
   let
@@ -758,17 +820,18 @@ proc validateAggregate*(
         attesting_indices
       )
       if deferredCrypto.isErr():
-        return checkedReject(deferredCrypto.error)
+        return pool.checkedReject(deferredCrypto.error)
 
       let
         (aggregatorFut, slotFut, aggregateFut, sig) = deferredCrypto.get()
 
       block:
-        # [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
+        # [REJECT] The aggregator signature,
+        # signed_aggregate_and_proof.signature, is valid.
         let x = await aggregatorFut
         case x
         of BatchResult.Invalid:
-          return checkedReject("Aggregate: invalid aggregator signature")
+          return pool.checkedReject("Aggregate: invalid aggregator signature")
         of BatchResult.Timeout:
           beacon_aggregates_dropped_queue_full.inc()
           return errIgnore("Aggregate: timeout checking aggregator signature")
@@ -780,7 +843,7 @@ proc validateAggregate*(
         let x = await slotFut
         case x
         of BatchResult.Invalid:
-          return checkedReject("Aggregate: invalid slot signature")
+          return pool.checkedReject("Aggregate: invalid slot signature")
         of BatchResult.Timeout:
           beacon_aggregates_dropped_queue_full.inc()
           return errIgnore("Aggregate: timeout checking slot signature")
@@ -788,11 +851,12 @@ proc validateAggregate*(
           discard
 
       block:
-        # [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
+        # [REJECT] The aggregator signature,
+        # signed_aggregate_and_proof.signature, is valid.
         let x = await aggregateFut
         case x
         of BatchResult.Invalid:
-          return checkedReject("Aggregate: invalid aggregate signature")
+          return pool.checkedReject("Aggregate: invalid aggregate signature")
         of BatchResult.Timeout:
           beacon_aggregates_dropped_queue_full.inc()
           return errIgnore("Aggregate: timeout checking aggregate signature")
@@ -801,7 +865,7 @@ proc validateAggregate*(
       sig
     else:
       aggregate.signature.load().valueOr:
-        return checkedReject("Aggregate: unable to load signature")
+        return pool.checkedReject("Aggregate: unable to load signature")
 
   # The following rule follows implicitly from that we clear out any
   # unviable blocks from the chain dag:
```
```diff
@@ -829,38 +893,43 @@ proc validateBlsToExecutionChange*(
   # [IGNORE] `current_epoch >= CAPELLA_FORK_EPOCH`, where `current_epoch` is
   # defined by the current wall-clock time.
   if not (wallEpoch >= pool.dag.cfg.CAPELLA_FORK_EPOCH):
-    return errIgnore("validateBlsToExecutionChange: not accepting gossip until Capella")
+    return errIgnore(
+      "SignedBLSToExecutionChange: not accepting gossip until Capella")
 
   # [IGNORE] The `signed_bls_to_execution_change` is the first valid signed bls
   # to execution change received for the validator with index
   # `signed_bls_to_execution_change.message.validator_index`.
   if pool.isSeen(signed_address_change):
-    return errIgnore("validateBlsToExecutionChange: not first signed BLS to execution change received for validator index")
+    return errIgnore(
+      "SignedBLSToExecutionChange: not first valid change for validator index")
 
   # [REJECT] All of the conditions within `process_bls_to_execution_change`
   # pass validation.
   withState(pool.dag.headState):
     when consensusFork < ConsensusFork.Capella:
-      return errIgnore("validateBlsToExecutionChange: can't validate against pre-Capella state")
+      return errIgnore(
+        "SignedBLSToExecutionChange: can't validate against pre-Capella state")
     else:
       let res = check_bls_to_execution_change(
         pool.dag.cfg.genesisFork, forkyState.data, signed_address_change,
         {skipBlsValidation})
       if res.isErr:
-        return errReject(res.error)
+        return pool.checkedReject(res.error)
 
   # BLS to execution change signatures are batch-verified
   let deferredCrypto = batchCrypto.scheduleBlsToExecutionChangeCheck(
     pool.dag.cfg.genesisFork, signed_address_change)
   if deferredCrypto.isErr():
-    return checkedReject(deferredCrypto.error)
+    return pool.checkedReject(deferredCrypto.error)
 
   let (cryptoFut, sig) = deferredCrypto.get()
   case await cryptoFut
   of BatchResult.Invalid:
-    return checkedReject("validateBlsToExecutionChange: invalid signature")
+    return pool.checkedReject(
+      "SignedBLSToExecutionChange: invalid signature")
   of BatchResult.Timeout:
-    return errIgnore("validateBlsToExecutionChange: timeout checking signature")
+    return errIgnore(
+      "SignedBLSToExecutionChange: timeout checking signature")
   of BatchResult.Valid:
     discard # keep going only in this case
```
```diff
@@ -883,7 +952,7 @@ proc validateAttesterSlashing*(
   let attester_slashing_validity =
     check_attester_slashing(pool.dag.headState, attester_slashing, {})
   if attester_slashing_validity.isErr:
-    return err((ValidationResult.Reject, attester_slashing_validity.error))
+    return pool.checkedReject(attester_slashing_validity.error)
 
   ok()
 
@@ -892,7 +961,7 @@ proc validateProposerSlashing*(
     pool: ValidatorChangePool, proposer_slashing: ProposerSlashing):
     Result[void, ValidationError] =
   # Not from spec; the rest of NBC wouldn't have correctly processed it either.
-  if proposer_slashing.signed_header_1.message.proposer_index > high(int).uint64:
+  if proposer_slashing.signed_header_1.message.proposer_index > int.high.uint64:
     return errIgnore("ProposerSlashing: proposer-slashed index too high")
 
   # [IGNORE] The proposer slashing is the first valid proposer slashing
@@ -902,11 +971,12 @@ proc validateProposerSlashing*(
     return errIgnore(
       "ProposerSlashing: proposer-slashed index already proposer-slashed")
 
-  # [REJECT] All of the conditions within process_proposer_slashing pass validation.
+  # [REJECT] All of the conditions within process_proposer_slashing
+  # pass validation.
   let proposer_slashing_validity =
     check_proposer_slashing(pool.dag.headState, proposer_slashing, {})
   if proposer_slashing_validity.isErr:
-    return err((ValidationResult.Reject, proposer_slashing_validity.error))
+    return pool.checkedReject(proposer_slashing_validity.error)
 
   ok()
 
```
```diff
@@ -924,7 +994,8 @@ proc validateVoluntaryExit*(
   # signed_voluntary_exit.message.validator_index.int is already valid, but
   # check explicitly if one changes that data structure.
   if pool.isSeen(signed_voluntary_exit):
-    return errIgnore("VoluntaryExit: validator index already voluntarily exited")
+    return errIgnore(
+      "VoluntaryExit: validator index already voluntarily exited")
 
   # [REJECT] All of the conditions within process_voluntary_exit pass
   # validation.
@@ -932,7 +1003,7 @@ proc validateVoluntaryExit*(
     check_voluntary_exit(
       pool.dag.cfg, pool.dag.headState, signed_voluntary_exit, {})
   if voluntary_exit_validity.isErr:
-    return err((ValidationResult.Reject, voluntary_exit_validity.error))
+    return pool.checkedReject(voluntary_exit_validity.error)
 
   # Send notification about new voluntary exit via callback
   if not(isNil(pool.onVoluntaryExitReceived)):
```
```diff
@@ -967,7 +1038,7 @@ proc validateSyncCommitteeMessage*(
       msg.slot + 1, subcommitteeIdx, msg.validator_index)
 
   if positionsInSubcommittee.len == 0:
-    return errReject(
+    return dag.checkedReject(
       "SyncCommitteeMessage: originator not part of sync committee")
 
   block:
@@ -987,7 +1058,7 @@ proc validateSyncCommitteeMessage*(
     epoch = msg.slot.epoch
     fork = dag.forkAtEpoch(epoch)
     senderPubKey = dag.validatorKey(msg.validator_index).valueOr:
-      return errReject("SyncCommitteeMessage: invalid validator index")
+      return dag.checkedReject("SyncCommitteeMessage: invalid validator index")
 
   let sig =
     if checkSignature:
@@ -997,7 +1068,7 @@ proc validateSyncCommitteeMessage*(
         fork, msg.slot, msg.beacon_block_root,
         senderPubKey, msg.signature)
       if deferredCrypto.isErr():
-        return errReject(deferredCrypto.error)
+        return dag.checkedReject(deferredCrypto.error)
 
       # Await the crypto check
       let
@@ -1006,7 +1077,7 @@ proc validateSyncCommitteeMessage*(
       let x = (await cryptoFut)
       case x
       of BatchResult.Invalid:
-        return errReject("SyncCommitteeMessage: invalid signature")
+        return dag.checkedReject("SyncCommitteeMessage: invalid signature")
       of BatchResult.Timeout:
         beacon_sync_messages_dropped_queue_full.inc()
         return errIgnore("SyncCommitteeMessage: timeout checking signature")
@@ -1014,7 +1085,8 @@ proc validateSyncCommitteeMessage*(
       sig # keep going only in this case
     else:
       msg.signature.load().valueOr:
-        return errReject("SyncCommitteeMessage: unable to load signature")
+        return dag.checkedReject(
+          "SyncCommitteeMessage: unable to load signature")
 
   return ok((positionsInSubcommittee, sig))
 
```
```diff
@@ -1025,8 +1097,8 @@ proc validateContribution*(
     syncCommitteeMsgPool: ref SyncCommitteeMsgPool,
     msg: SignedContributionAndProof,
     wallTime: BeaconTime,
-    checkSignature: bool):
-    Future[Result[(CookedSig, seq[ValidatorIndex]), ValidationError]] {.async.} =
+    checkSignature: bool
+): Future[Result[(CookedSig, seq[ValidatorIndex]), ValidationError]] {.async.} =
   let
     syncCommitteeSlot = msg.message.contribution.slot
 
@@ -1034,24 +1106,29 @@ proc validateContribution*(
   # (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance)
   # i.e. contribution.slot == current_slot.
   block:
-    let v = check_propagation_slot_range(syncCommitteeSlot, wallTime) # [IGNORE]
-    if v.isErr():
+    let v = check_propagation_slot_range(syncCommitteeSlot, wallTime)
+    if v.isErr(): # [IGNORE]
       return err(v.error())
 
   # [REJECT] The subcommittee index is in the allowed range
   # i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT.
   let subcommitteeIdx = SyncSubcommitteeIndex.init(
       msg.message.contribution.subcommittee_index).valueOr:
-    return errReject("SignedContributionAndProof: subcommittee index too high")
+    return dag.checkedReject(
+      "SignedContributionAndProof: subcommittee index too high")
 
-  # [REJECT] contribution_and_proof.selection_proof selects the validator as an aggregator for the slot
-  # i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof) returns True.
+  # [REJECT] contribution_and_proof.selection_proof selects the validator
+  # as an aggregator for the slot
+  # i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof)
+  # returns True.
   if not is_sync_committee_aggregator(msg.message.selection_proof):
-    return errReject("SignedContributionAndProof: invalid selection_proof")
+    return dag.checkedReject(
+      "SignedContributionAndProof: invalid selection_proof")
 
-  # [IGNORE] The sync committee contribution is the first valid contribution
-  # received for the aggregator with index contribution_and_proof.aggregator_index
-  # for the slot contribution.slot and subcommittee index contribution.subcommittee_index
+  # [IGNORE] The sync committee contribution is the first valid
+  # contribution received for the aggregator with index
+  # contribution_and_proof.aggregator_index for the slot contribution.slot
+  # and subcommittee index contribution.subcommittee_index
   # (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this
   # topic that can be flushed after each slot).
   if syncCommitteeMsgPool[].isSeen(msg.message):
@@ -1068,11 +1145,12 @@ proc validateContribution*(
   if msg.message.contribution.aggregation_bits.countOnes() == 0:
     # [REJECT] The contribution has participants
     # that is, any(contribution.aggregation_bits).
-    return errReject("SignedContributionAndProof: aggregation bits empty")
+    return dag.checkedReject(
+      "SignedContributionAndProof: aggregation bits empty")
 
-  # _[IGNORE]_ A valid sync committee contribution with equal `slot`, `beacon_block_root`
-  # and `subcommittee_index` whose `aggregation_bits` is non-strict superset has _not_
-  # already been seen.
+  # [IGNORE] A valid sync committee contribution with equal `slot`,
+  # `beacon_block_root` and `subcommittee_index` whose `aggregation_bits`
+  # is non-strict superset has _not_ already been seen.
   if syncCommitteeMsgPool[].covers(msg.message.contribution):
     return errIgnore("SignedContributionAndProof: duplicate contribution")
 
@@ -1086,20 +1164,23 @@ proc validateContribution*(
       let deferredCrypto = batchCrypto.scheduleContributionChecks(
         fork, msg, subcommitteeIdx, dag)
       if deferredCrypto.isErr():
-        return errReject(deferredCrypto.error)
+        return dag.checkedReject(deferredCrypto.error)
 
       let
         (aggregatorFut, proofFut, contributionFut, sig) = deferredCrypto.get()
 
      block:
-        # [REJECT] The aggregator signature, signed_contribution_and_proof.signature, is valid
+        # [REJECT] The aggregator signature,
+        # signed_contribution_and_proof.signature, is valid
        let x = await aggregatorFut
        case x
        of BatchResult.Invalid:
-          return errReject("SignedContributionAndProof: invalid aggregator signature")
+          return dag.checkedReject(
+            "SignedContributionAndProof: invalid aggregator signature")
        of BatchResult.Timeout:
          beacon_contributions_dropped_queue_full.inc()
-          return errIgnore("SignedContributionAndProof: timeout checking aggregator signature")
+          return errIgnore(
+            "SignedContributionAndProof: timeout checking aggregator signature")
        of BatchResult.Valid:
          discard
 
@@ -1107,7 +1188,7 @@ proc validateContribution*(
        let x = await proofFut
        case x
        of BatchResult.Invalid:
-          return errReject("SignedContributionAndProof: invalid proof")
+          return dag.checkedReject("SignedContributionAndProof: invalid proof")
        of BatchResult.Timeout:
          beacon_contributions_dropped_queue_full.inc()
          return errIgnore("SignedContributionAndProof: timeout checking proof")
@@ -1115,20 +1196,23 @@ proc validateContribution*(
          discard
 
      block:
-        # [REJECT] The aggregator signature, signed_aggregate_and_proof.signature, is valid.
+        # [REJECT] The aggregator signature,
+        # signed_aggregate_and_proof.signature, is valid.
        let x = await contributionFut
        case x
        of BatchResult.Invalid:
-          return errReject("SignedContributionAndProof: invalid contribution signature")
+          return dag.checkedReject(
+            "SignedContributionAndProof: invalid contribution signature")
        of BatchResult.Timeout:
          beacon_contributions_dropped_queue_full.inc()
-          return errIgnore("SignedContributionAndProof: timeout checking contribution signature")
+          return errIgnore(
+            "SignedContributionAndProof: timeout checking contribution signature")
        of BatchResult.Valid:
          discard
      sig
    else:
      msg.message.contribution.signature.load().valueOr:
-        return errReject("SyncCommitteeMessage: unable to load signature")
+        return dag.checkedReject("SyncCommitteeMessage: unable to load signature")
 
   return ok((sig, participants))
 
```
```diff
@@ -93,6 +93,7 @@ type
     dumpEnabled: bool
     dumpDirInvalid: string
     dumpDirIncoming: string
+    strictVerification: bool
 
   # Consumer
   # ----------------------------------------------------------------
```
```diff
@@ -144,12 +145,14 @@ proc new*(
     bootstrapObserver: BootstrapObserver = nil,
     updateObserver: UpdateObserver = nil,
     finalityUpdateObserver: FinalityUpdateObserver = nil,
-    optimisticUpdateObserver: OptimisticUpdateObserver = nil
+    optimisticUpdateObserver: OptimisticUpdateObserver = nil,
+    strictVerification = false
 ): ref LightClientProcessor =
   (ref LightClientProcessor)(
     dumpEnabled: dumpEnabled,
     dumpDirInvalid: dumpDirInvalid,
     dumpDirIncoming: dumpDirIncoming,
+    strictVerification: strictVerification,
     store: store,
     getBeaconTime: getBeaconTime,
     getTrustedBlockRoot: getTrustedBlockRoot,
```
```diff
@@ -535,8 +538,10 @@ proc processLightClientFinalityUpdate*(
     wallTime = self.getBeaconTime()
     r = self.storeObject(src, wallTime, finality_update)
     v = self.toValidationError(r, wallTime, finality_update)
-  if v.isOk:
-    self.latestFinalityUpdate = finality_update.toOptimistic
+  if v.isErr:
+    return checkedResult(v.error, self.strictVerification)
+
+  self.latestFinalityUpdate = finality_update.toOptimistic
   v
 
 # https://github.com/ethereum/consensus-specs/blob/v1.3.0-rc.3/specs/altair/light-client/sync-protocol.md#process_light_client_finality_update
@@ -548,18 +553,20 @@ proc processLightClientOptimisticUpdate*(
     wallTime = self.getBeaconTime()
     r = self.storeObject(src, wallTime, optimistic_update)
     v = self.toValidationError(r, wallTime, optimistic_update)
-  if v.isOk:
-    let
-      latestFinalitySlot = withForkyOptimisticUpdate(self.latestFinalityUpdate):
-        when lcDataFork > LightClientDataFork.None:
-          forkyOptimisticUpdate.attested_header.beacon.slot
-        else:
-          GENESIS_SLOT
-      attestedSlot = withForkyOptimisticUpdate(optimistic_update):
-        when lcDataFork > LightClientDataFork.None:
-          forkyOptimisticUpdate.attested_header.beacon.slot
-        else:
-          GENESIS_SLOT
-    if attestedSlot >= latestFinalitySlot:
-      self.latestFinalityUpdate.reset() # Only forward once
+  if v.isErr:
+    return checkedResult(v.error, self.strictVerification)
+
+  let
+    latestFinalitySlot = withForkyOptimisticUpdate(self.latestFinalityUpdate):
+      when lcDataFork > LightClientDataFork.None:
+        forkyOptimisticUpdate.attested_header.beacon.slot
+      else:
+        GENESIS_SLOT
+    attestedSlot = withForkyOptimisticUpdate(optimistic_update):
+      when lcDataFork > LightClientDataFork.None:
+        forkyOptimisticUpdate.attested_header.beacon.slot
+      else:
+        GENESIS_SLOT
+  if attestedSlot >= latestFinalitySlot:
+    self.latestFinalityUpdate.reset() # Only forward once
   v
```
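On the light-client side the same idea is threaded through a processor field rather than the DAG's update flags: store failures now take an early `checkedResult` exit, and only accepted updates reach the success-path bookkeeping. A behavioral sketch with stand-in types (not the real `LightClientProcessor`):

```nim
type
  ValidationResult = enum
    Accept, Ignore, Reject
  ValidationError = tuple[res: ValidationResult, msg: string]
  MockProcessor = object
    strictVerification: bool
    latestForwarded: string

proc process(self: var MockProcessor, update: string,
             outcome: ValidationError): ValidationError =
  if outcome.res != Accept:
    # Early exit: under strict verification a REJECT crashes here,
    # while an IGNORE is simply handed back to the caller.
    if outcome.res == Reject and self.strictVerification:
      raiseAssert outcome.msg
    return outcome
  # Success-path bookkeeping runs only for accepted updates.
  self.latestForwarded = update
  outcome

when isMainModule:
  var p = MockProcessor(strictVerification: true)
  discard p.process("update-1", (Accept, ""))
  doAssert p.latestForwarded == "update-1"
  doAssert p.process("update-2", (Ignore, "duplicate")).res == Ignore
```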
```diff
@@ -90,7 +90,8 @@ proc createLightClient(
     forkDigests: ref ForkDigests,
     getBeaconTime: GetBeaconTimeFn,
     genesis_validators_root: Eth2Digest,
-    finalizationMode: LightClientFinalizationMode
+    finalizationMode: LightClientFinalizationMode,
+    strictVerification = false
 ): LightClient =
   let lightClient = LightClient(
     network: network,
@@ -136,7 +137,8 @@ proc createLightClient(
     cfg, genesis_validators_root, finalizationMode,
     lightClient.store, getBeaconTime, getTrustedBlockRoot,
     onStoreInitialized, onFinalizedHeader, onOptimisticHeader,
-    bootstrapObserver, updateObserver, finalityObserver, optimisticObserver)
+    bootstrapObserver, updateObserver, finalityObserver, optimisticObserver,
+    strictVerification)
 
 proc lightClientVerifier(obj: SomeForkedLightClientObject):
     Future[Result[void, VerifierError]] =
@@ -199,7 +201,8 @@ proc createLightClient*(
   createLightClient(
     network, rng,
     config.dumpEnabled, config.dumpDirInvalid, config.dumpDirIncoming,
-    cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode)
+    cfg, forkDigests, getBeaconTime, genesis_validators_root, finalizationMode,
+    strictVerification = config.strictVerification)
 
 proc createLightClient*(
     network: Eth2Node,
```