* cleanups

* use ForkedTrustedSignedBeaconBlock.init where appropriate
* move `is_aggregator` to `spec/`
* use `errReject` in a few more places
* update ENR fork id when the time is auspicious
* use network broadcast functions

* Return Ignore for aggregate signature validation timeouts

...consistently between aggregates and attestations.

* clean up some more reject/ignore rules
* shorten texts a bit

* errReject->checkedReject, use err helpers throughout

* get rid of quarantine in exitpool as well
Jacek Sieka 2021-08-24 21:49:51 +02:00 committed by GitHub
parent c7d4659d32
commit ba06f13942
20 changed files with 136 additions and 172 deletions


@ -98,7 +98,6 @@ type
## Records voluntary exit indices seen.
dag*: ChainDAGRef
quarantine*: QuarantineRef
# #############################################
#


@ -132,7 +132,7 @@ func validatorKey*(
## at any point in time - this function may return pubkeys for indices that
## are not (yet) part of the head state (if the key has been observed on a
## non-head branch)!
epochRef.dag.validatorKey(index)
validatorKey(epochRef.dag, index)
func init*(
T: type EpochRef, dag: ChainDAGRef, state: StateData,
@ -148,7 +148,9 @@ func init*(
getStateField(state.data, current_justified_checkpoint),
finalized_checkpoint: getStateField(state.data, finalized_checkpoint),
shuffled_active_validator_indices:
cache.get_shuffled_active_validator_indices(state.data, epoch))
cache.get_shuffled_active_validator_indices(state.data, epoch)
)
for i in 0'u64..<SLOTS_PER_EPOCH:
epochRef.beacon_proposers[i] = get_beacon_proposer_index(
state.data, cache, epoch.compute_start_slot_at_epoch() + i)
@ -683,13 +685,11 @@ proc getForkedBlock*(dag: ChainDAGRef, blck: BlockRef): ForkedTrustedSignedBeaco
# TODO implement this properly
let phase0Block = dag.db.getBlock(blck.root)
if phase0Block.isOk:
return ForkedTrustedSignedBeaconBlock(kind: BeaconBlockFork.Phase0,
phase0Block: phase0Block.get)
return ForkedTrustedSignedBeaconBlock.init(phase0Block.get)
let altairBlock = dag.db.getAltairBlock(blck.root)
if altairBlock.isOk:
return ForkedTrustedSignedBeaconBlock(kind: BeaconBlockFork.Altair,
altairBlock: altairBlock.get)
return ForkedTrustedSignedBeaconBlock.init(altairBlock.get)
raiseAssert "BlockRef without backing data, database corrupt?"


@ -15,10 +15,10 @@ import
# Internal
../spec/[forks, helpers],
../spec/datatypes/base,
"."/[blockchain_dag, block_quarantine],
"."/[blockchain_dag],
../beacon_node_types
export beacon_node_types, intsets
export beacon_node_types, base, intsets, deques
logScope: topics = "exitpool"
@ -28,7 +28,7 @@ const
VOLUNTARY_EXITS_BOUND* = MAX_VOLUNTARY_EXITS * 2
proc init*(
T: type ExitPool, dag: ChainDAGRef, quarantine: QuarantineRef): T =
T: type ExitPool, dag: ChainDAGRef): T =
## Initialize an ExitPool from the dag `headState`
T(
# Allow for filtering out some exit messages during block production
@ -39,7 +39,6 @@ proc init*(
voluntary_exits:
initDeque[SignedVoluntaryExit](initialSize = VOLUNTARY_EXITS_BOUND.int),
dag: dag,
quarantine: quarantine
)
func addExitMessage*(subpool: var auto, exitMessage, bound: auto) =


@ -193,3 +193,9 @@ iterator get_committee_assignments*(
includedIndices, idx,
compute_subnet_for_attestation(committees_per_slot, slot, idx),
slot)
func is_aggregator*(epochRef: EpochRef, slot: Slot, index: CommitteeIndex,
slot_signature: ValidatorSig): bool =
let
committee_len = get_beacon_committee_len(epochRef, slot, index)
return is_aggregator(committee_len, slot_signature)


@ -9,7 +9,7 @@
import
# Standard library
std/[intsets, deques],
std/[intsets],
# Status
chronicles, chronos, metrics,
stew/results,
@ -18,11 +18,8 @@ import
../spec/[
beaconstate, state_transition_block, forks, helpers, network, signatures],
../consensus_object_pools/[
spec_cache, blockchain_dag, block_quarantine, spec_cache,
attestation_pool, exit_pool
],
attestation_pool, blockchain_dag, block_quarantine, exit_pool, spec_cache],
".."/[beacon_node_types, beacon_clock],
../validators/attestation_aggregation,
./batch_validation
from libp2p/protocols/pubsub/pubsub import ValidationResult
@ -38,6 +35,11 @@ declareCounter beacon_attestations_dropped_queue_full,
declareCounter beacon_aggregates_dropped_queue_full,
"Number of aggregates dropped because queue is full"
template errIgnore(msg: cstring): untyped =
err((ValidationResult.Ignore, cstring msg))
template errReject(msg: cstring): untyped =
err((ValidationResult.Reject, cstring msg))
# Internal checks
# ----------------------------------------------------------------
@ -50,20 +52,18 @@ func check_attestation_block(
# useless - other blocks that are not rooted in the finalized chain will be
# pruned by the chain dag, and thus we can no longer get a BlockRef for them
if not (blck.slot > pool.dag.finalizedHead.slot):
return err((ValidationResult.Ignore, cstring(
"Voting for already-finalized block")))
return errIgnore("Voting for already-finalized block")
# The attestation shouldn't be voting for a block that didn't exist at the
# time - not in spec, but hard to reason about
if not (attestationSlot >= blck.slot):
return err((ValidationResult.Ignore, cstring(
"Voting for block that didn't exist at the time")))
return errIgnore("Voting for block that didn't exist at the time")
# We'll also cap it at 4 epochs which is somewhat arbitrary, but puts an
# upper bound on the processing done to validate the attestation
# TODO revisit with less arbitrary approach
if not ((attestationSlot - blck.slot) <= uint64(4 * SLOTS_PER_EPOCH)):
return err((ValidationResult.Ignore, cstring("Voting for very old block")))
return errIgnore("Voting for very old block")
ok()
@ -74,8 +74,7 @@ func check_propagation_slot_range(
futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
if not futureSlot.afterGenesis or msgSlot > futureSlot.slot:
return err((ValidationResult.Ignore, cstring(
"Attestation slot in the future")))
return errIgnore("Attestation slot in the future")
let
pastSlot = (wallTime - MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
@ -88,8 +87,7 @@ func check_propagation_slot_range(
if pastSlot.afterGenesis and
msgSlot + ATTESTATION_PROPAGATION_SLOT_RANGE < pastSlot.slot:
return err((ValidationResult.Ignore, cstring(
"Attestation slot in the past")))
return errIgnore("Attestation slot in the past")
ok()
@ -104,7 +102,7 @@ func check_beacon_and_target_block(
let blck = pool.dag.getRef(data.beacon_block_root)
if blck.isNil:
pool.quarantine.addMissing(data.beacon_block_root)
return err((ValidationResult.Ignore, cstring("Attestation block unknown")))
return errIgnore("Attestation block unknown")
# Not in spec - check that rewinding to the state is sane
? check_attestation_block(pool, data.slot, blck)
@ -119,8 +117,8 @@ func check_beacon_and_target_block(
blck, compute_start_slot_at_epoch(data.target.epoch), SLOTS_PER_EPOCH.int)
if not (target.root == data.target.root):
return err((ValidationResult.Reject, cstring(
"attestation's target block not an ancestor of LMD vote block")))
return errIgnore(
"Attestation's target block not an ancestor of LMD vote block")
ok(target)
@ -130,11 +128,9 @@ func check_aggregation_count(
let ones = attestation.aggregation_bits.countOnes()
if singular and ones != 1:
return err((ValidationResult.Reject, cstring(
"Attestation must have a single attestation bit set")))
return errReject("Attestation must have a single attestation bit set")
elif not singular and ones < 1:
return err((ValidationResult.Reject, cstring(
"Attestation must have at least one attestation bit set")))
return errReject("Attestation must have at least one attestation bit set")
ok()
@ -148,15 +144,14 @@ func check_attestation_subnet(
attestation.data.slot, attestation.data.index.CommitteeIndex)
if expectedSubnet != subnet_id:
return err((ValidationResult.Reject, cstring(
"Attestation not on the correct subnet")))
return errReject("Attestation not on the correct subnet")
ok()
# Gossip Validation
# ----------------------------------------------------------------
template errReject(msg: cstring): untyped =
template checkedReject(msg: cstring): untyped =
if verifyFinalization in pool.dag.updateFlags:
# This doesn't depend on the wall clock or the exact state of the DAG; it's
# an internal consistency/correctness check only, and effectively never has
@ -164,7 +159,7 @@ template errReject(msg: cstring): untyped =
raiseAssert $msg
err((ValidationResult.Reject, cstring msg))
template errReject(error: (ValidationResult, cstring)): untyped =
template checkedReject(error: (ValidationResult, cstring)): untyped =
doAssert error[0] == ValidationResult.Reject
if verifyFinalization in pool.dag.updateFlags:
# This doesn't depend on the wall clock or the exact state of the DAG; it's
@ -173,9 +168,6 @@ template errReject(error: (ValidationResult, cstring)): untyped =
raiseAssert $error[1]
err(error)
template errIgnore(msg: cstring): untyped =
err((ValidationResult.Ignore, cstring msg))
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
proc validateAttestation*(
pool: ref AttestationPool,
@ -196,7 +188,7 @@ proc validateAttestation*(
block:
let v = check_attestation_slot_target(attestation.data)
if v.isErr():
return errReject(v.error)
return checkedReject(v.error)
# attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE
# slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
@ -214,7 +206,7 @@ proc validateAttestation*(
block:
let v = check_aggregation_count(attestation, singular = true) # [REJECT]
if v.isErr():
return errReject(v.error)
return checkedReject(v.error)
# The block being voted for (attestation.data.beacon_block_root) has been seen
# (via both gossip and non-gossip sources) (a client MAY queue attestations for
@ -242,8 +234,7 @@ proc validateAttestation*(
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
if not (attestation.data.index < get_committee_count_per_slot(epochRef)):
return errReject(cstring(
"validateAttestation: committee index not within expected range"))
return checkedReject("Attestation: committee index not within expected range")
# [REJECT] The attestation is for the correct subnet -- i.e.
# compute_subnet_for_attestation(committees_per_slot,
@ -265,8 +256,8 @@ proc validateAttestation*(
# attestation.data.beacon_block_root.
if not (attestation.aggregation_bits.lenu64 == get_beacon_committee_len(
epochRef, attestation.data.slot, attestation.data.index.CommitteeIndex)):
return errReject(cstring(
"validateAttestation: number of aggregation bits and committee size mismatch"))
return checkedReject(
"Attestation: number of aggregation bits and committee size mismatch")
let
fork = pool.dag.forkAtEpoch(attestation.data.slot.epoch)
@ -288,8 +279,7 @@ proc validateAttestation*(
if (pool.nextAttestationEpoch.lenu64 > validator_index.uint64) and
pool.nextAttestationEpoch[validator_index].subnet >
attestation.data.target.epoch:
return err((ValidationResult.Ignore, cstring(
"Validator has already voted in epoch")))
return errIgnore("Attestation: Validator has already voted in epoch")
block:
# First pass - without cryptography
@ -297,7 +287,7 @@ proc validateAttestation*(
fork, genesis_validators_root, epochRef, attestation,
{skipBLSValidation})
if v.isErr():
return errReject(v.error)
return checkedReject(v.error)
let sig =
if checkSignature:
@ -308,7 +298,7 @@ proc validateAttestation*(
attestation
)
if deferredCrypto.isErr():
return errReject(deferredCrypto.error)
return checkedReject(deferredCrypto.error)
# Await the crypto check
let
@ -317,18 +307,16 @@ proc validateAttestation*(
var x = (await cryptoFut)
case x
of BatchResult.Invalid:
return errReject("validateAttestation: invalid signature")
return checkedReject("Attestation: invalid signature")
of BatchResult.Timeout:
beacon_attestations_dropped_queue_full.inc()
return err((ValidationResult.Ignore, cstring("validateAttestation: timeout checking signature")))
return errIgnore("Attestation: timeout checking signature")
of BatchResult.Valid:
sig # keep going only in this case
else:
let sig = attestation.signature.load()
if not sig.isSome():
return err((
ValidationResult.Ignore,
cstring("validateAttestation: unable to load signature")))
return checkedReject("Attestation: unable to load signature")
sig.get()
# Only valid attestations go in the list, which keeps validator_index
@ -361,7 +349,7 @@ proc validateAggregate*(
block:
let v = check_attestation_slot_target(aggregate.data)
if v.isErr():
return errReject(v.error)
return checkedReject(v.error)
# [IGNORE] aggregate.data.slot is within the last
# ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
@ -390,7 +378,7 @@ proc validateAggregate*(
pool.nextAttestationEpoch[
aggregate_and_proof.aggregator_index].aggregate >
aggregate.data.target.epoch:
return errIgnore("validateAggregate: Validator has already aggregated in epoch")
return errIgnore("Aggregate: validator has already aggregated in epoch")
# [REJECT] The attestation has participants -- that is,
# len(get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)) >= 1.
@ -425,10 +413,15 @@ proc validateAggregate*(
let
epochRef = pool.dag.getEpochRef(target, aggregate.data.target.epoch)
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
if not (aggregate.data.index < get_committee_count_per_slot(epochRef)):
return checkedReject("Aggregate: committee index not within expected range")
if not is_aggregator(
epochRef, aggregate.data.slot, aggregate.data.index.CommitteeIndex,
aggregate_and_proof.selection_proof):
return errReject(cstring("Incorrect aggregator"))
return checkedReject("Aggregate: incorrect aggregator")
# [REJECT] The aggregator's validator index is within the committee -- i.e.
# aggregate_and_proof.aggregator_index in get_beacon_committee(state,
@ -436,8 +429,7 @@ proc validateAggregate*(
if aggregate_and_proof.aggregator_index.ValidatorIndex notin
get_beacon_committee(
epochRef, aggregate.data.slot, aggregate.data.index.CommitteeIndex):
return errReject(cstring(
"Aggregator's validator index not in committee"))
return checkedReject("Aggregate: aggregator's validator index not in committee")
# 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the
# aggregate.data.slot by the validator with index
@ -457,7 +449,7 @@ proc validateAggregate*(
signed_aggregate_and_proof
)
if deferredCrypto.isErr():
return errReject(deferredCrypto.error)
return checkedReject(deferredCrypto.error)
let
(cryptoFuts, sig) = deferredCrypto.get()
@ -467,10 +459,10 @@ proc validateAggregate*(
var x = await cryptoFuts.slotCheck
case x
of BatchResult.Invalid:
return errReject(cstring("validateAggregate: invalid slot signature"))
return checkedReject("Aggregate: invalid slot signature")
of BatchResult.Timeout:
beacon_aggregates_dropped_queue_full.inc()
return err((ValidationResult.Reject, cstring("validateAggregate: timeout checking slot signature")))
return errIgnore("Aggregate: timeout checking slot signature")
of BatchResult.Valid:
discard
@ -479,10 +471,10 @@ proc validateAggregate*(
var x = await cryptoFuts.aggregatorCheck
case x
of BatchResult.Invalid:
return errReject("validateAggregate: invalid aggregator signature")
return checkedReject("Aggregate: invalid aggregator signature")
of BatchResult.Timeout:
beacon_aggregates_dropped_queue_full.inc()
return err((ValidationResult.Reject, cstring("validateAggregate: timeout checking aggregator signature")))
return errIgnore("Aggregate: timeout checking aggregator signature")
of BatchResult.Valid:
discard
@ -491,10 +483,10 @@ proc validateAggregate*(
var x = await cryptoFuts.aggregateCheck
case x
of BatchResult.Invalid:
return errReject("validateAggregate: invalid aggregate signature")
return checkedReject("Aggregate: invalid aggregate signature")
of BatchResult.Timeout:
beacon_aggregates_dropped_queue_full.inc()
return err((ValidationResult.Reject, cstring("validateAggregate: timeout checking aggregate signature")))
return errIgnore("Aggregate: timeout checking aggregate signature")
of BatchResult.Valid:
discard
@ -660,7 +652,6 @@ proc isValidBeaconBlock*(
ok()
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attester_slashing
proc validateAttesterSlashing*(
pool: var ExitPool, attester_slashing: AttesterSlashing):
@ -681,8 +672,8 @@ proc validateAttesterSlashing*(
if not disjoint(
attester_slashed_indices, pool.prior_seen_attester_slashed_indices):
return err((ValidationResult.Ignore, cstring(
"validateAttesterSlashing: attester-slashed index already attester-slashed")))
return errIgnore(
"AttesterSlashing: attester-slashed index already attester-slashed")
# [REJECT] All of the conditions within process_attester_slashing pass
# validation.
@ -703,16 +694,15 @@ proc validateProposerSlashing*(
Result[bool, (ValidationResult, cstring)] =
# Not from spec; the rest of NBC wouldn't have correctly processed it either.
if proposer_slashing.signed_header_1.message.proposer_index > high(int).uint64:
return err((ValidationResult.Ignore, cstring(
"validateProposerSlashing: proposer-slashed index too high")))
return errIgnore("ProposerSlashing: proposer-slashed index too high")
# [IGNORE] The proposer slashing is the first valid proposer slashing
# received for the proposer with index
# proposer_slashing.signed_header_1.message.proposer_index.
if proposer_slashing.signed_header_1.message.proposer_index.int in
pool.prior_seen_proposer_slashed_indices:
return err((ValidationResult.Ignore, cstring(
"validateProposerSlashing: proposer-slashed index already proposer-slashed")))
return errIgnore(
"ProposerSlashing: proposer-slashed index already proposer-slashed")
# [REJECT] All of the conditions within process_proposer_slashing pass validation.
let proposer_slashing_validity =
@ -735,16 +725,14 @@ proc validateVoluntaryExit*(
# the validator with index signed_voluntary_exit.message.validator_index.
if signed_voluntary_exit.message.validator_index >=
getStateField(pool.dag.headState.data, validators).lenu64:
return err((ValidationResult.Ignore, cstring(
"validateVoluntaryExit: validator index too high")))
return errIgnore("VoluntaryExit: validator index too high")
# Given that getStateField(pool.dag.headState, validators) is a seq,
# signed_voluntary_exit.message.validator_index.int is already valid, but
# check explicitly if one changes that data structure.
if signed_voluntary_exit.message.validator_index.int in
pool.prior_seen_voluntary_exit_indices:
return err((ValidationResult.Ignore, cstring(
"validateVoluntaryExit: validator index already voluntarily exited")))
return errIgnore("VoluntaryExit: validator index already voluntarily exited")
# [REJECT] All of the conditions within process_voluntary_exit pass
# validation.


@ -29,13 +29,15 @@ import
./networking/[eth2_discovery, eth2_network, network_metadata],
./gossip_processing/[eth2_processor, block_processor, consensus_manager],
./validators/[
attestation_aggregation, validator_duties, validator_pool,
validator_duties, validator_pool,
slashing_protection, keystore_management],
./sync/[sync_manager, sync_protocol, request_manager],
./rpc/[rest_api, rpc_api],
./spec/datatypes/[altair, phase0],
./spec/eth2_apis/rpc_beacon_client,
./spec/[beaconstate, forks, helpers, network, weak_subjectivity, signatures],
./spec/[
beaconstate, forks, helpers, network, weak_subjectivity, signatures,
validator],
./consensus_object_pools/[
blockchain_dag, block_quarantine, block_clearance, block_pools_types,
attestation_pool, exit_pool, spec_cache],
@ -312,7 +314,7 @@ proc init*(T: type BeaconNode,
rng, config, netKeys, cfg, dag.forkDigests, getBeaconTime,
getStateField(dag.headState.data, genesis_validators_root))
attestationPool = newClone(AttestationPool.init(dag, quarantine))
exitPool = newClone(ExitPool.init(dag, quarantine))
exitPool = newClone(ExitPool.init(dag))
case config.slashingDbKind
of SlashingDbKind.v2:
@ -1017,6 +1019,11 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
if nextAttestationSlot != FAR_FUTURE_SLOT:
next_action_wait.set(nextActionWaitTime.toFloatSeconds)
let epoch = slot.epoch
if epoch >= node.network.forkId.next_fork_epoch:
node.network.updateForkId(
node.dag.cfg.getENRForkID(epoch, node.dag.genesisValidatorsRoot))
node.updateGossipStatus(slot)
# When we're not behind schedule, we'll speculatively update the clearance


@ -4,9 +4,10 @@
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
std/[typetraits, sequtils, strutils, deques, sets, options],
std/[typetraits, sequtils, strutils, deques, sets],
stew/[results, base10],
chronicles,
json_serialization, json_serialization/std/[options, net],
nimcrypto/utils as ncrutils,
../beacon_node_common, ../networking/eth2_network,
../consensus_object_pools/[blockchain_dag, exit_pool, spec_cache],
@ -96,13 +97,6 @@ proc toString*(kind: ValidatorFilterKind): string =
of ValidatorFilterKind.WithdrawalDone:
"withdrawal_done"
proc getBeaconBlocksTopic(node: BeaconNode, kind: BeaconBlockFork): string =
case kind
of BeaconBlockFork.Phase0:
getBeaconBlocksTopic(node.dag.forkDigests.phase0)
of BeaconBlockFork.Altair:
getBeaconBlocksTopic(node.dag.forkDigests.altair)
proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
# https://ethereum.github.io/beacon-APIs/#/Beacon/getGenesis
router.api(MethodGet, "/api/eth/v1/beacon/genesis") do () -> RestApiResponse:


@ -8,11 +8,12 @@ import
stew/[results, base10],
chronicles,
nimcrypto/utils as ncrutils,
../spec/datatypes/[phase0],
../beacon_node_common, ../networking/eth2_network,
".."/[beacon_chain_db, beacon_node_common],
../networking/eth2_network,
../consensus_object_pools/[blockchain_dag, spec_cache, attestation_pool],
../validators/validator_duties,
../spec/[forks, network],
../spec/datatypes/[phase0, altair],
./rest_utils
logScope: topics = "rest_validatorapi"


@ -405,8 +405,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
if head.slot >= blck.message.slot:
# TODO altair-transition, but not immediate testnet-priority to detect
# Altair and fail, since /v1/ doesn't support Altair
let blocksTopic = getBeaconBlocksTopic(node.dag.forkDigests.phase0)
node.network.broadcast(blocksTopic, blck)
node.network.broadcastBeaconBlock(ForkedSignedBeaconBlock.init(blck))
# The block failed validation, but was successfully broadcast anyway.
# It was not integrated into the beacon node's database.
return 202
@ -414,8 +413,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let res = await proposeSignedBlock(node, head, AttachedValidator(), blck)
if res == head:
# TODO altair-transition, but not immediate testnet-priority
let blocksTopic = getBeaconBlocksTopic(node.dag.forkDigests.phase0)
node.network.broadcast(blocksTopic, blck)
node.network.broadcastBeaconBlock(ForkedSignedBeaconBlock.init(blck))
# The block failed validation, but was successfully broadcast anyway.
# It was not integrated into the beacon node's database.
return 202


@ -78,8 +78,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("post_v1_validator_aggregate_and_proofs") do (
payload: SignedAggregateAndProof) -> bool:
debug "post_v1_validator_aggregate_and_proofs"
node.network.broadcast(
getAggregateAndProofsTopic(node.dag.forkDigests.phase0), payload)
node.network.broadcastAggregateAndProof(payload)
notice "Aggregated attestation sent",
attestation = shortLog(payload.message.aggregate),
signature = shortLog(payload.signature)


@ -28,8 +28,8 @@
# stew/byteutils,
import
std/[macros, typetraits],
chronicles,
std/macros,
stew/[assign2, bitops2],
json_serialization/types as jsonTypes


@ -9,6 +9,7 @@ import
stew/[results, base10, byteutils, endians2],
presto,
libp2p/peerid,
serialization,
json_serialization, json_serialization/std/[options, net],
nimcrypto/utils as ncrutils,
../datatypes/[phase0, altair, merge],


@ -374,3 +374,10 @@ func get_beacon_proposer_index*(
func get_beacon_proposer_index*(state: SomeBeaconState, cache: var StateCache):
Option[ValidatorIndex] =
get_beacon_proposer_index(state, cache, state.slot)
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
func is_aggregator*(committee_len: uint64, slot_signature: ValidatorSig): bool =
let
modulo = max(1'u64, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
bytes_to_uint64(eth2digest(
slot_signature.toRaw()).data.toOpenArray(0, 7)) mod modulo == 0


@ -70,17 +70,13 @@ proc readChunkPayload*(conn: Connection, peer: Peer,
if contextBytes == peer.network.forkDigests.phase0:
let res = await readChunkPayload(conn, peer, phase0.SignedBeaconBlock)
if res.isOk:
return ok ForkedSignedBeaconBlock(
kind: BeaconBlockFork.Phase0,
phase0Block: res.get)
return ok ForkedSignedBeaconBlock.init(res.get)
else:
return err(res.error)
elif contextBytes == peer.network.forkDigests.altair:
let res = await readChunkPayload(conn, peer, altair.SignedBeaconBlock)
if res.isOk:
return ok ForkedSignedBeaconBlock(
kind: BeaconBlockFork.Altair,
altairBlock: res.get)
return ok ForkedSignedBeaconBlock.init(res.get)
else:
return err(res.error)
else:


@ -1,6 +1,6 @@
import std/[sets, sequtils]
import chronicles
import common, api, block_service
import "."/[common, api, block_service]
logScope: service = "attestation_service"


@ -7,9 +7,9 @@ import chronos, presto, presto/client as presto_client, chronicles, confutils,
# Local modules
import
../spec/datatypes/[phase0, altair],
../spec/[eth2_merkleization, helpers, signatures],
../spec/[eth2_merkleization, helpers, signatures, validator],
../spec/eth2_apis/rest_beacon_client,
../validators/[attestation_aggregation, keystore_management,
../validators/[keystore_management,
validator_pool, slashing_protection],
".."/[conf, beacon_clock, version, beacon_node_types,
nimbus_binary_common]
@ -19,11 +19,11 @@ export os, tables, sequtils, sequtils, chronos, presto, chronicles, confutils,
byteutils, presto_client
export rest_beacon_client,
phase0, altair, helpers, signatures, eth2_merkleization,
phase0, altair, helpers, signatures, validator, eth2_merkleization,
beacon_clock,
kvstore, kvstore_sqlite3,
keystore_management, slashing_protection, validator_pool,
attestation_aggregation, beacon_node_types
beacon_node_types
const
SYNC_TOLERANCE* = 4'u64


@ -1,50 +0,0 @@
# beacon_chain
# Copyright (c) 2019-2021 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
import
std/options,
../spec/datatypes/base,
../spec/[helpers],
../consensus_object_pools/[spec_cache, attestation_pool]
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
func is_aggregator*(committee_len: uint64, slot_signature: ValidatorSig): bool =
let
modulo = max(1'u64, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
bytes_to_uint64(eth2digest(
slot_signature.toRaw()).data.toOpenArray(0, 7)) mod modulo == 0
func is_aggregator*(epochRef: EpochRef, slot: Slot, index: CommitteeIndex,
slot_signature: ValidatorSig): bool =
let
committee_len = get_beacon_committee_len(epochRef, slot, index)
return is_aggregator(committee_len, slot_signature)
proc aggregate_attestations*(
pool: var AttestationPool, epochRef: EpochRef, slot: Slot, index: CommitteeIndex,
validatorIndex: ValidatorIndex, slot_signature: ValidatorSig): Option[AggregateAndProof] =
doAssert validatorIndex in get_beacon_committee(epochRef, slot, index)
doAssert index.uint64 < get_committee_count_per_slot(epochRef)
# TODO for testing purposes, refactor this into the condition check
# and just calculation
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
if not is_aggregator(epochRef, slot, index, slot_signature):
return none(AggregateAndProof)
let maybe_slot_attestation = getAggregatedAttestation(pool, slot, index)
if maybe_slot_attestation.isNone:
return none(AggregateAndProof)
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#construct-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregateandproof
some(AggregateAndProof(
aggregator_index: validatorIndex.uint64,
aggregate: maybe_slot_attestation.get,
selection_proof: slot_signature))


@ -30,9 +30,7 @@ import
../sszdump, ../sync/sync_manager,
../gossip_processing/consensus_manager,
".."/[conf, beacon_clock, beacon_node_common, beacon_node_types, version],
"."/[
slashing_protection, attestation_aggregation, validator_pool,
keystore_management]
"."/[slashing_protection, validator_pool, keystore_management]
# Metrics for tracking attestation and beacon block loss
const delayBuckets = [-Inf, -4.0, -2.0, -1.0, -0.5, -0.1, -0.05,
@ -432,7 +430,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
epochRef = node.dag.getEpochRef(
attestationHead.blck, slot.compute_epoch_at_slot())
committees_per_slot = get_committee_count_per_slot(epochRef)
fork = getStateField(node.dag.headState.data, fork)
fork = node.dag.forkAtEpoch(slot.epoch)
genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root)
@ -493,6 +491,29 @@ proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
return head
proc makeAggregateAndProof*(
pool: var AttestationPool, epochRef: EpochRef, slot: Slot, index: CommitteeIndex,
validatorIndex: ValidatorIndex, slot_signature: ValidatorSig): Option[AggregateAndProof] =
doAssert validatorIndex in get_beacon_committee(epochRef, slot, index)
doAssert index.uint64 < get_committee_count_per_slot(epochRef)
# TODO for testing purposes, refactor this into the condition check
# and just calculation
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregation-selection
if not is_aggregator(epochRef, slot, index, slot_signature):
return none(AggregateAndProof)
let maybe_slot_attestation = getAggregatedAttestation(pool, slot, index)
if maybe_slot_attestation.isNone:
return none(AggregateAndProof)
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#construct-aggregate
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/validator.md#aggregateandproof
some(AggregateAndProof(
aggregator_index: validatorIndex.uint64,
aggregate: maybe_slot_attestation.get,
selection_proof: slot_signature))
proc sendAggregatedAttestations(
node: BeaconNode, aggregationHead: BlockRef, aggregationSlot: Slot) {.async.} =
# The index is via a
@ -505,7 +526,7 @@ proc sendAggregatedAttestations(
let
epochRef = node.dag.getEpochRef(aggregationHead, aggregationSlot.epoch)
fork = getStateField(node.dag.headState.data, fork)
fork = node.dag.forkAtEpoch(aggregationSlot.epoch)
genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root)
committees_per_slot = get_committee_count_per_slot(epochRef)
@ -532,8 +553,8 @@ proc sendAggregatedAttestations(
for curr in zip(slotSigsData, slotSigs):
let aggregateAndProof =
aggregate_attestations(node.attestationPool[], epochRef, aggregationSlot,
curr[0].committee_index.CommitteeIndex,
makeAggregateAndProof(node.attestationPool[], epochRef, aggregationSlot,
curr[0].committee_index.CommitteeIndex,
curr[0].validator_idx,
curr[1].read)
@ -544,8 +565,7 @@ proc sendAggregatedAttestations(
var signedAP = SignedAggregateAndProof(
message: aggregateAndProof.get,
signature: sig)
node.network.broadcast(
getAggregateAndProofsTopic(node.dag.forkDigests.phase0), signedAP)
node.network.broadcastAggregateAndProof(signedAP)
notice "Aggregated attestation sent",
attestation = shortLog(signedAP.message.aggregate),
validator = shortLog(curr[0].v),


@ -10,13 +10,13 @@
import chronicles
import eth/keys
import ../beacon_chain/spec/datatypes/base
import ../beacon_chain/consensus_object_pools/[block_quarantine, blockchain_dag, exit_pool]
import ../beacon_chain/consensus_object_pools/[blockchain_dag, exit_pool]
import "."/[testutil, testdbutil]
proc getExitPool(): auto =
let dag =
init(ChainDAGRef, defaultRuntimeConfig, makeTestDB(SLOTS_PER_EPOCH * 3), {})
newClone(ExitPool.init(dag, QuarantineRef.init(keys.newRng())))
newClone(ExitPool.init(dag))
suite "Exit pool testing suite":
setup:


@ -3,9 +3,8 @@
import
unittest2,
./testutil,
../beacon_chain/spec/network,
../beacon_chain/spec/datatypes/[base, altair],
../beacon_chain/validators/attestation_aggregation
../beacon_chain/spec/[network, validator],
../beacon_chain/spec/datatypes/[base, altair]
suite "Honest validator":
var forkDigest: ForkDigest