Perform attestation check before broadcast (#2550)

Currently we have a somewhat convoluted flow when sending attestations: we
start broadcasting them over gossip, then pass them to attestation
validation for inclusion in the local attestation pool. It should be the
other way around: we should check attestations _before_ gossiping them.
This serves as an additional safety net ensuring that we don't publish
junk, which becomes more important when publishing attestations coming in
via the API.

Also, the REST API was performing its own validation, meaning attestations
coming from REST would be validated twice. Finally, the JSON-RPC API wasn't
pre-validating at all and would happily broadcast invalid attestations.

* Unify the attestation production pipeline so gossip-, locally- and
API-produced attestations share the same flow: all are now validated and
entered into the pool, then broadcast/republished (a sketch of the new
send path follows this list)
* Refactor subnet handling around a dedicated SubnetId type, streamlining
where subnets are computed and avoiding the need to pass around the number
of active validators
* Move some of the subnet handling code to eth2_network
* Use BitArray throughout for subnet handling
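
A condensed sketch of the reordered send path (a hypothetical rendering of
the `sendAttestation` change in the diff below; logging and metrics
omitted):

```nim
# Sketch only: validation runs first and, as a side effect, registers the
# attestation in the local pool; gossip publication happens only on Accept.
proc sendAttestationSketch(node: BeaconNode, attestation: Attestation,
                           subnet_id: SubnetId): Future[bool] {.async.} =
  let res = await node.processor.attestationValidator(
    attestation, subnet_id, checkSignature = false)
  if res != ValidationResult.Accept:
    return false # junk is dropped before it ever reaches gossip
  node.network.broadcast(
    getAttestationTopic(node.forkDigest, subnet_id), attestation)
  return true
```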
Jacek Sieka 2021-05-10 09:13:36 +02:00 committed by GitHub
parent 39da640beb
commit 867d8f3223
20 changed files with 398 additions and 298 deletions

View File

@@ -101,9 +101,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
```diff
+ iterating words OK
+ overlaps OK
+ roundtrips OK
+ roundtrips BitArray OK
+ roundtrips BitSeq OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
OK: 4/4 Fail: 0/4 Skip: 0/4
## Block pool processing [Preset: mainnet]
```diff
+ Adding the same block twice returns a Duplicate error [Preset: mainnet] OK
@@ -318,4 +319,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 175/184 Fail: 0/184 Skip: 9/184
OK: 176/185 Fail: 0/185 Skip: 9/185

View File

@@ -135,11 +135,11 @@ type
AttestationSubnets* = object
enabled*: bool
stabilitySubnets*: seq[tuple[subnet: uint8, expiration: Epoch]]
stabilitySubnets*: seq[tuple[subnet_id: SubnetId, expiration: Epoch]]
nextCycleEpoch*: Epoch
# These encode states in per-subnet state machines
subscribedSubnets*: set[uint8]
subscribedSubnets*: BitArray[ATTESTATION_SUBNET_COUNT]
subscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]
unsubscribeSlot*: array[ATTESTATION_SUBNET_COUNT, Slot]

View File

@@ -24,6 +24,10 @@ func count_active_validators*(epochInfo: EpochRef): uint64 =
func get_committee_count_per_slot*(epochInfo: EpochRef): uint64 =
get_committee_count_per_slot(count_active_validators(epochInfo))
iterator get_committee_indices*(epochRef: EpochRef): CommitteeIndex =
for i in 0'u64..<get_committee_count_per_slot(epochRef):
yield CommitteeIndex(i)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#get_beacon_committee
iterator get_beacon_committee*(
epochRef: EpochRef, slot: Slot, index: CommitteeIndex): ValidatorIndex =
@@ -225,7 +229,7 @@ iterator get_committee_assignments*(
epochRef: EpochRef, epoch: Epoch, validator_indices: IntSet):
tuple[validatorIndices: IntSet,
committeeIndex: CommitteeIndex,
subnetIndex: uint8, slot: Slot] =
subnet_id: SubnetId, slot: Slot] =
let
committees_per_slot = get_committee_count_per_slot(epochRef)
start_slot = compute_start_slot_at_epoch(epoch)
@@ -240,5 +244,5 @@ iterator get_committee_assignments*(
if includedIndices.len > 0:
yield (
includedIndices, idx,
compute_subnet_for_attestation(committees_per_slot, slot, idx).uint8,
compute_subnet_for_attestation(committees_per_slot, slot, idx),
slot)
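
For context, a consumer of this iterator now receives the subnet
ready-made (a hedged sketch; `scheduleSubscription` is a hypothetical
helper, not part of this commit):

```nim
# Hypothetical caller: the SubnetId is yielded directly by the iterator,
# so callers no longer recompute it from the active validator count.
for (validatorIndices, committeeIndex, subnet_id, slot) in
    get_committee_assignments(epochRef, epoch, validatorIndices):
  scheduleSubscription(subnet_id, slot) # hypothetical helper
```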

View File

@@ -187,11 +187,11 @@ proc checkForPotentialDoppelganger(
proc attestationValidator*(
self: ref Eth2Processor,
attestation: Attestation,
attestation_subnet: uint64,
checksExpensive: bool = true): Future[ValidationResult] {.async.} =
subnet_id: SubnetId,
checkSignature: bool = true): Future[ValidationResult] {.async.} =
logScope:
attestation = shortLog(attestation)
attestation_subnet
subnet_id
let
wallTime = self.getWallTime()
@@ -209,8 +209,7 @@ proc attestationValidator*(
# Now proceed to validation
let v = await self.attestationPool.validateAttestation(
self.batchCrypto,
attestation, wallTime, attestation_subnet, checksExpensive)
self.batchCrypto, attestation, wallTime, subnet_id, checkSignature)
if v.isErr():
debug "Dropping attestation", err = v.error()
return v.error[0]

View File

@@ -140,14 +140,14 @@ func check_aggregation_count(
func check_attestation_subnet(
epochRef: EpochRef, attestation: Attestation,
attestation_subnet: uint64): Result[void, (ValidationResult, cstring)] =
subnet_id: SubnetId): Result[void, (ValidationResult, cstring)] =
let
expectedSubnet =
compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef),
attestation.data.slot, attestation.data.index.CommitteeIndex)
if expectedSubnet != attestation_subnet:
if expectedSubnet != subnet_id:
return err((ValidationResult.Reject, cstring(
"Attestation not on the correct subnet")))
@@ -162,7 +162,7 @@ proc validateAttestation*(
batchCrypto: ref BatchCrypto,
attestation: Attestation,
wallTime: BeaconTime,
attestation_subnet: uint64, checksExpensive: bool):
subnet_id: SubnetId, checkSignature: bool):
Future[Result[tuple[attesting_index: ValidatorIndex, sig: CookedSig],
(ValidationResult, cstring)]] {.async.} =
# Some of the checks below have been reordered compared to the spec, to
@@ -232,7 +232,7 @@ proc validateAttestation*(
# attestation.data.target.epoch), which may be pre-computed along with the
# committee information for the signature check.
block:
let v = check_attestation_subnet(epochRef, attestation, attestation_subnet) # [REJECT]
let v = check_attestation_subnet(epochRef, attestation, subnet_id) # [REJECT]
if v.isErr():
return err(v.error)
@@ -271,14 +271,6 @@ proc validateAttestation*(
return err((ValidationResult.Ignore, cstring(
"Validator has already voted in epoch")))
if not checksExpensive:
# Only sendAttestation, which discards result, doesn't use checksExpensive
# TODO this means that (a) this becomes an "expensive" check and (b) it is
# doing in-principle unnecessary work, since this should be known from the
# attestation creation.
return ok((validator_index, attestation.signature.load.get().CookedSig))
# The signature of attestation is valid.
block:
# First pass - without cryptography
let v = is_valid_indexed_attestation(
@@ -287,7 +279,9 @@ proc validateAttestation*(
if v.isErr():
return err((ValidationResult.Reject, v.error))
# Buffer crypto checks
let sig =
if checkSignature:
# Attestation signatures are batch-verified
let deferredCrypto = batchCrypto
.scheduleAttestationCheck(
fork, genesis_validators_root, epochRef,
@@ -309,7 +303,14 @@ proc validateAttestation*(
beacon_attestations_dropped_queue_full.inc()
return err((ValidationResult.Ignore, cstring("validateAttestation: timeout checking signature")))
of BatchResult.Valid:
discard # keep going only in this case
sig # keep going only in this case
else:
let sig = attestation.signature.load()
if not sig.isSome():
return err((
ValidationResult.Ignore,
cstring("validateAttestation: unable to load signature")))
sig.get()
# Only valid attestations go in the list, which keeps validator_index
# in range

View File

@@ -31,7 +31,7 @@ import
libp2p/stream/connection,
libp2p/utils/semaphore,
eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
eth/net/nat, eth/p2p/discoveryv5/[enr, node],
eth/net/nat, eth/p2p/discoveryv5/[enr, node, random2],
".."/[
version, conf,
ssz/ssz_serialization, beacon_clock],
@@ -1763,3 +1763,50 @@ proc broadcast*(node: Eth2Node, topic: string, msg: auto) =
traceMessage(futSnappy, gossipId(uncompressed, true))
except IOError as exc:
raiseAssert exc.msg # TODO in-memory compression shouldn't fail
proc subscribeAttestationSubnets*(
node: Eth2Node, subnets: BitArray[ATTESTATION_SUBNET_COUNT]) {.
raises: [Defect, CatchableError].} =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
# nimbus won't score attestation subnets for now, we just rely on blocks and aggregates, which are more stable and reliable
for subnet_id, enabled in subnets:
if enabled:
node.subscribe(getAttestationTopic(
node.forkID.fork_digest, SubnetId(subnet_id)), TopicParams.init()) # don't score attestation subnets for now
proc unsubscribeAttestationSubnets*(
node: Eth2Node, subnets: BitArray[ATTESTATION_SUBNET_COUNT]) {.
raises: [Defect, CatchableError].} =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
# nimbus won't score attestation subnets for now, we just rely on blocks and aggregates, which are more stable and reliable
for subnet_id, enabled in subnets:
if enabled:
node.unsubscribe(getAttestationTopic(
node.forkID.fork_digest, SubnetId(subnet_id)))
proc updateStabilitySubnetMetadata*(
node: Eth2Node, attnets: BitArray[ATTESTATION_SUBNET_COUNT]) =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#metadata
node.metadata.seq_number += 1
node.metadata.attnets = attnets
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.discovery.updateRecord(
{"attnets": SSZ.encode(node.metadata.attnets)})
if res.isErr():
# This should not occur in this scenario as the private key would always
# be the correct one and the ENR will not increase in size.
warn "Failed to update record on subnet cycle", error = res.error
else:
debug "Stability subnets changed; updated ENR attnets", attnets
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
func getStabilitySubnetLength*(node: Eth2Node): uint64 =
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
node.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64
func getRandomSubnetId*(node: Eth2Node): SubnetId =
node.rng[].rand(ATTESTATION_SUBNET_COUNT - 1).SubnetId
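
These helpers implement the spec's stability-subnet rule: a subscription
lasts a uniformly random number of epochs in
`[EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION, 2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`
(256 to 512 epochs on mainnet). A hedged sketch of the invariants, not
part of the commit:

```nim
# rand(n) is inclusive of n, so the subscription length lands in [E, 2E]
# for E = EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.
let subscriptionEpochs = node.getStabilitySubnetLength()
doAssert subscriptionEpochs >= EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION
doAssert subscriptionEpochs <= 2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION
doAssert node.getRandomSubnetId().uint64 < ATTESTATION_SUBNET_COUNT
```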

View File

@@ -384,8 +384,8 @@ proc init*(T: type BeaconNode,
getVoluntaryExitsTopic(enrForkId.fork_digest),
getAggregateAndProofsTopic(enrForkId.fork_digest)
]
for subnet in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
topics &= getAttestationTopic(enrForkId.fork_digest, subnet)
for subnet_id in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
topics &= getAttestationTopic(enrForkId.fork_digest, SubnetId(subnet_id))
topics)
if node.config.inProcessValidators:
@@ -426,34 +426,9 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
# finalization occurs every slot, to 4 slots vs scheduledSlot.
doAssert finalizedEpoch + 4 >= epoch
proc installAttestationSubnetHandlers(node: BeaconNode, subnets: set[uint8]) {.
raises: [Defect, CatchableError].} =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestations-and-aggregation
# nimbus won't score attestation subnets for now, we just rely on blocks and aggregates, which are more stable and reliable
for subnet in subnets:
node.network.subscribe(getAttestationTopic(node.forkDigest, subnet), TopicParams.init()) # don't score attestation subnets for now
proc updateStabilitySubnetMetadata(
node: BeaconNode, stabilitySubnets: set[uint8]) =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#metadata
node.network.metadata.seq_number += 1
for subnet in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
node.network.metadata.attnets[subnet] = (subnet in stabilitySubnets)
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/p2p-interface.md#attestation-subnet-bitfield
let res = node.network.discovery.updateRecord(
{"attnets": SSZ.encode(node.network.metadata.attnets)})
if res.isErr():
# This should not occur in this scenario as the private key would always
# be the correct one and the ENR will not increase in size.
warn "Failed to update record on subnet cycle", error = res.error
else:
debug "Stability subnets changed; updated ENR attnets", stabilitySubnets
func getStabilitySubnets(stabilitySubnets: auto): set[uint8] =
func getStabilitySubnets(stabilitySubnets: auto): BitArray[ATTESTATION_SUBNET_COUNT] =
for subnetInfo in stabilitySubnets:
result.incl subnetInfo.subnet
result[subnetInfo.subnet_id.int] = true
proc getAttachedValidators(node: BeaconNode):
Table[ValidatorIndex, AttachedValidator] =
@@ -503,7 +478,7 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
# The relevant bitmaps are 32 bits each.
static: doAssert SLOTS_PER_EPOCH <= 32
for (validatorIndices, committeeIndex, subnetIndex, slot) in
for (validatorIndices, committeeIndex, subnet_id, slot) in
get_committee_assignments(epochRef, epoch, validatorIndices):
doAssert compute_epoch_at_slot(slot) == epoch
@@ -530,43 +505,36 @@ proc updateSubscriptionSchedule(node: BeaconNode, epoch: Epoch) {.async.} =
get_beacon_committee_len(epochRef, slot, committeeIndex), slot):
continue
node.attestationSubnets.unsubscribeSlot[subnetIndex] =
max(slot + 1, node.attestationSubnets.unsubscribeSlot[subnetIndex])
if subnetIndex notin node.attestationSubnets.subscribedSubnets:
node.attestationSubnets.unsubscribeSlot[subnet_id.uint64] =
max(slot + 1, node.attestationSubnets.unsubscribeSlot[subnet_id.uint64])
if not node.attestationSubnets.subscribedSubnets[subnet_id.uint64]:
const SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS = 34
node.attestationSubnets.subscribeSlot[subnetIndex] =
node.attestationSubnets.subscribeSlot[subnet_id.uint64] =
# Queue upcoming subscription potentially earlier
# SLOTS_PER_EPOCH emulates one boundary condition of the per-epoch
# cycling mechanism timing buffers
min(
slot - min(slot.uint64, SUBNET_SUBSCRIPTION_LEAD_TIME_SLOTS),
node.attestationSubnets.subscribeSlot[subnetIndex])
node.attestationSubnets.subscribeSlot[subnet_id.uint64])
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
func getStabilitySubnetLength(node: BeaconNode): uint64 =
EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION +
node.network.rng[].rand(EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION.int).uint64
func updateStabilitySubnets(node: BeaconNode, slot: Slot): set[uint8] =
func updateStabilitySubnets(node: BeaconNode, slot: Slot): BitArray[ATTESTATION_SUBNET_COUNT] =
# Equivalent to wallSlot by cycleAttestationSubnets(), especially
# since it'll try to run early in epochs, avoiding race conditions.
static: doAssert ATTESTATION_SUBNET_COUNT <= high(uint8)
let epoch = slot.epoch
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
for i in 0 ..< node.attestationSubnets.stabilitySubnets.len:
if epoch >= node.attestationSubnets.stabilitySubnets[i].expiration:
node.attestationSubnets.stabilitySubnets[i].subnet =
node.network.rng[].rand(ATTESTATION_SUBNET_COUNT - 1).uint8
node.attestationSubnets.stabilitySubnets[i].expiration =
epoch + node.getStabilitySubnetLength()
for ss in node.attestationSubnets.stabilitySubnets.mitems():
if epoch >= ss.expiration:
ss.subnet_id = node.network.getRandomSubnetId()
ss.expiration = epoch + node.network.getStabilitySubnetLength()
result.incl node.attestationSubnets.stabilitySubnets[i].subnet
result[ss.subnet_id.int] = true
proc cycleAttestationSubnetsPerEpoch(
node: BeaconNode, wallSlot: Slot, prevStabilitySubnets: set[uint8]):
Future[set[uint8]] {.async.} =
node: BeaconNode, wallSlot: Slot,
prevStabilitySubnets: BitArray[ATTESTATION_SUBNET_COUNT]):
Future[BitArray[ATTESTATION_SUBNET_COUNT]] {.async.} =
# Per-epoch portion of subnet cycling: updating stability subnets and
# calculating future attestation subnets.
@@ -600,7 +568,7 @@ proc cycleAttestationSubnetsPerEpoch(
# In subscribeAllSubnets mode, this only gets set once, at initial subnet
# attestation handler creation, since they're all considered as stability
# subnets in that case.
node.updateStabilitySubnetMetadata(stabilitySubnets)
node.network.updateStabilitySubnetMetadata(stabilitySubnets)
return stabilitySubnets
@@ -610,36 +578,34 @@ proc cycleAttestationSubnets(node: BeaconNode, wallSlot: Slot) {.async.} =
let prevSubscribedSubnets = node.attestationSubnets.subscribedSubnets
for i in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
if i in node.attestationSubnets.subscribedSubnets:
for i in 0..<node.attestationSubnets.subscribedSubnets.len():
if node.attestationSubnets.subscribedSubnets[i]:
if wallSlot >= node.attestationSubnets.unsubscribeSlot[i]:
node.attestationSubnets.subscribedSubnets.excl i
node.attestationSubnets.subscribedSubnets[i] = false
else:
if wallSlot >= node.attestationSubnets.subscribeSlot[i]:
node.attestationSubnets.subscribedSubnets.incl i
node.attestationSubnets.subscribedSubnets[i] = true
let
prevStabilitySubnets =
getStabilitySubnets(node.attestationSubnets.stabilitySubnets)
node.attestationSubnets.stabilitySubnets.getStabilitySubnets()
stabilitySubnets =
await node.cycleAttestationSubnetsPerEpoch(wallSlot, prevStabilitySubnets)
# Accounting specific to non-stability subnets
for expiringSubnet in
prevSubscribedSubnets - node.attestationSubnets.subscribedSubnets:
node.attestationSubnets.subscribeSlot[expiringSubnet] = FAR_FUTURE_SLOT
for i, enabled in
(prevSubscribedSubnets - node.attestationSubnets.subscribedSubnets):
if enabled:
node.attestationSubnets.subscribeSlot[i] = FAR_FUTURE_SLOT
let
prevAllSubnets = prevSubscribedSubnets + prevStabilitySubnets
allSubnets = node.attestationSubnets.subscribedSubnets + stabilitySubnets
unsubscribedSubnets = prevAllSubnets - allSubnets
subscribedSubnets = allSubnets - prevAllSubnets
unsubscribeSubnets = prevAllSubnets - allSubnets
subscribeSubnets = allSubnets - prevAllSubnets
for subnet in unsubscribedSubnets:
node.network.unsubscribe(
getAttestationTopic(node.forkDigest, subnet))
node.installAttestationSubnetHandlers(subscribedSubnets)
node.network.unsubscribeAttestationSubnets(unsubscribeSubnets)
node.network.subscribeAttestationSubnets(subscribeSubnets)
debug "Attestation subnets",
expiringSubnets =
@@ -652,10 +618,10 @@ proc cycleAttestationSubnets(node: BeaconNode, wallSlot: Slot) {.async.} =
num_stability_subnets = node.attestationSubnets.stabilitySubnets.len,
expiring_stability_subnets = prevStabilitySubnets - stabilitySubnets,
new_stability_subnets = stabilitySubnets - prevStabilitySubnets,
subscribedSubnets,
unsubscribedSubnets
subscribeSubnets,
unsubscribeSubnets
proc getInitialAttestationSubnets(node: BeaconNode): Table[uint8, Slot] =
proc getInitialAttestationSubnets(node: BeaconNode): Table[SubnetId, Slot] =
let
wallEpoch = node.beaconClock.now().slotOrZero().epoch
validatorIndices = toIntSet(toSeq(node.getAttachedValidators().keys()))
@@ -665,12 +631,12 @@ proc getInitialAttestationSubnets(node: BeaconNode): Table[uint8, Slot] =
# https://github.com/nim-lang/Nim/issues/16217 are fixed, in
# Nimbus's Nim, use (_, _, subnetIndex, slot).
let epochRef = node.chainDag.getEpochRef(node.chainDag.head, epoch)
for (_, ci, subnetIndex, slot) in get_committee_assignments(
for (_, ci, subnet_id, slot) in get_committee_assignments(
epochRef, epoch, validatorIndices):
result.withValue(subnetIndex, v) do:
result.withValue(subnet_id, v) do:
v[] = max(v[], slot + 1)
do:
result[subnetIndex] = slot + 1
result[subnet_id] = slot + 1
# Either wallEpoch is 0, in which case it might be pre-genesis, but we only
# care about the already-known first two epochs of attestations, or it's in
@@ -680,7 +646,7 @@ proc getInitialAttestationSubnets(node: BeaconNode): Table[uint8, Slot] =
mergeAttestationSubnets(wallEpoch)
mergeAttestationSubnets(wallEpoch + 1)
proc getAttestationSubnetHandlers(node: BeaconNode) {.
proc subscribeAttestationSubnetHandlers(node: BeaconNode) {.
raises: [Defect, CatchableError].} =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#phase-0-attestation-subnet-stability
# TODO:
@@ -688,53 +654,46 @@ proc getAttestationSubnetHandlers(node: BeaconNode) {.
# - Restarting the node with a persistent netkey
# - When going from synced -> syncing -> synced state
template getAllAttestationSubnets(): Table[uint8, Slot] =
var subnets: Table[uint8, Slot]
for i in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
subnets[i] = FAR_FUTURE_SLOT
subnets
let
initialSubnets =
if node.config.subscribeAllSubnets:
getAllAttestationSubnets()
# In all-subnets mode, we create a stability subnet subscription for every
# subnet - this will be propagated in the attnets ENR entry
node.attestationSubnets.stabilitySubnets.setLen(ATTESTATION_SUBNET_COUNT)
for i, ss in node.attestationSubnets.stabilitySubnets.mpairs():
ss.subnet_id = SubnetId(i)
ss.expiration = FAR_FUTURE_EPOCH
else:
node.getInitialAttestationSubnets()
wallEpoch = node.beaconClock.now().slotOrZero().epoch
var initialStabilitySubnets: set[uint8]
let wallEpoch = node.beaconClock.now().slotOrZero().epoch
# TODO make length dynamic when validator-client-based validators join and leave
# In normal mode, there's one subnet subscription per validator, changing
# randomly over time
node.attestationSubnets.stabilitySubnets.setLen(
node.attachedValidators[].count)
for i in 0 ..< node.attachedValidators[].count:
node.attestationSubnets.stabilitySubnets[i] = (
subnet: node.network.rng[].rand(ATTESTATION_SUBNET_COUNT - 1).uint8,
expiration: wallEpoch + node.getStabilitySubnetLength())
initialStabilitySubnets.incl(
node.attestationSubnets.stabilitySubnets[i].subnet)
for i, ss in node.attestationSubnets.stabilitySubnets.mpairs():
ss.subnet_id = node.network.getRandomSubnetId()
ss.expiration = wallEpoch + node.network.getStabilitySubnetLength()
node.updateStabilitySubnetMetadata(
if node.config.subscribeAllSubnets:
{0'u8 .. (ATTESTATION_SUBNET_COUNT - 1)}
else:
node.attestationSubnets.stabilitySubnets.getStabilitySubnets)
let initialStabilitySubnets =
node.attestationSubnets.stabilitySubnets.getStabilitySubnets()
node.network.updateStabilitySubnetMetadata(initialStabilitySubnets)
let
initialSubnets = node.getInitialAttestationSubnets()
for i in 0'u8 ..< ATTESTATION_SUBNET_COUNT:
if i in initialSubnets:
node.attestationSubnets.subscribedSubnets.incl i
if SubnetId(i) in initialSubnets:
node.attestationSubnets.subscribedSubnets[i] = true
node.attestationSubnets.unsubscribeSlot[i] =
try: initialSubnets[i] except KeyError: raiseAssert "checked with in"
try: initialSubnets[SubnetId(i)] except KeyError: raiseAssert "checked with in"
else:
node.attestationSubnets.subscribedSubnets.excl i
node.attestationSubnets.subscribedSubnets[i] = false
node.attestationSubnets.subscribeSlot[i] = FAR_FUTURE_SLOT
node.attestationSubnets.enabled = true
debug "Initial attestation subnets subscribed",
initialSubnets,
initialStabilitySubnets,
wallEpoch
node.installAttestationSubnetHandlers(
initialStabilitySubnets
node.network.subscribeAttestationSubnets(
node.attestationSubnets.subscribedSubnets + initialStabilitySubnets)
proc addMessageHandlers(node: BeaconNode) {.raises: [Defect, CatchableError].} =
@@ -792,7 +751,7 @@ proc addMessageHandlers(node: BeaconNode) {.raises: [Defect, CatchableError].} =
node.network.subscribe(getProposerSlashingsTopic(node.forkDigest), basicParams)
node.network.subscribe(getVoluntaryExitsTopic(node.forkDigest), basicParams)
node.network.subscribe(getAggregateAndProofsTopic(node.forkDigest), aggregateTopicParams, enableTopicMetrics = true)
node.getAttestationSubnetHandlers()
node.subscribeAttestationSubnetHandlers()
func getTopicSubscriptionEnabled(node: BeaconNode): bool =
node.attestationSubnets.enabled
@@ -807,8 +766,9 @@ proc removeMessageHandlers(node: BeaconNode) {.raises: [Defect, CatchableError].
node.network.unsubscribe(getAttesterSlashingsTopic(node.forkDigest))
node.network.unsubscribe(getAggregateAndProofsTopic(node.forkDigest))
for subnet in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
node.network.unsubscribe(getAttestationTopic(node.forkDigest, subnet))
for subnet_id in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
node.network.unsubscribe(
getAttestationTopic(node.forkDigest, SubnetId(subnet_id)))
proc setupDoppelgangerDetection(node: BeaconNode, slot: Slot) =
# When another client's already running, this is very likely to detect
@@ -1185,12 +1145,12 @@ proc installMessageValidators(node: BeaconNode) =
# subnets are subscribed to during any given epoch.
for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
closureScope:
let attestation_subnet = it
let subnet_id = SubnetId(it)
node.network.addAsyncValidator(
getAttestationTopic(node.forkDigest, attestation_subnet),
getAttestationTopic(node.forkDigest, subnet_id),
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): Future[ValidationResult] =
node.processor.attestationValidator(attestation, attestation_subnet))
node.processor.attestationValidator(attestation, subnet_id))
node.network.addAsyncValidator(
getAggregateAndProofsTopic(node.forkDigest),

View File

@@ -194,9 +194,12 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, a
ad, a.committee_length.int, a.validator_committee_index,
vc.fork, vc.beaconGenesis.genesis_validators_root)
notice "Attesting",
slot, public_key = a.public_key, attestation = shortLog(attestation)
discard await vc.client.post_v1_beacon_pool_attestations(attestation)
notice "Sending attestation to beacon node",
public_key = a.public_key, attestation = shortLog(attestation)
let ok = await vc.client.post_v1_beacon_pool_attestations(attestation)
if not ok:
warn "Failed to send attestation to beacon node",
public_key = a.public_key, attestation = shortLog(attestation)
validatorToAttestationDataRoot[a.public_key] = attestation.data.hash_tree_root
else:

View File

@@ -461,8 +461,7 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
rpcServer.rpc("post_v1_beacon_pool_attestations") do (
attestation: Attestation) -> bool:
node.sendAttestation(attestation)
return true
return await node.sendAttestation(attestation)
rpcServer.rpc("get_v1_beacon_pool_attester_slashings") do (
) -> seq[AttesterSlashing]:

View File

@@ -718,15 +718,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
var failures: seq[RestAttestationsFailureTuple]
for atindex, attestation in attestations.pairs():
let wallTime = node.processor.getWallTime()
let res = await node.attestationPool.validateAttestation(
node.processor.batchCrypto, attestation, wallTime,
attestation.data.index, true
)
if res.isErr():
failures.add((index: uint64(atindex), message: $res.error()))
else:
node.sendAttestation(attestation)
if not await node.sendAttestation(attestation):
failures.add(
(index: uint64(atindex), message: "Attestation failed validation"))
if len(failures) > 0:
return RestApiResponse.jsonErrorList(Http400, AttestationValidationError,

View File

@@ -150,21 +150,21 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let
head = node.doChecksAndGetCurrentHead(epoch)
epochRef = node.chainDag.getEpochRef(head, epoch)
subnet = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), slot, committee_index).uint8
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), slot, committee_index)
# Either subnet already subscribed or not. If not, subscribe. If it is,
# extend subscription. All one knows from the API combined with how far
# ahead one can check for attestation schedule is that it might be used
# for up to the end of next epoch. Therefore, arrange for subscriptions
# to last at least that long.
if subnet notin node.attestationSubnets.subscribedSubnets:
if not node.attestationSubnets.subscribedSubnets[subnet_id.uint64]:
# When to subscribe. Since it's not clear when from the API it's first
# needed, do so immediately.
node.attestationSubnets.subscribeSlot[subnet] =
min(node.attestationSubnets.subscribeSlot[subnet], wallSlot)
node.attestationSubnets.subscribeSlot[subnet_id.uint64] =
min(node.attestationSubnets.subscribeSlot[subnet_id.uint64], wallSlot)
node.attestationSubnets.unsubscribeSlot[subnet] =
node.attestationSubnets.unsubscribeSlot[subnet_id.uint64] =
max(
compute_start_slot_at_epoch(epoch + 2),
node.attestationSubnets.unsubscribeSlot[subnet])
node.attestationSubnets.unsubscribeSlot[subnet_id.uint64])

View File

@@ -149,6 +149,10 @@ type
# leave it at spec size
CommitteeIndex* = distinct uint64
# The subnet id maps which gossip subscription to use to publish an
# attestation - it is distinct from the CommitteeIndex in particular
SubnetId* = distinct uint8
Gwei* = uint64
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#proposerslashing
@@ -816,6 +820,18 @@ proc readValue*(reader: var JsonReader, value: var CommitteeIndex)
{.raises: [IOError, SerializationError, Defect].} =
value = CommitteeIndex reader.readValue(distinctBase CommitteeIndex)
proc writeValue*(writer: var JsonWriter, value: SubnetId)
{.raises: [IOError, Defect].} =
writeValue(writer, distinctBase value)
proc readValue*(reader: var JsonReader, value: var SubnetId)
{.raises: [IOError, SerializationError, Defect].} =
let v = reader.readValue(distinctBase SubnetId)
if v >= ATTESTATION_SUBNET_COUNT:
raiseUnexpectedValue(
reader, "Subnet id must be < " & $ATTESTATION_SUBNET_COUNT)
value = SubnetId(v)
proc writeValue*(writer: var JsonWriter, value: HashList)
{.raises: [IOError, SerializationError, Defect].} =
writeValue(writer, value.data)
@@ -883,6 +899,9 @@ proc `<`*(x, y: CommitteeIndex) : bool {.borrow, noSideEffect.}
proc hash*(x: CommitteeIndex): Hash {.borrow, noSideEffect.}
func `$`*(x: CommitteeIndex): auto = $(distinctBase(x))
proc `==`*(x, y: SubnetId) : bool {.borrow, noSideEffect.}
proc `$`*(x: SubnetId): string {.borrow, noSideEffect.}
func `as`*(d: DepositData, T: type DepositMessage): T =
T(pubkey: d.pubkey,
withdrawal_credentials: d.withdrawal_credentials,
@@ -1138,3 +1157,4 @@ static:
# Sanity checks - these types should be trivial enough to copy with memcpy
doAssert supportsCopyMem(Validator)
doAssert supportsCopyMem(Eth2Digest)
doAssert ATTESTATION_SUBNET_COUNT <= high(distinctBase SubnetId).int
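
To illustrate what the distinct type buys (a hypothetical snippet, not
from the diff): subnet ids and committee indices can no longer be mixed up
silently, even though both wrap small integers.

```nim
let
  committee_index = CommitteeIndex(3)
  subnet_id = SubnetId(3)
# if subnet_id == committee_index: ...  # compile-time error: type mismatch
if subnet_id == SubnetId(uint8(committee_index.uint64)):
  echo "same numeric value, but the conversion is now explicit"
```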

View File

@@ -53,7 +53,7 @@ func getAggregateAndProofsTopic*(forkDigest: ForkDigest): string =
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#broadcast-attestation
func compute_subnet_for_attestation*(
committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex):
uint64 =
SubnetId =
# Compute the correct subnet for an attestation for Phase 0.
# Note, this mimics expected Phase 1 behavior where attestations will be
# mapped to their shard subnet.
@@ -62,16 +62,15 @@ func compute_subnet_for_attestation*(
committees_since_epoch_start =
committees_per_slot * slots_since_epoch_start
SubnetId(
(committees_since_epoch_start + committee_index.uint64) mod
ATTESTATION_SUBNET_COUNT
ATTESTATION_SUBNET_COUNT)
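
A worked example of the formula above, assuming mainnet constants
(SLOTS_PER_EPOCH = 32, ATTESTATION_SUBNET_COUNT = 64):

```nim
# Slot 37 is slot 5 of its epoch (37 mod 32); with 4 committees per slot,
# 20 committees precede it, so committee 2 maps to subnet
# (20 + 2) mod 64 = 22.
doAssert compute_subnet_for_attestation(
  4, Slot(37), CommitteeIndex(2)) == SubnetId(22)
```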
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#broadcast-attestation
func getAttestationTopic*(forkDigest: ForkDigest, subnetIndex: uint64):
func getAttestationTopic*(forkDigest: ForkDigest, subnet_id: SubnetId):
string =
## For subscribing and unsubscribing to/from a subnet.
doAssert subnetIndex < ATTESTATION_SUBNET_COUNT
eth2Prefix(forkDigest) & "beacon_attestation_" & $subnetIndex & "/ssz"
eth2Prefix(forkDigest) & "beacon_attestation_" & $uint64(subnet_id) & "/ssz"
func getENRForkID*(fork: Fork, genesis_validators_root: Eth2Digest): ENRForkID =
let

View File

@@ -281,3 +281,29 @@ func countZeros*(x: BitSeq): int =
template bytes*(x: BitSeq): untyped =
seq[byte](x)
iterator items*(x: BitArray): bool =
for i in 0..<x.bits:
yield x[i]
iterator pairs*(x: BitArray): (int, bool) =
for i in 0..<x.bits:
yield (i, x[i])
func incl*(a: var BitArray, b: BitArray) =
# Update `a` to include the bits of `b`, as if applying `or` to each bit
for i in 0..<a.bytes.len:
a.bytes[i] = a.bytes[i] or b.bytes[i]
func clear*(a: var BitArray) =
for b in a.bytes.mitems(): b = 0
# Set operations
func `+`*(a, b: BitArray): BitArray =
for i in 0..<a.bytes.len:
result.bytes[i] = a.bytes[i] or b.bytes[i]
func `-`*(a, b: BitArray): BitArray =
for i in 0..<a.bytes.len:
result.bytes[i] = a.bytes[i] and (not b.bytes[i])
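
A brief usage sketch of these set operations (the new `BitArray` tests
below exercise them more thoroughly):

```nim
# Union, difference and in-place union over fixed-size bitsets.
var a, b: BitArray[64]
a.setBit 1
b.setBit 2
doAssert (a + b)[1] and (a + b)[2]     # union has both bits
doAssert (a - b)[1] and not (a - b)[2] # difference drops b's bits
a.incl b                               # in-place union
doAssert a[1] and a[2]
a.clear()
doAssert not a[1] and not a[2]
```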

View File

@@ -160,19 +160,26 @@ proc isSynced*(node: BeaconNode, head: BlockRef): bool =
true
proc sendAttestation*(
node: BeaconNode, attestation: Attestation, num_active_validators: uint64) =
let subnet_index =
compute_subnet_for_attestation(
get_committee_count_per_slot(num_active_validators), attestation.data.slot,
attestation.data.index.CommitteeIndex)
node: BeaconNode, attestation: Attestation,
subnet_id: SubnetId, checkSignature: bool): Future[bool] {.async.} =
# Validate attestation before sending it via gossip - validation will also
# register the attestation with the attestation pool. Notably, although
# libp2p calls the data handler for any subscription on the subnet
# topic, it does not perform validation.
let ok = await node.processor.attestationValidator(
attestation, subnet_id, checkSignature)
return case ok
of ValidationResult.Accept:
node.network.broadcast(
getAttestationTopic(node.forkDigest, subnet_index), attestation)
# Ensure node's own broadcast attestations end up in its attestation pool
discard node.processor.attestationValidator(
attestation, subnet_index, false)
getAttestationTopic(node.forkDigest, subnet_id), attestation)
beacon_attestations_sent.inc()
true
else:
notice "Produced attestation failed validation",
attestation = shortLog(attestation),
result = $ok
false
proc sendVoluntaryExit*(node: BeaconNode, exit: SignedVoluntaryExit) =
node.network.broadcast(getVoluntaryExitsTopic(node.forkDigest), exit)
@@ -185,18 +192,21 @@ proc sendProposerSlashing*(node: BeaconNode, slashing: ProposerSlashing) =
node.network.broadcast(getProposerSlashingsTopic(node.forkDigest),
slashing)
proc sendAttestation*(node: BeaconNode, attestation: Attestation) =
# For the validator API, which doesn't supply num_active_validators.
proc sendAttestation*(node: BeaconNode, attestation: Attestation): Future[bool] =
# For the validator API, which doesn't supply the subnet id.
let attestationBlck =
node.chainDag.getRef(attestation.data.beacon_block_root)
if attestationBlck.isNil:
debug "Attempt to send attestation without corresponding block"
return
let
epochRef = node.chainDag.getEpochRef(
attestationBlck, attestation.data.target.epoch)
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), attestation.data.slot,
attestation.data.index.CommitteeIndex)
node.sendAttestation(
attestation,
count_active_validators(
node.chainDag.getEpochRef(attestationBlck, attestation.data.target.epoch)))
node.sendAttestation(attestation, subnet_id, checkSignature = true)
proc createAndSendAttestation(node: BeaconNode,
fork: Fork,
@@ -205,13 +215,19 @@ proc createAndSendAttestation(node: BeaconNode,
attestationData: AttestationData,
committeeLen: int,
indexInCommittee: int,
num_active_validators: uint64) {.async.} =
var attestation = await validator.produceAndSignAttestation(
subnet_id: SubnetId) {.async.} =
try:
var
attestation = await validator.produceAndSignAttestation(
attestationData, committeeLen, indexInCommittee, fork,
genesis_validators_root)
node.sendAttestation(attestation, num_active_validators)
let ok = await node.sendAttestation(
attestation, subnet_id, checkSignature = false)
if not ok: # Logged in sendAttestation
return
let sent = node.beaconClock.now()
if node.config.dumpEnabled:
dump(node.config.dumpDirOutgoing, attestation.data, validator.pubKey)
@@ -230,6 +246,10 @@ proc createAndSendAttestation(node: BeaconNode,
indexInCommittee = indexInCommittee
beacon_attestation_sent_delay.observe(delaySecs)
except CatchableError as exc:
# An error could happen here when the signature task fails - we must
# not leak the exception because this is an asyncSpawn task
notice "Error sending attestation", err = exc.msg
proc getBlockProposalEth1Data*(node: BeaconNode,
stateData: StateData): BlockProposalEth1Data =
@@ -405,10 +425,6 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
attestationHeadRoot = shortLog(attestationHead.blck.root),
attestationSlot = shortLog(slot)
var attestations: seq[tuple[
data: AttestationData, committeeLen, indexInCommittee: int,
validator: AttachedValidator, validator_index: ValidatorIndex]]
# We need to run attestations exactly for the slot that we're attesting to.
# In case blocks went missing, this means advancing past the latest block
# using empty slots as fillers.
@@ -416,46 +432,42 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
let
epochRef = node.chainDag.getEpochRef(
attestationHead.blck, slot.compute_epoch_at_slot())
committees_per_slot =
get_committee_count_per_slot(epochRef)
num_active_validators = count_active_validators(epochRef)
committees_per_slot = get_committee_count_per_slot(epochRef)
fork = getStateField(node.chainDag.headState, fork)
genesis_validators_root =
getStateField(node.chainDag.headState, genesis_validators_root)
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
epochRef, slot, committee_index.CommitteeIndex)
for committee_index in get_committee_indices(epochRef):
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validator_index in committee:
let validator = node.getAttachedValidator(epochRef, validator_index)
if validator != nil:
let ad = makeAttestationData(
epochRef, attestationHead, committee_index.CommitteeIndex)
attestations.add(
(ad, committee.len, index_in_committee, validator, validator_index))
if validator == nil:
continue
for a in attestations:
let
data = makeAttestationData(epochRef, attestationHead, committee_index)
# TODO signing_root is recomputed in produceAndSignAttestation/signAttestation just after
let signing_root = compute_attestation_root(
fork, genesis_validators_root, a.data)
let notSlashable = node.attachedValidators
signing_root = compute_attestation_root(
fork, genesis_validators_root, data)
registered = node.attachedValidators
.slashingProtection
.registerAttestation(
a.validator_index,
a.validator.pubkey,
a.data.source.epoch,
a.data.target.epoch,
signing_root
)
if notSlashable.isOk():
traceAsyncErrors createAndSendAttestation(
node, fork, genesis_validators_root, a.validator, a.data,
a.committeeLen, a.indexInCommittee, num_active_validators)
validator_index,
validator.pubkey,
data.source.epoch,
data.target.epoch,
signing_root)
if registered.isOk():
let subnet_id = compute_subnet_for_attestation(
committees_per_slot, data.slot, data.index.CommitteeIndex)
asyncSpawn createAndSendAttestation(
node, fork, genesis_validators_root, validator, data,
committee.len(), index_in_committee, subnet_id)
else:
warn "Slashing protection activated for attestation",
validator = a.validator.pubkey,
badVoteDetails = $notSlashable.error
validator = validator.pubkey,
badVoteDetails = $registered.error()
proc handleProposal(node: BeaconNode, head: BlockRef, slot: Slot):
Future[BlockRef] {.async.} =

View File

@@ -63,13 +63,9 @@ proc signWithRemoteValidator(v: AttachedValidator, data: Eth2Digest):
v.connection.inStream.flush()
var line = newStringOfCap(120).TaintedString
discard v.connection.outStream.readLine(line)
result = ValidatorSig.fromHex(line).get()
# TODO this is an ugly hack to fake a delay and subsequent async reordering
# for the purpose of testing the external validator delay - to be
# replaced by something more sensible
await sleepAsync(chronos.milliseconds(1))
return ValidatorSig.fromHex(line).get()
# TODO: Honest validator - https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md
# https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md
proc signBlockProposal*(v: AttachedValidator, fork: Fork,
genesis_validators_root: Eth2Digest, slot: Slot,
blockRoot: Eth2Digest): Future[ValidatorSig] {.async.} =
@@ -81,14 +77,14 @@ proc signBlockProposal*(v: AttachedValidator, fork: Fork,
await signWithRemoteValidator(v, root)
proc signAttestation*(v: AttachedValidator,
attestation: AttestationData,
data: AttestationData,
fork: Fork, genesis_validators_root: Eth2Digest):
Future[ValidatorSig] {.async.} =
return if v.kind == inProcess:
get_attestation_signature(
fork, genesis_validators_root, attestation, v.privKey).toValidatorSig()
fork, genesis_validators_root, data, v.privKey).toValidatorSig()
else:
let root = compute_attestation_root(fork, genesis_validators_root, attestation)
let root = compute_attestation_root(fork, genesis_validators_root, data)
await signWithRemoteValidator(v, root)
proc produceAndSignAttestation*(validator: AttachedValidator,

View File

@@ -194,7 +194,7 @@ func getTopics(forkDigest: ForkDigest,
of TopicFilter.Attestations:
mapIt(
0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64,
getAttestationTopic(forkDigest, it) & "_snappy")
getAttestationTopic(forkDigest, SubnetId(it)) & "_snappy")
proc loadBootFile(name: string): seq[string] =
try:

View File

@@ -7,7 +7,45 @@ import
./testutil
suite "Bit fields":
test "roundtrips":
test "roundtrips BitArray":
var
a = BitArray[100]()
b = BitArray[100]()
check:
not a[0]
a.setBit 1
check:
not a[0]
a[1]
a + b == a
a - b == a
b + a == a
b - a == b # b is empty
b.setBit 2
check:
(a + b)[2]
(b - a)[2]
not (b - a)[1]
a.incl(b)
check:
not a[0]
a[1]
a[2]
a.clear()
check:
not a[1]
test "roundtrips BitSeq":
var
a = BitSeq.init(100)
b = BitSeq.init(100)

View File

@@ -92,13 +92,14 @@ suite "Gossip validation " & preset():
pool[].nextAttestationEpoch.setLen(0) # reset for test
check:
# Wrong subnet
validateAttestation(pool, batchCrypto, att_1_0, beaconTime, subnet + 1, true).waitFor().isErr
validateAttestation(
pool, batchCrypto, att_1_0, beaconTime, SubnetId(subnet.uint8 + 1), true).waitFor().isErr
pool[].nextAttestationEpoch.setLen(0) # reset for test
check:
# Too far in the future
validateAttestation(
pool, batchCrypto, att_1_0, beaconTime - 1.seconds, subnet + 1, true).waitFor().isErr
pool, batchCrypto, att_1_0, beaconTime - 1.seconds, subnet, true).waitFor().isErr
pool[].nextAttestationEpoch.setLen(0) # reset for test
check:
@@ -106,7 +107,7 @@ suite "Gossip validation " & preset():
validateAttestation(
pool, batchCrypto, att_1_0,
beaconTime - (SECONDS_PER_SLOT * SLOTS_PER_EPOCH - 1).int.seconds,
subnet + 1, true).waitFor().isErr
subnet, true).waitFor().isErr
block:
var broken = att_1_0

View File

@@ -19,45 +19,45 @@ suite "Honest validator":
test "Mainnet attestation topics":
check:
getAttestationTopic(forkDigest, 0) ==
getAttestationTopic(forkDigest, SubnetId(0)) ==
"/eth2/00000000/beacon_attestation_0/ssz"
getAttestationTopic(forkDigest, 5) ==
getAttestationTopic(forkDigest, SubnetId(5)) ==
"/eth2/00000000/beacon_attestation_5/ssz"
getAttestationTopic(forkDigest, 7) ==
getAttestationTopic(forkDigest, SubnetId(7)) ==
"/eth2/00000000/beacon_attestation_7/ssz"
getAttestationTopic(forkDigest, 9) ==
getAttestationTopic(forkDigest, SubnetId(9)) ==
"/eth2/00000000/beacon_attestation_9/ssz"
getAttestationTopic(forkDigest, 13) ==
getAttestationTopic(forkDigest, SubnetId(13)) ==
"/eth2/00000000/beacon_attestation_13/ssz"
getAttestationTopic(forkDigest, 19) ==
getAttestationTopic(forkDigest, SubnetId(19)) ==
"/eth2/00000000/beacon_attestation_19/ssz"
getAttestationTopic(forkDigest, 20) ==
getAttestationTopic(forkDigest, SubnetId(20)) ==
"/eth2/00000000/beacon_attestation_20/ssz"
getAttestationTopic(forkDigest, 22) ==
getAttestationTopic(forkDigest, SubnetId(22)) ==
"/eth2/00000000/beacon_attestation_22/ssz"
getAttestationTopic(forkDigest, 25) ==
getAttestationTopic(forkDigest, SubnetId(25)) ==
"/eth2/00000000/beacon_attestation_25/ssz"
getAttestationTopic(forkDigest, 27) ==
getAttestationTopic(forkDigest, SubnetId(27)) ==
"/eth2/00000000/beacon_attestation_27/ssz"
getAttestationTopic(forkDigest, 31) ==
getAttestationTopic(forkDigest, SubnetId(31)) ==
"/eth2/00000000/beacon_attestation_31/ssz"
getAttestationTopic(forkDigest, 39) ==
getAttestationTopic(forkDigest, SubnetId(39)) ==
"/eth2/00000000/beacon_attestation_39/ssz"
getAttestationTopic(forkDigest, 45) ==
getAttestationTopic(forkDigest, SubnetId(45)) ==
"/eth2/00000000/beacon_attestation_45/ssz"
getAttestationTopic(forkDigest, 47) ==
getAttestationTopic(forkDigest, SubnetId(47)) ==
"/eth2/00000000/beacon_attestation_47/ssz"
getAttestationTopic(forkDigest, 48) ==
getAttestationTopic(forkDigest, SubnetId(48)) ==
"/eth2/00000000/beacon_attestation_48/ssz"
getAttestationTopic(forkDigest, 50) ==
getAttestationTopic(forkDigest, SubnetId(50)) ==
"/eth2/00000000/beacon_attestation_50/ssz"
getAttestationTopic(forkDigest, 53) ==
getAttestationTopic(forkDigest, SubnetId(53)) ==
"/eth2/00000000/beacon_attestation_53/ssz"
getAttestationTopic(forkDigest, 54) ==
getAttestationTopic(forkDigest, SubnetId(54)) ==
"/eth2/00000000/beacon_attestation_54/ssz"
getAttestationTopic(forkDigest, 62) ==
getAttestationTopic(forkDigest, SubnetId(62)) ==
"/eth2/00000000/beacon_attestation_62/ssz"
getAttestationTopic(forkDigest, 63) ==
getAttestationTopic(forkDigest, SubnetId(63)) ==
"/eth2/00000000/beacon_attestation_63/ssz"
test "is_aggregator":