Harden CommitteeIndex, SubnetId, SyncSubcommitteeIndex (#3259)

* Harden CommitteeIndex, SubnetId, SyncSubcommitteeIndex

Harden the use of `CommitteeIndex` et al to prevent future issues: the
indices become distinct types that are validated before use in several
places. The datatypes in the spec are kept simple, however, so that
invalid data can still be read (a minimal sketch of the pattern follows
the change list below).

* fix an invalid epoch being used to compute committee lengths in the REST
`/eth/v1/beacon/states/{state_id}/committees` endpoint (could return
invalid data)
* normalize some variable names
* normalize committee index loops
* fix `RestAttesterDuty` to use `uint64` for `validator_committee_index`
* validate `CommitteeIndex` on ingress in REST API
* update REST rules with stricter parsing
* better REST serializers
* save lots of memory by not using `zip` ...at least a few bytes!
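
To make the above concrete, here is a minimal, self-contained sketch of the
pattern this commit introduces. It mirrors `makeLimitedU64` and
`CommitteeIndex.init` from the `base.nim` changes further down; the
`MAX_COMMITTEES_PER_SLOT` value and the `data_index` variable are
illustrative placeholders, and `stew/results` is assumed to be available as
it is elsewhere in the codebase:

```nim
# Sketch only: spec datatypes keep plain integers so invalid data can still be
# decoded; consumers validate via `init` and get a distinct, range-checked type.
import std/typetraits
import stew/results

const MAX_COMMITTEES_PER_SLOT = 64'u64   # illustrative limit

type CommitteeIndex = distinct uint8     # distinct type, as in the diff below

func init(T: type CommitteeIndex, value: uint64): Result[CommitteeIndex, cstring] =
  # Validation happens on construction ("ingress"), not in the datatype
  if value < MAX_COMMITTEES_PER_SLOT:
    ok(CommitteeIndex(value))
  else:
    err("CommitteeIndex out of range")

template asUInt64(x: CommitteeIndex): uint64 = uint64(distinctBase(x))

when isMainModule:
  # `data_index` stands in for a value read off the wire, e.g. attestation.data.index
  let data_index = 3'u64
  let idx = CommitteeIndex.init(data_index)
  if idx.isErr():
    echo "rejected on ingress: ", idx.error()
  else:
    echo "validated committee index: ", idx.get().asUInt64
```

On the wire, fields such as `attestation.data.index` stay plain `uint64`;
only code that actually looks up a committee pays the cost of validation.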
Jacek Sieka 2022-01-09 00:28:49 +01:00 committed by GitHub
parent 6f7e0e3393
commit 20e700fae4
31 changed files with 432 additions and 432 deletions


@ -203,7 +203,7 @@ OK: 7/7 Fail: 0/7 Skip: 0/7
OK: 1/1 Fail: 0/1 Skip: 0/1
## Gossip validation [Preset: mainnet]
```diff
+ Any committee index is valid OK
+ Empty committee when no committee for slot OK
+ validateAttestation OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2


@ -390,7 +390,7 @@ proc addForkChoice*(pool: var AttestationPool,
blck = shortLog(blck), err = state.error
iterator attestations*(pool: AttestationPool, slot: Option[Slot],
index: Option[CommitteeIndex]): Attestation =
committee_index: Option[CommitteeIndex]): Attestation =
let candidateIndices =
if slot.isSome():
let candidateIdx = pool.candidateIdx(slot.get())
@ -403,7 +403,7 @@ iterator attestations*(pool: AttestationPool, slot: Option[Slot],
for candidateIndex in candidateIndices:
for _, entry in pool.candidates[candidateIndex]:
if index.isNone() or entry.data.index == index.get().uint64:
if committee_index.isNone() or entry.data.index == committee_index.get():
var singleAttestation = Attestation(
aggregation_bits: CommitteeValidatorsBits.init(entry.committee_len),
data: entry.data)
@ -458,36 +458,27 @@ func init(
cur_epoch = state.data.get_current_epoch()
template update_attestation_pool_cache(
epoch: Epoch, slot: Slot, participation_bitmap: untyped) =
for slot_committee_index in 0'u64 ..< get_committee_count_per_slot(
state.data, epoch, cache):
var
validator_bits =
CommitteeValidatorsBits.init(
get_beacon_committee_len(
state.data, slot, slot_committee_index.CommitteeIndex, cache).int)
i = 0
for index in get_beacon_committee(
state.data, slot, slot_committee_index.CommitteeIndex, cache):
if participation_bitmap[index] != 0:
# If any flag got set, there was an attestation from this validator.
validator_bits[i] = true
i += 1
result.add(
(slot, slot_committee_index),
validator_bits)
epoch: Epoch, participation_bitmap: untyped) =
let
start_slot = epoch.compute_start_slot_at_epoch()
for committee_index in get_committee_indices(state.data, epoch, cache):
for slot in start_slot..<start_slot + SLOTS_PER_EPOCH:
let committee = get_beacon_committee(
state.data, slot, committee_index, cache)
var
validator_bits = CommitteeValidatorsBits.init(committee.len)
for index_in_committee, validator_index in committee:
if participation_bitmap[validator_index] != 0:
# If any flag got set, there was an attestation from this validator.
validator_bits[index_in_committee] = true
result.add((slot, committee_index.uint64), validator_bits)
# This treats all types of rewards as equivalent, which isn't ideal
for slot_offset in 0 ..< SLOTS_PER_EPOCH:
update_attestation_pool_cache(
state.data.get_previous_epoch(),
prev_epoch.compute_start_slot_at_epoch + slot_offset,
state.data.previous_epoch_participation)
update_attestation_pool_cache(
state.data.get_current_epoch(),
cur_epoch.compute_start_slot_at_epoch + slot_offset,
state.data.current_epoch_participation)
update_attestation_pool_cache(
prev_epoch, state.data.previous_epoch_participation)
update_attestation_pool_cache(
cur_epoch, state.data.current_epoch_participation)
proc score(
attCache: var AttestationCache, data: AttestationData,
@ -702,7 +693,7 @@ proc getAggregatedAttestation*(pool: var AttestationPool,
var res: Option[Attestation]
for _, entry in pool.candidates[candidateIdx.get].mpairs():
doAssert entry.data.slot == slot
if index.uint64 != entry.data.index:
if index != entry.data.index:
continue
entry.updateAggregates()


@ -9,14 +9,15 @@
import
std/sequtils,
stew/results,
chronicles,
../extras,
../spec/[helpers, network, signatures, validator],
../spec/[beaconstate, helpers, network, signatures, validator],
../spec/datatypes/base,
./block_pools_types, blockchain_dag
export
base, extras, block_pools_types
base, extras, block_pools_types, results
# Spec functions implemented based on cached values instead of the full state
func count_active_validators*(epochInfo: EpochRef): uint64 =
@ -27,91 +28,89 @@ func get_committee_count_per_slot*(epochInfo: EpochRef): uint64 =
get_committee_count_per_slot(count_active_validators(epochInfo))
iterator get_committee_indices*(epochRef: EpochRef): CommitteeIndex =
for i in 0'u64..<get_committee_count_per_slot(epochRef):
yield CommitteeIndex(i)
let committees_per_slot = get_committee_count_per_slot(epochRef)
for committee_index in get_committee_indices(committees_per_slot):
yield committee_index
func get_committee_index*(epochRef: EpochRef, index: uint64):
Result[CommitteeIndex, cstring] =
check_attestation_index(index, get_committee_count_per_slot(epochRef))
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_beacon_committee
iterator get_beacon_committee*(
epochRef: EpochRef, slot: Slot, index: CommitteeIndex): ValidatorIndex =
epochRef: EpochRef, slot: Slot, committee_index: CommitteeIndex):
(int, ValidatorIndex) =
## Return the beacon committee at ``slot`` for ``index``.
let
committees_per_slot = get_committee_count_per_slot(epochRef)
for idx in compute_committee(
for index_in_committee, idx in compute_committee(
epochRef.shuffled_active_validator_indices,
(slot mod SLOTS_PER_EPOCH) * committees_per_slot +
index.uint64,
(slot mod SLOTS_PER_EPOCH) * committees_per_slot + committee_index.asUInt64,
committees_per_slot * SLOTS_PER_EPOCH
): yield idx
): yield (index_in_committee, idx)
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee*(
epochRef: EpochRef, slot: Slot, index: CommitteeIndex): seq[ValidatorIndex] =
epochRef: EpochRef, slot: Slot, committee_index: CommitteeIndex):
seq[ValidatorIndex] =
## Return the beacon committee at ``slot`` for ``index``.
let
committees_per_slot = get_committee_count_per_slot(epochRef)
compute_committee(
epochRef.shuffled_active_validator_indices,
(slot mod SLOTS_PER_EPOCH) * committees_per_slot +
index.uint64,
(slot mod SLOTS_PER_EPOCH) * committees_per_slot + committee_index.asUInt64,
committees_per_slot * SLOTS_PER_EPOCH
)
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_beacon_committee
func get_beacon_committee_len*(
epochRef: EpochRef, slot: Slot, index: CommitteeIndex): uint64 =
epochRef: EpochRef, slot: Slot, committee_index: CommitteeIndex): uint64 =
## Return the number of members in the beacon committee at ``slot`` for ``index``.
let
committees_per_slot = get_committee_count_per_slot(epochRef)
compute_committee_len(
count_active_validators(epochRef),
(slot mod SLOTS_PER_EPOCH) * committees_per_slot +
index.uint64,
(slot mod SLOTS_PER_EPOCH) * committees_per_slot + committee_index.asUInt64,
committees_per_slot * SLOTS_PER_EPOCH
)
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_attesting_indices
iterator get_attesting_indices*(epochRef: EpochRef,
data: AttestationData,
slot: Slot,
committee_index: CommitteeIndex,
bits: CommitteeValidatorsBits):
ValidatorIndex =
if bits.lenu64 != get_beacon_committee_len(epochRef, data.slot, data.index.CommitteeIndex):
if bits.lenu64 != get_beacon_committee_len(epochRef, slot, committee_index):
trace "get_attesting_indices: inconsistent aggregation and committee length"
else:
var i = 0
for index in get_beacon_committee(epochRef, data.slot, data.index.CommitteeIndex):
if bits[i]:
yield index
inc i
for index_in_committee, validator_index in get_beacon_committee(
epochRef, slot, committee_index).pairs():
if bits[index_in_committee]:
yield validator_index
func get_attesting_indices_one*(epochRef: EpochRef,
data: AttestationData,
slot: Slot,
committee_index: CommitteeIndex,
bits: CommitteeValidatorsBits):
Option[ValidatorIndex] =
# A variation on get_attesting_indices that returns the validator index only
# if only one validator index is set
if bits.lenu64 != get_beacon_committee_len(epochRef, data.slot, data.index.CommitteeIndex):
trace "get_attesting_indices: inconsistent aggregation and committee length"
none(ValidatorIndex)
else:
var res = none(ValidatorIndex)
var i = 0
for index in get_beacon_committee(epochRef, data.slot, data.index.CommitteeIndex):
if bits[i]:
if res.isNone():
res = some(index)
else:
return none(ValidatorIndex)
inc i
res
var res = none(ValidatorIndex)
for validator_index in get_attesting_indices(
epochRef, slot, committee_index, bits):
if res.isSome(): return none(ValidatorIndex)
res = some(validator_index)
res
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_attesting_indices
func get_attesting_indices*(epochRef: EpochRef,
data: AttestationData,
slot: Slot,
committee_index: CommitteeIndex,
bits: CommitteeValidatorsBits):
seq[ValidatorIndex] =
# TODO sequtils2 mapIt
for idx in get_attesting_indices(epochRef, data, bits):
for idx in get_attesting_indices(epochRef, slot, committee_index, bits):
result.add(idx)
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#is_valid_indexed_attestation
@ -162,7 +161,7 @@ func makeAttestationData*(
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/validator.md#attestation-data
AttestationData(
slot: slot,
index: committee_index.uint64,
index: committee_index.asUInt64,
beacon_block_root: bs.blck.root,
source: epochRef.current_justified_checkpoint,
target: Checkpoint(
@ -174,7 +173,7 @@ func makeAttestationData*(
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/validator.md#validator-assignments
iterator get_committee_assignments*(
epochRef: EpochRef, validator_indices: HashSet[ValidatorIndex]):
tuple[subcommitteeIdx: CommitteeIndex,
tuple[committee_index: CommitteeIndex,
subnet_id: SubnetId, slot: Slot] =
let
committees_per_slot = get_committee_count_per_slot(epochRef)
@ -182,14 +181,11 @@ iterator get_committee_assignments*(
start_slot = compute_start_slot_at_epoch(epoch)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
for index in 0'u64 ..< committees_per_slot:
let
idx = index.CommitteeIndex
if anyIt(
get_beacon_committee(epochRef, slot, idx), it in validator_indices):
for committee_index in get_committee_indices(committees_per_slot):
if anyIt(get_beacon_committee(epochRef, slot, committee_index), it in validator_indices):
yield (
idx,
compute_subnet_for_attestation(committees_per_slot, slot, idx),
committee_index,
compute_subnet_for_attestation(committees_per_slot, slot, committee_index),
slot)
func is_aggregator*(epochRef: EpochRef, slot: Slot, index: CommitteeIndex,


@ -25,11 +25,11 @@ type
SyncCommitteeMsgKey = object
originator: uint64 # ValidatorIndex avoiding mess with invalid values
slot: Slot
subcommitteeIndex: uint64 # SyncSubcommitteeIndex avoiding mess with invalid values
subcommitteeIdx: uint64 # SyncSubcommitteeIndex avoiding mess with invalid values
TrustedSyncCommitteeMsg* = object
slot*: Slot
subcommitteeIndex*: SyncSubcommitteeIndex
subcommitteeIdx*: SyncSubcommitteeIndex
positionInCommittee*: uint64
signature*: CookedSig
@ -90,11 +90,11 @@ func pruneData*(pool: var SyncCommitteeMsgPool, slot: Slot) =
func isSeen*(
pool: SyncCommitteeMsgPool,
msg: SyncCommitteeMessage,
subcommitteeIndex: SyncSubcommitteeIndex): bool =
subcommitteeIdx: SyncSubcommitteeIndex): bool =
let seenKey = SyncCommitteeMsgKey(
originator: msg.validator_index, # Might be unvalidated at this point
slot: msg.slot,
subcommitteeIndex: subcommitteeIndex.uint64)
subcommitteeIdx: subcommitteeIdx.uint64)
seenKey in pool.seenSyncMsgByAuthor
proc addSyncCommitteeMessage*(
@ -103,21 +103,21 @@ proc addSyncCommitteeMessage*(
blockRoot: Eth2Digest,
validatorIndex: uint64,
signature: CookedSig,
subcommitteeIndex: SyncSubcommitteeIndex,
subcommitteeIdx: SyncSubcommitteeIndex,
positionsInCommittee: openArray[uint64]) =
let
seenKey = SyncCommitteeMsgKey(
originator: validatorIndex,
slot: slot,
subcommitteeIndex: subcommitteeIndex.uint64)
subcommitteeIdx: subcommitteeIdx.uint64)
pool.seenSyncMsgByAuthor.incl seenKey
for position in positionsInCommittee:
pool.syncMessages.mgetOrPut(blockRoot, @[]).add TrustedSyncCommitteeMsg(
slot: slot,
subcommitteeIndex: subcommitteeIndex,
subcommitteeIdx: subcommitteeIdx,
positionInCommittee: position,
signature: signature)
@ -125,14 +125,14 @@ proc addSyncCommitteeMessage*(
slot = slot, blockRoot = shortLog(blockRoot), validatorIndex
func computeAggregateSig(votes: seq[TrustedSyncCommitteeMsg],
subcommitteeIndex: SyncSubcommitteeIndex,
subcommitteeIdx: SyncSubcommitteeIndex,
contribution: var SyncCommitteeContribution): bool =
var
aggregateSig {.noInit.}: AggregateSignature
initialized = false
for vote in votes:
if vote.subcommitteeIndex != subcommitteeIndex:
if vote.subcommitteeIdx != subcommitteeIdx:
continue
if not contribution.aggregation_bits[vote.positionInCommittee]:
@ -153,15 +153,15 @@ func produceContribution*(
pool: SyncCommitteeMsgPool,
slot: Slot,
headRoot: Eth2Digest,
subcommitteeIndex: SyncSubcommitteeIndex,
subcommitteeIdx: SyncSubcommitteeIndex,
outContribution: var SyncCommitteeContribution): bool =
if headRoot in pool.syncMessages:
outContribution.slot = slot
outContribution.beacon_block_root = headRoot
outContribution.subcommittee_index = subcommitteeIndex.asUInt64
outContribution.subcommittee_index = subcommitteeIdx.asUInt64
try:
computeAggregateSig(pool.syncMessages[headRoot],
subcommitteeIndex,
subcommitteeIdx,
outContribution)
except KeyError:
raiseAssert "We have checked for the key upfront"
@ -188,7 +188,7 @@ func isSeen*(
let seenKey = SyncCommitteeMsgKey(
originator: msg.aggregator_index,
slot: msg.contribution.slot,
subcommitteeIndex: msg.contribution.subcommittee_index)
subcommitteeIdx: msg.contribution.subcommittee_index)
seenKey in pool.seenContributionByAuthor
proc addContribution(pool: var SyncCommitteeMsgPool,
@ -198,7 +198,7 @@ proc addContribution(pool: var SyncCommitteeMsgPool,
let seenKey = SyncCommitteeMsgKey(
originator: aggregator_index,
slot: contribution.slot,
subcommitteeIndex: contribution.subcommittee_index)
subcommitteeIdx: contribution.subcommittee_index)
pool.seenContributionByAuthor.incl seenKey
template blockRoot: auto = contribution.beacon_block_root
@ -237,20 +237,20 @@ proc produceSyncAggregateAux(
initialized = false
startTime = Moment.now
for subnetId in allSyncSubcommittees():
if bestContributions.subnets[subnetId].totalParticipants == 0:
for subcommitteeIdx in SyncSubcommitteeIndex:
if bestContributions.subnets[subcommitteeIdx].totalParticipants == 0:
continue
for pos, value in bestContributions.subnets[subnetId].participationBits:
for pos, value in bestContributions.subnets[subcommitteeIdx].participationBits:
if value:
let globalPos = subnetId.asInt * SYNC_SUBCOMMITTEE_SIZE + pos
let globalPos = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE + pos
result.sync_committee_bits.setBit globalPos
if not initialized:
initialized = true
aggregateSig.init(bestContributions.subnets[subnetId].signature)
aggregateSig.init(bestContributions.subnets[subcommitteeIdx].signature)
else:
aggregateSig.aggregate(bestContributions.subnets[subnetId].signature)
aggregateSig.aggregate(bestContributions.subnets[subcommitteeIdx].signature)
if initialized:
result.sync_committee_signature = aggregateSig.finish.toValidatorSig


@ -330,21 +330,11 @@ func process_block*(self: var ForkChoiceBackend,
self.proto_array.onBlock(
block_root, parent_root, justified_checkpoint, finalized_checkpoint)
# TODO workaround for https://github.com/nim-lang/Nim/issues/18095
# it expresses as much of:
# blck: phase0.SomeBeaconBlock | altair.SomeBeaconBlock
# or
# blck: SomeSomeBeaconBlock
# as comes up. Other types can be added as needed.
type ReallyAnyBeaconBlock =
phase0.BeaconBlock | altair.BeaconBlock | bellatrix.BeaconBlock |
phase0.TrustedBeaconBlock | altair.TrustedBeaconBlock |
bellatrix.TrustedBeaconBlock
proc process_block*(self: var ForkChoice,
dag: ChainDAGRef,
epochRef: EpochRef,
blckRef: BlockRef,
blck: ReallyAnyBeaconBlock,
blck: ForkyTrustedBeaconBlock,
wallTime: BeaconTime): FcResult[void] =
? update_time(self, dag, wallTime)
? process_state(self.checkpoints, dag, epochRef, blckRef)
@ -355,12 +345,19 @@ proc process_block*(self: var ForkChoice,
let targetBlck = dag.getRef(attestation.data.target.root)
if targetBlck.isNil:
continue
if attestation.data.beacon_block_root in self.backend and
# TODO not-actually-correct hotfix for crash
# https://github.com/status-im/nimbus-eth2/issues/1879
attestation.data.index < committees_per_slot:
let committee_index = block:
let v = CommitteeIndex.init(attestation.data.index, committees_per_slot)
if v.isErr():
warn "Unexpected committee index in block attestation",
blck = shortLog(blck),
data = shortLog(attestation.data)
continue
v.get()
if attestation.data.beacon_block_root in self.backend:
for validator in get_attesting_indices(
epochRef, attestation.data, attestation.aggregation_bits):
epochRef, attestation.data.slot, committee_index,
attestation.aggregation_bits):
self.backend.process_attestation(
validator,
attestation.data.beacon_block_root,


@ -415,7 +415,7 @@ proc scheduleContributionChecks*(
batchCrypto: ref BatchCrypto,
fork: Fork, genesis_validators_root: Eth2Digest,
signedContributionAndProof: SignedContributionAndProof,
subcommitteeIndex: SyncSubcommitteeIndex,
subcommitteeIdx: SyncSubcommitteeIndex,
dag: ChainDAGRef): Result[tuple[
aggregatorFut, proofFut, contributionFut: Future[BatchResult],
sig: CookedSig], cstring] =
@ -445,7 +445,7 @@ proc scheduleContributionChecks*(
"SignedContributionAndProof: invalid contribution signature")
contributionKey = ? aggregateAll(
dag, dag.syncCommitteeParticipants(contribution.slot, subcommitteeIndex),
dag, dag.syncCommitteeParticipants(contribution.slot, subcommitteeIdx),
contribution.aggregation_bits)
let
aggregatorFut = batchCrypto.withBatch("scheduleContributionAndProofChecks.aggregator"):


@ -191,9 +191,12 @@ proc storeBlock*(
src, wallTime, trustedBlock.message)
for attestation in trustedBlock.message.body.attestations:
for idx in get_attesting_indices(
epochRef, attestation.data, attestation.aggregation_bits):
vm[].registerAttestationInBlock(attestation.data, idx,
for validator_index in get_attesting_indices(
epochRef, attestation.data.slot,
CommitteeIndex.init(attestation.data.index).expect(
"index has been checked"),
attestation.aggregation_bits):
vm[].registerAttestationInBlock(attestation.data, validator_index,
trustedBlock.message)
withState(dag[].clearanceState.data):


@ -79,7 +79,7 @@ func check_attestation_block(
ok()
func check_propagation_slot_range(
msgSlot: Slot, wallTime: BeaconTime): Result[void, ValidationError] =
msgSlot: Slot, wallTime: BeaconTime): Result[Slot, ValidationError] =
let
futureSlot = (wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).toSlot()
@ -99,7 +99,7 @@ func check_propagation_slot_range(
msgSlot + ATTESTATION_PROPAGATION_SLOT_RANGE < pastSlot.slot:
return errIgnore("Attestation slot in the past")
ok()
ok(msgSlot)
func check_beacon_and_target_block(
pool: var AttestationPool, data: AttestationData):
@ -149,12 +149,11 @@ func check_aggregation_count(
ok()
func check_attestation_subnet(
epochRef: EpochRef, attestation: Attestation,
epochRef: EpochRef, slot: Slot, committee_index: CommitteeIndex,
subnet_id: SubnetId): Result[void, ValidationError] =
let
expectedSubnet = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef),
attestation.data.slot, attestation.data.index.CommitteeIndex)
get_committee_count_per_slot(epochRef), slot, committee_index)
if expectedSubnet != subnet_id:
return errReject("Attestation not on the correct subnet")
@ -380,10 +379,11 @@ proc validateAttestation*(
# [REJECT] The attestation's epoch matches its target -- i.e.
# attestation.data.target.epoch ==
# compute_epoch_at_slot(attestation.data.slot)
block:
let slot = block:
let v = check_attestation_slot_target(attestation.data)
if v.isErr():
return errReject(v.error())
v.get()
# attestation.data.slot is within the last ATTESTATION_PROPAGATION_SLOT_RANGE
# slots (within a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e.
@ -391,7 +391,7 @@ proc validateAttestation*(
# >= attestation.data.slot (a client MAY queue future attestations for
# processing at the appropriate slot).
block:
let v = check_propagation_slot_range(attestation.data.slot, wallTime) # [IGNORE]
let v = check_propagation_slot_range(slot, wallTime) # [IGNORE]
if v.isErr():
return err(v.error())
@ -434,8 +434,11 @@ proc validateAttestation*(
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
if not (attestation.data.index < get_committee_count_per_slot(epochRef)):
return checkedReject("Attestation: committee index not within expected range")
let committee_index = block:
let idx = epochRef.get_committee_index(attestation.data.index)
if idx.isErr():
return checkedReject("Attestation: committee index not within expected range")
idx.get()
# [REJECT] The attestation is for the correct subnet -- i.e.
# compute_subnet_for_attestation(committees_per_slot,
@ -444,7 +447,8 @@ proc validateAttestation*(
# attestation.data.target.epoch), which may be pre-computed along with the
# committee information for the signature check.
block:
let v = check_attestation_subnet(epochRef, attestation, subnet_id) # [REJECT]
let v = check_attestation_subnet(
epochRef, attestation.data.slot, committee_index, subnet_id) # [REJECT]
if v.isErr():
return err(v.error)
@ -456,7 +460,7 @@ proc validateAttestation*(
# epoch matches its target and attestation.data.target.root is an ancestor of
# attestation.data.beacon_block_root.
if not (attestation.aggregation_bits.lenu64 == get_beacon_committee_len(
epochRef, attestation.data.slot, attestation.data.index.CommitteeIndex)):
epochRef, attestation.data.slot, committee_index)):
return checkedReject(
"Attestation: number of aggregation bits and committee size mismatch")
@ -465,7 +469,7 @@ proc validateAttestation*(
genesis_validators_root =
getStateField(pool.dag.headState.data, genesis_validators_root)
attesting_index = get_attesting_indices_one(
epochRef, attestation.data, attestation.aggregation_bits)
epochRef, slot, committee_index, attestation.aggregation_bits)
# The number of aggregation bits matches the committee size, which ensures
# this condition holds.
@ -544,17 +548,18 @@ proc validateAggregate*(
# [REJECT] The aggregate attestation's epoch matches its target -- i.e.
# `aggregate.data.target.epoch == compute_epoch_at_slot(aggregate.data.slot)`
block:
let slot = block:
let v = check_attestation_slot_target(aggregate.data)
if v.isErr():
return checkedReject(v.error)
v.get()
# [IGNORE] aggregate.data.slot is within the last
# ATTESTATION_PROPAGATION_SLOT_RANGE slots (with a
# MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance) -- i.e. aggregate.data.slot +
# ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot
block:
let v = check_propagation_slot_range(aggregate.data.slot, wallTime) # [IGNORE]
let v = check_propagation_slot_range(slot, wallTime) # [IGNORE]
if v.isErr():
return err(v.error())
@ -611,20 +616,21 @@ proc validateAggregate*(
# [REJECT] The committee index is within the expected range -- i.e.
# data.index < get_committee_count_per_slot(state, data.target.epoch).
if not (aggregate.data.index < get_committee_count_per_slot(epochRef)):
return checkedReject("Aggregate: committee index not within expected range")
let committee_index = block:
let idx = epochRef.get_committee_index(aggregate.data.index)
if idx.isErr():
return checkedReject("Attestation: committee index not within expected range")
idx.get()
if not is_aggregator(
epochRef, aggregate.data.slot, aggregate.data.index.CommitteeIndex,
aggregate_and_proof.selection_proof):
epochRef, slot, committee_index, aggregate_and_proof.selection_proof):
return checkedReject("Aggregate: incorrect aggregator")
# [REJECT] The aggregator's validator index is within the committee -- i.e.
# aggregate_and_proof.aggregator_index in get_beacon_committee(state,
# aggregate.data.slot, aggregate.data.index).
if aggregate_and_proof.aggregator_index.ValidatorIndex notin
get_beacon_committee(
epochRef, aggregate.data.slot, aggregate.data.index.CommitteeIndex):
get_beacon_committee(epochRef, slot, committee_index):
return checkedReject("Aggregate: aggregator's validator index not in committee")
# 1. [REJECT] The aggregate_and_proof.selection_proof is a valid signature of the
@ -638,9 +644,8 @@ proc validateAggregate*(
fork = pool.dag.forkAtEpoch(aggregate.data.slot.epoch)
genesis_validators_root =
getStateField(pool.dag.headState.data, genesis_validators_root)
let attesting_indices = get_attesting_indices(
epochRef, aggregate.data, aggregate.aggregation_bits)
attesting_indices = get_attesting_indices(
epochRef, slot, committee_index, aggregate.aggregation_bits)
let deferredCrypto = batchCrypto
.scheduleAggregateChecks(
@ -890,8 +895,11 @@ proc validateContribution*(
# [REJECT] The subcommittee index is in the allowed range
# i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT.
let subcommitteeIdx = msg.message.contribution.subcommittee_index.validateSyncCommitteeIndexOr:
return errReject("SignedContributionAndProof: subcommittee index too high")
let subcommitteeIdx = block:
let v = SyncSubcommitteeIndex.init(msg.message.contribution.subcommittee_index)
if v.isErr():
return errReject("SignedContributionAndProof: subcommittee index too high")
v.get()
# [REJECT] contribution_and_proof.selection_proof selects the validator as an aggregator for the slot
# i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof) returns True.


@ -962,7 +962,7 @@ proc queryRandom*(
peer = n.record.toURI(), exception = e.name, msg = e.msg
continue
for i in allSyncSubcommittees():
for i in SyncSubcommitteeIndex:
if wantedSyncnets[i] and syncnetsNode[i]:
score += 10 # connecting to the right syncnet is urgent
@ -2220,8 +2220,9 @@ proc broadcastBeaconBlock*(node: Eth2Node, forked: ForkedSignedBeaconBlock) =
withBlck(forked): node.broadcastBeaconBlock(blck)
proc broadcastSyncCommitteeMessage*(
node: Eth2Node, msg: SyncCommitteeMessage, committeeIdx: SyncSubcommitteeIndex) =
let topic = getSyncCommitteeTopic(node.forkDigests.altair, committeeIdx)
node: Eth2Node, msg: SyncCommitteeMessage,
subcommitteeIdx: SyncSubcommitteeIndex) =
let topic = getSyncCommitteeTopic(node.forkDigests.altair, subcommitteeIdx)
node.broadcast(topic, msg)
proc broadcastSignedContributionAndProof*(


@ -671,9 +671,8 @@ proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
node.network.unsubscribe(getAttesterSlashingsTopic(forkDigest))
node.network.unsubscribe(getAggregateAndProofsTopic(forkDigest))
for subnet_id in 0'u64 ..< ATTESTATION_SUBNET_COUNT:
node.network.unsubscribe(
getAttestationTopic(forkDigest, SubnetId(subnet_id)))
for subnet_id in SubnetId:
node.network.unsubscribe(getAttestationTopic(forkDigest, subnet_id))
node.actionTracker.subscribedSubnets = default(AttnetBits)
@ -683,9 +682,9 @@ proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Sl
var syncnets: SyncnetBits
# TODO: What are the best topic params for this?
for committeeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = committeeIdx
let idx = subcommitteeIdx
# TODO This should be done in dynamic way in trackSyncCommitteeTopics
node.network.subscribe(getSyncCommitteeTopic(forkDigest, idx), basicParams)
syncnets.setBit(idx.asInt)
@ -697,9 +696,9 @@ proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Sl
proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
node.removePhase0MessageHandlers(forkDigest)
for committeeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = committeeIdx
let idx = subcommitteeIdx
# TODO This should be done in dynamic way in trackSyncCommitteeTopics
node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx))
@ -1031,9 +1030,9 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, signedBlock)))
template installPhase0Validators(digest: auto) =
for it in 0'u64 ..< ATTESTATION_SUBNET_COUNT.uint64:
for it in SubnetId:
closureScope:
let subnet_id = SubnetId(it)
let subnet_id = it
node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id),
# This proc needs to be within closureScope; don't lift out of loop.
@ -1091,9 +1090,9 @@ proc installMessageValidators(node: BeaconNode) =
MsgSource.gossip, signedBlock)))
template installSyncCommitteeeValidators(digest: auto) =
for committeeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = committeeIdx
let idx = subcommitteeIdx
node.network.addAsyncValidator(
getSyncCommitteeTopic(digest, idx),
# This proc needs to be within closureScope; don't lift out of loop.


@ -566,21 +566,24 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
proc getCommittee(slot: Slot,
index: CommitteeIndex): RestBeaconStatesCommittees =
let validators = get_beacon_committee(stateData.data, slot, index,
cache).mapIt(it)
cache)
RestBeaconStatesCommittees(index: index, slot: slot,
validators: validators)
proc forSlot(slot: Slot, cindex: Option[CommitteeIndex],
res: var seq[RestBeaconStatesCommittees]) =
let committees_per_slot =
get_committee_count_per_slot(stateData.data, Epoch(slot), cache)
if cindex.isNone:
for committee_index in 0'u64 ..< committees_per_slot:
res.add(getCommittee(slot, CommitteeIndex(committee_index)))
for committee_index in
get_committee_indices(stateData.data, slot.epoch, cache):
res.add(getCommittee(slot, committee_index))
else:
let idx = cindex.get()
if uint64(idx) < committees_per_slot:
let
idx = cindex.get()
committees_per_slot = get_committee_count_per_slot(
stateData.data, slot.epoch, cache)
if idx < committees_per_slot:
res.add(getCommittee(slot, idx))
var res: seq[RestBeaconStatesCommittees]
@ -591,8 +594,9 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
vepoch.get()
if vslot.isNone():
for i in 0 ..< SLOTS_PER_EPOCH:
forSlot(compute_start_slot_at_epoch(qepoch) + i, vindex, res)
let start_slot = qepoch.compute_start_slot_at_epoch()
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
forSlot(slot, vindex, res)
else:
forSlot(vslot.get(), vindex, res)


@ -77,14 +77,13 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, PrunedStateError)
tmp.get()
let committees_per_slot = get_committee_count_per_slot(epochRef)
for i in 0 ..< SLOTS_PER_EPOCH:
let slot = compute_start_slot_at_epoch(qepoch) + i
for committee_index in 0'u64 ..< committees_per_slot:
let commitee = get_beacon_committee(
epochRef, slot, CommitteeIndex(committee_index)
)
for index_in_committee, validator_index in commitee:
let
committees_per_slot = get_committee_count_per_slot(epochRef)
start_slot = qepoch.compute_start_slot_at_epoch()
for committee_index in get_committee_indices(committees_per_slot):
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validator_index in committee:
if validator_index in indexList:
let validator_key = epochRef.validatorKey(validator_index)
if validator_key.isSome():
@ -92,11 +91,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
RestAttesterDuty(
pubkey: validator_key.get().toPubKey(),
validator_index: validator_index,
committee_index: CommitteeIndex(committee_index),
committee_length: lenu64(commitee),
committee_index: committee_index,
committee_length: lenu64(committee),
committees_at_slot: committees_per_slot,
validator_committee_index:
ValidatorIndex(index_in_committee),
validator_committee_index: uint64(index_in_committee),
slot: slot
)
)
@ -611,8 +609,6 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
"/eth/v1/validator/sync_committee_contribution") do (
slot: Option[Slot], subcommittee_index: Option[uint64],
beacon_block_root: Option[Eth2Digest]) -> RestApiResponse:
# We doing this check to avoid any confusion in future.
static: doAssert(SYNC_COMMITTEE_SUBNET_COUNT <= high(uint8))
let qslot =
if slot.isNone():
return RestApiResponse.jsonError(Http400, MissingSlotValueError)
@ -631,17 +627,13 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400,
MissingSubCommitteeIndexValueError)
else:
let res = subcommittee_index.get()
let v = subcommittee_index.get()
let res = (v and SyncSubcommitteeIndex.init(v.get()))
if res.isErr():
return RestApiResponse.jsonError(Http400,
InvalidSubCommitteeIndexValueError,
$res.error())
let value = res.get().validateSyncCommitteeIndexOr:
return RestApiResponse.jsonError(Http400,
InvalidSubCommitteeIndexValueError,
"subcommittee_index exceeds " &
"maximum allowed value")
value
InvalidSubCommitteeIndexValueError,
$res.error())
res.get()
let qroot =
if beacon_block_root.isNone():
return RestApiResponse.jsonError(Http400,


@ -352,11 +352,13 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
if index.isNone:
for committee_index in 0'u64..<committees_per_slot:
res.add(getCommittee(slot, committee_index.CommitteeIndex))
for committee_index in get_committee_indices(committees_per_slot):
res.add(getCommittee(slot, committee_index))
else:
if index.get() < committees_per_slot:
res.add(getCommittee(slot, CommitteeIndex(index.get())))
let cindex = CommitteeIndex.init(index.get()).expect(
"valid because verified against committees_per_slot")
res.add(getCommittee(slot, cindex))
var res: seq[RpcBeaconStatesCommittees]
@ -367,8 +369,9 @@ proc installBeaconApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
Epoch(epoch.get())
if slot.isNone:
for i in 0 ..< SLOTS_PER_EPOCH:
forSlot(compute_start_slot_at_epoch(qepoch) + i, res)
let start_slot = qepoch.compute_start_slot_at_epoch()
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
forSlot(slot, res)
else:
forSlot(Slot(slot.get()), res)


@ -92,18 +92,17 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
let
committees_per_slot = get_committee_count_per_slot(epochRef)
for i in 0 ..< SLOTS_PER_EPOCH:
let slot = compute_start_slot_at_epoch(epoch) + i
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
epochRef, slot, committee_index.CommitteeIndex)
for index_in_committee, validatorIdx in committee:
let curr_val_pubkey = epochRef.validatorKey(validatorIdx)
start_slot = compute_start_slot_at_epoch(epoch)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validator_index in committee:
let curr_val_pubkey = epochRef.validatorKey(validator_index)
if curr_val_pubkey.isSome():
if public_keys.findIt(it == curr_val_pubkey.get().toPubKey()) != -1:
result.add((public_key: curr_val_pubkey.get().toPubKey(),
validator_index: validatorIdx,
committee_index: committee_index.CommitteeIndex,
validator_index: validator_index,
committee_index: committee_index,
committee_length: committee.lenu64,
validator_committee_index: index_in_committee.uint64,
slot: slot))


@ -424,24 +424,24 @@ func get_attesting_indices*(state: ForkyBeaconState,
data: AttestationData,
bits: CommitteeValidatorsBits,
cache: var StateCache): seq[ValidatorIndex] =
## Return the set of attesting indices corresponding to ``data`` and ``bits``.
## Return the set of attesting indices corresponding to ``data`` and ``bits``
## or nothing if `data` is invalid
var res: seq[ValidatorIndex]
# Can't be an iterator due to https://github.com/nim-lang/Nim/issues/18188
if bits.lenu64 != get_beacon_committee_len(
state, data.slot, data.index.CommitteeIndex, cache):
trace "get_attesting_indices: inconsistent aggregation and committee length"
let committee_index = CommitteeIndex.init(data.index)
if committee_index.isErr() or bits.lenu64 != get_beacon_committee_len(
state, data.slot, committee_index.get(), cache):
trace "get_attesting_indices: invalid attestation data"
else:
var i = 0
for index in get_beacon_committee(
state, data.slot, data.index.CommitteeIndex, cache):
if bits[i]:
res.add index
inc i
for index_in_committee, validator_index in get_beacon_committee(
state, data.slot, committee_index.get(), cache).pairs():
if bits[index_in_committee]:
res.add validator_index
res
proc get_attesting_indices*(state: ForkedHashedBeaconState;
func get_attesting_indices*(state: ForkedHashedBeaconState;
data: AttestationData;
bits: CommitteeValidatorsBits;
cache: var StateCache): seq[ValidatorIndex] =
@ -487,42 +487,45 @@ proc is_valid_indexed_attestation*(
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#attestations
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
func check_attestation_slot_target*(data: AttestationData): Result[void, cstring] =
func check_attestation_slot_target*(data: AttestationData): Result[Slot, cstring] =
if not (data.target.epoch == compute_epoch_at_slot(data.slot)):
return err("Target epoch doesn't match attestation slot")
ok()
ok(data.slot)
func check_attestation_target_epoch(
data: AttestationData, current_epoch: Epoch): Result[void, cstring] =
data: AttestationData, current_epoch: Epoch): Result[Epoch, cstring] =
if not (data.target.epoch == get_previous_epoch(current_epoch) or
data.target.epoch == current_epoch):
return err("Target epoch not current or previous epoch")
ok()
ok(data.target.epoch)
func check_attestation_inclusion(data: AttestationData,
func check_attestation_inclusion(attestation_slot: Slot,
current_slot: Slot): Result[void, cstring] =
# Check for overflow
static:
doAssert SLOTS_PER_EPOCH >= MIN_ATTESTATION_INCLUSION_DELAY
if data.slot + SLOTS_PER_EPOCH <= data.slot:
if attestation_slot + SLOTS_PER_EPOCH <= attestation_slot:
return err("attestation data.slot overflow, malicious?")
if not (data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= current_slot):
if not (attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= current_slot):
return err("Attestation too new")
if not (current_slot <= data.slot + SLOTS_PER_EPOCH):
if not (current_slot <= attestation_slot + SLOTS_PER_EPOCH):
return err("Attestation too old")
ok()
func check_attestation_index(
data: AttestationData, committees_per_slot: uint64): Result[void, cstring] =
if not (data.index < committees_per_slot):
return err("Data index exceeds committee count")
func check_attestation_index*(
index, committees_per_slot: uint64):
Result[CommitteeIndex, cstring] =
CommitteeIndex.init(index, committees_per_slot)
ok()
func check_attestation_index*(
data: AttestationData, committees_per_slot: uint64):
Result[CommitteeIndex, cstring] =
check_attestation_index(data.index, committees_per_slot)
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/beacon-chain.md#get_attestation_participation_flag_indices
func get_attestation_participation_flag_indices(state: altair.BeaconState | bellatrix.BeaconState,
@ -598,21 +601,21 @@ proc check_attestation*(
let
data = attestation.data
? check_attestation_target_epoch(data, state.get_current_epoch())
? check_attestation_slot_target(data)
? check_attestation_inclusion(data, state.slot)
? check_attestation_index(
epoch = ? check_attestation_target_epoch(data, state.get_current_epoch())
slot = ? check_attestation_slot_target(data)
committee_index = ? check_attestation_index(
data,
get_committee_count_per_slot(state, data.target.epoch, cache))
get_committee_count_per_slot(state, epoch, cache))
? check_attestation_inclusion(slot, state.slot)
let committee_len = get_beacon_committee_len(
state, data.slot, data.index.CommitteeIndex, cache)
state, slot, committee_index, cache)
if attestation.aggregation_bits.lenu64 != committee_len:
return err("Inconsistent aggregation and committee length")
if data.target.epoch == get_current_epoch(state):
if epoch == get_current_epoch(state):
if not (data.source == state.current_justified_checkpoint):
return err("FFG data not matching current justified epoch")
else:


@ -503,10 +503,6 @@ type
chronicles.formatIt BeaconBlock: it.shortLog
chronicles.formatIt SyncSubcommitteeIndex: uint8(it)
template asInt*(x: SyncSubcommitteeIndex): int = int(x)
template asUInt8*(x: SyncSubcommitteeIndex): uint8 = uint8(x)
template asUInt64*(x: SyncSubcommitteeIndex): uint64 = uint64(x)
template `[]`*(a: auto; i: SyncSubcommitteeIndex): auto =
a[i.asInt]
@ -514,23 +510,7 @@ template `[]`*(arr: array[SYNC_COMMITTEE_SIZE, any] | seq;
idx: IndexInSyncCommittee): auto =
arr[int idx]
template `==`*(x, y: SyncSubcommitteeIndex): bool =
distinctBase(x) == distinctBase(y)
iterator allSyncSubcommittees*: SyncSubcommitteeIndex =
for subcommitteeIdx in 0 ..< SYNC_COMMITTEE_SUBNET_COUNT:
yield SyncSubcommitteeIndex(subcommitteeIdx)
template validateSyncCommitteeIndexOr*(
networkValParam: uint64,
elseBody: untyped): SyncSubcommitteeIndex =
let networkVal = networkValParam
if networkVal < SYNC_COMMITTEE_SUBNET_COUNT:
SyncSubcommitteeIndex(networkVal)
else:
elseBody
template asUInt8*(x: SyncSubcommitteeIndex): uint8 = uint8(x)
makeLimitedU64(SyncSubcommitteeIndex, SYNC_COMMITTEE_SUBNET_COUNT)
func shortLog*(v: SomeBeaconBlock): auto =
(


@ -32,7 +32,7 @@ export
import
std/[macros, hashes, strutils, tables, typetraits],
stew/[assign2, byteutils],
stew/[assign2, byteutils, results],
chronicles,
chronos/timer,
ssz_serialization/types as sszTypes,
@ -40,7 +40,7 @@ import
".."/[crypto, digest, presets]
export
timer, crypto, digest, sszTypes, presets
timer, crypto, digest, sszTypes, presets, results
# Presently, we're reusing the data types from the serialization (uint64) in the
# objects we pass around to the beacon chain logic, thus keeping the two
@ -139,14 +139,25 @@ type
# Nim seq constraints.
ValidatorIndex* = distinct uint32
# Though in theory the committee index would fit in a uint8, it is not used
# in a way that would significantly benefit from the smaller type, thus we
# leave it at spec size
CommitteeIndex* = distinct uint64
CommitteeIndex* = distinct uint8
## Index identifying a per-slot committee - depending on the active
## validator count, there may be up to `MAX_COMMITTEES_PER_SLOT` committees
## working in each slot.
##
## The `CommitteeIndex` type is constrained to values in the range
## `[0, MAX_COMMITTEES_PER_SLOT)` during initialization - to find out if
## a committee index is valid for a particular state, see
## `check_attestation_index`.
##
## `CommitteeIndex` is not used in `datatypes` to allow reading invalid data
## (validation happens on use instead, via `init`).
# The subnet id maps which gossip subscription to use to publish an
# attestation - it is distinct from the CommitteeIndex in particular
SubnetId* = distinct uint8
## The subnet id maps which gossip subscription to use to publish an
## attestation - it is distinct from the CommitteeIndex in particular
##
## The `SubnetId` type is constrained to values in the range
## `[0, ATTESTATION_SUBNET_COUNT)` during initialization.
Gwei* = uint64
@ -581,6 +592,60 @@ template ethTimeUnit(typ: type) {.dirty.} =
{.raises: [IOError, SerializationError, Defect].} =
value = typ reader.readValue(uint64)
template makeLimitedU64*(T: untyped, limit: uint64) =
# A "tigher" type is often used for T, but for the range check to be effective
# it must make sense..
static: doAssert limit <= distinctBase(T).high()
# Many `uint64` values in the spec have a more limited range of valid values
func init*(t: type T, value: uint64): Result[T, cstring] =
if value < limit:
ok(Result[T, cstring], T(value))
else:
err(Result[T, cstring], name(T) & " out of range")
iterator items*(t: type T): T =
for i in 0'u64..<limit:
yield T(i)
proc writeValue*(writer: var JsonWriter, value: T)
{.raises: [IOError, Defect].} =
writeValue(writer, distinctBase value)
proc readValue*(reader: var JsonReader, value: var T)
{.raises: [IOError, SerializationError, Defect].} =
let v = T.init(reader.readValue(uint64))
if v.isSome():
value = v.get()
else:
raiseUnexpectedValue(reader, $v.error())
template `==`*(x, y: T): bool = distinctBase(x) == distinctBase(y)
template `==`*(x: T, y: uint64): bool = distinctBase(x) == y
template `==`*(x: uint64, y: T): bool = x == distinctBase(y)
template `<`*(x, y: T): bool = distinctBase(x) < distinctBase(y)
template `<`*(x: T, y: uint64): bool = distinctBase(x) < y
template `<`*(x: uint64, y: T): bool = x < distinctBase(y)
template hash*(x: T): Hash =
hash distinctBase(x)
template `$`*(x: T): string = $ distinctBase(x)
template asInt*(x: T): int = int(distinctBase(x))
template asUInt64*(x: T): uint64 = uint64(distinctBase(x))
makeLimitedU64(CommitteeIndex, MAX_COMMITTEES_PER_SLOT)
makeLimitedU64(SubnetId, ATTESTATION_SUBNET_COUNT)
func init*(T: type CommitteeIndex, index, committees_per_slot: uint64):
Result[CommitteeIndex, cstring] =
if index < min(committees_per_slot, MAX_COMMITTEES_PER_SLOT):
ok(CommitteeIndex(index))
else:
err("Committee index out of range for epoch")
proc writeValue*(writer: var JsonWriter, value: ValidatorIndex)
{.raises: [IOError, Defect].} =
writeValue(writer, distinctBase value)
@ -589,26 +654,6 @@ proc readValue*(reader: var JsonReader, value: var ValidatorIndex)
{.raises: [IOError, SerializationError, Defect].} =
value = ValidatorIndex reader.readValue(distinctBase ValidatorIndex)
proc writeValue*(writer: var JsonWriter, value: CommitteeIndex)
{.raises: [IOError, Defect].} =
writeValue(writer, distinctBase value)
proc readValue*(reader: var JsonReader, value: var CommitteeIndex)
{.raises: [IOError, SerializationError, Defect].} =
value = CommitteeIndex reader.readValue(distinctBase CommitteeIndex)
proc writeValue*(writer: var JsonWriter, value: SubnetId)
{.raises: [IOError, Defect].} =
writeValue(writer, distinctBase value)
proc readValue*(reader: var JsonReader, value: var SubnetId)
{.raises: [IOError, SerializationError, Defect].} =
let v = reader.readValue(distinctBase SubnetId)
if v > ATTESTATION_SUBNET_COUNT:
raiseUnexpectedValue(
reader, "Subnet id must be <= " & $ATTESTATION_SUBNET_COUNT)
value = SubnetId(v)
template writeValue*(
writer: var JsonWriter, value: Version | ForkDigest | DomainType) =
writeValue(writer, to0xHex(distinctBase(value)))
@ -666,7 +711,7 @@ template `<`*(x, y: ValidatorIndex): bool =
template hash*(x: ValidatorIndex): Hash =
hash distinctBase(x)
template `$`*(x: ValidatorIndex): auto =
template `$`*(x: ValidatorIndex): string =
$ distinctBase(x)
template `==`*(x: uint64, y: ValidatorIndex): bool =
@ -675,27 +720,9 @@ template `==`*(x: uint64, y: ValidatorIndex): bool =
template `==`*(x: ValidatorIndex, y: uint64): bool =
uint64(x) == y
template `==`*(x, y: CommitteeIndex): bool =
distinctBase(x) == distinctBase(y)
template `<`*(x, y: CommitteeIndex): bool =
distinctBase(x) < distinctBase(y)
template hash*(x: CommitteeIndex): Hash =
hash distinctBase(x)
template `$`*(x: CommitteeIndex): auto =
$ distinctBase(x)
template `==`*(x, y: SubnetId): bool =
distinctBase(x) == distinctBase(y)
template `==`*(x, y: JustificationBits): bool =
distinctBase(x) == distinctBase(y)
template `$`*(x: SubnetId): string =
$ distinctBase(x)
func `as`*(d: DepositData, T: type DepositMessage): T =
T(pubkey: d.pubkey,
withdrawal_credentials: d.withdrawal_credentials,


@ -469,14 +469,16 @@ proc readValue*(reader: var JsonReader[RestJson],
## CommitteeIndex
proc writeValue*(writer: var JsonWriter[RestJson], value: CommitteeIndex) {.
raises: [IOError, Defect].} =
writeValue(writer, Base10.toString(uint64(value)))
writeValue(writer, value.asUInt64)
proc readValue*(reader: var JsonReader[RestJson], value: var CommitteeIndex) {.
raises: [IOError, SerializationError, Defect].} =
let svalue = reader.readValue(string)
let res = Base10.decode(uint64, svalue)
var v: uint64
reader.readValue(v)
let res = CommitteeIndex.init(v)
if res.isOk():
value = CommitteeIndex(res.get())
value = res.get()
else:
reader.raiseUnexpectedValue($res.error())
@ -904,17 +906,17 @@ proc writeValue*(writer: var JsonWriter[RestJson], value: ForkedHashedBeaconStat
proc writeValue*(writer: var JsonWriter[RestJson],
value: SyncSubcommitteeIndex) {.
raises: [IOError, Defect].} =
writeValue(writer, Base10.toString(uint8(value)))
writeValue(writer, value.asUInt64)
proc readValue*(reader: var JsonReader[RestJson],
value: var SyncSubcommitteeIndex) {.
raises: [IOError, SerializationError, Defect].} =
let res = Base10.decode(uint8, reader.readValue(string))
var v: uint64
reader.readValue(v)
let res = SyncSubcommitteeIndex.init(v)
if res.isOk():
if res.get() < SYNC_COMMITTEE_SUBNET_COUNT:
value = SyncSubcommitteeIndex(res.get())
else:
reader.raiseUnexpectedValue("Sync sub-committee index out of rage")
value = res.get()
else:
reader.raiseUnexpectedValue($res.error())
@ -1601,15 +1603,12 @@ proc decodeString*(t: typedesc[PeerID],
proc decodeString*(t: typedesc[CommitteeIndex],
value: string): Result[CommitteeIndex, cstring] =
let res = ? Base10.decode(uint64, value)
ok(CommitteeIndex(res))
CommitteeIndex.init(res)
proc decodeString*(t: typedesc[SyncSubcommitteeIndex],
value: string): Result[SyncSubcommitteeIndex, cstring] =
let res = ? Base10.decode(uint8, value)
if res.get < SYNC_COMMITTEE_SUBNET_COUNT:
ok(CommitteeIndex(res))
else:
err("sync subcommittee index out of range")
let res = ? Base10.decode(uint64, value)
SyncSubcommitteeIndex.init(res)
proc decodeString*(t: typedesc[Eth2Digest],
value: string): Result[Eth2Digest, cstring] =


@ -94,7 +94,7 @@ type
committee_index*: CommitteeIndex
committee_length*: uint64
committees_at_slot*: uint64
validator_committee_index*: ValidatorIndex
validator_committee_index*: uint64
slot*: Slot
RestProposerDuty* = object


@ -87,7 +87,7 @@ func getAttestationTopic*(forkDigest: ForkDigest,
func getSyncCommitteeTopic*(forkDigest: ForkDigest,
subcommitteeIdx: SyncSubcommitteeIndex): string =
## For subscribing and unsubscribing to/from a subnet.
eth2Prefix(forkDigest) & "sync_committee_" & $(subcommitteeIdx.asUInt8) & "/ssz_snappy"
eth2Prefix(forkDigest) & "sync_committee_" & $subcommitteeIdx & "/ssz_snappy"
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/p2p-interface.md#topics-and-messages
func getSyncCommitteeContributionAndProofTopic*(forkDigest: ForkDigest): string =


@ -169,13 +169,9 @@ func get_committee_count_per_slot*(state: ForkyBeaconState,
epoch: Epoch,
cache: var StateCache): uint64 =
## Return the number of committees at ``slot``.
let
active_validator_count = count_active_validators(state, epoch, cache)
result = get_committee_count_per_slot(active_validator_count)
# Otherwise, get_beacon_committee(...) cannot access some committees.
doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT) >= uint64(result)
get_committee_count_per_slot(active_validator_count)
func get_committee_count_per_slot*(state: ForkedHashedBeaconState,
epoch: Epoch,
@ -183,16 +179,19 @@ func get_committee_count_per_slot*(state: ForkedHashedBeaconState,
withState(state):
get_committee_count_per_slot(state.data, epoch, cache)
iterator committee_indices_per_slot*(state: ForkyBeaconState,
epoch: Epoch,
cache: var StateCache): CommitteeIndex =
for idx in 0'u64 ..< get_committee_count_per_slot(state, epoch, cache):
yield CommitteeIndex.verifiedValue(idx)
iterator get_committee_indices*(committee_count_per_slot: uint64): CommitteeIndex =
for idx in 0'u64..<min(committee_count_per_slot, MAX_COMMITTEES_PER_SLOT):
let committee_index = CommitteeIndex.init(idx).expect("value clamped")
yield committee_index
func get_committee_count_per_slot*(state: ForkyBeaconState,
slot: Slot,
cache: var StateCache): uint64 =
get_committee_count_per_slot(state, slot.compute_epoch_at_slot, cache)
iterator get_committee_indices*(state: ForkyBeaconState | ForkedHashedBeaconState,
epoch: Epoch,
cache: var StateCache): CommitteeIndex =
let committee_count_per_slot =
get_committee_count_per_slot(state, epoch, cache)
for committee_index in get_committee_indices(committee_count_per_slot):
yield committee_index
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_previous_epoch
func get_previous_epoch*(current_epoch: Epoch): Epoch =
@ -227,11 +226,13 @@ func compute_committee_slice*(
0 .. -1
iterator compute_committee*(shuffled_indices: seq[ValidatorIndex],
index: uint64, count: uint64): ValidatorIndex =
index: uint64, count: uint64): (int, ValidatorIndex) =
let
slice = compute_committee_slice(shuffled_indices.lenu64, index, count)
var idx = 0
for i in slice:
yield shuffled_indices[i]
yield (idx, shuffled_indices[i])
idx += 1
func compute_committee*(shuffled_indices: seq[ValidatorIndex],
index: uint64, count: uint64): seq[ValidatorIndex] =
@ -259,17 +260,17 @@ func compute_committee_len*(
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/beacon-chain.md#get_beacon_committee
iterator get_beacon_committee*(
state: ForkyBeaconState, slot: Slot, index: CommitteeIndex,
cache: var StateCache): ValidatorIndex =
cache: var StateCache): (int, ValidatorIndex) =
## Return the beacon committee at ``slot`` for ``index``.
let
epoch = compute_epoch_at_slot(slot)
committees_per_slot = get_committee_count_per_slot(state, epoch, cache)
for idx in compute_committee(
for index_in_committee, idx in compute_committee(
cache.get_shuffled_active_validator_indices(state, epoch),
(slot mod SLOTS_PER_EPOCH) * committees_per_slot +
index.uint64,
committees_per_slot * SLOTS_PER_EPOCH
): yield idx
): yield (index_in_committee, idx)
func get_beacon_committee*(
state: ForkyBeaconState, slot: Slot, index: CommitteeIndex,


@ -215,7 +215,7 @@ proc produceAndPublishAggregates(service: AttestationServiceRef,
let
vc = service.client
slot = adata.slot
committeeIndex = CommitteeIndex(adata.index)
committeeIndex = adata.index
attestationRoot = adata.hash_tree_root()
let aggregateItems =


@ -287,22 +287,22 @@ proc sendSyncCommitteeMessages*(node: BeaconNode,
let (pending, indices) = block:
var resFutures: seq[Future[SendResult]]
var resIndices: seq[int]
for committeeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
for valKey in syncSubcommittee(
node.dag.headSyncCommittees.current_sync_committee, committeeIdx):
node.dag.headSyncCommittees.current_sync_committee, subcommitteeIdx):
let index = keysCur.getOrDefault(uint64(valKey), -1)
if index >= 0:
resIndices.add(index)
resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
committeeIdx, true))
for committeeIdx in allSyncSubcommittees():
subcommitteeIdx, true))
for subcommitteeIdx in SyncSubcommitteeIndex:
for valKey in syncSubcommittee(
node.dag.headSyncCommittees.next_sync_committee, committeeIdx):
node.dag.headSyncCommittees.next_sync_committee, subcommitteeIdx):
let index = keysNxt.getOrDefault(uint64(valKey), -1)
if index >= 0:
resIndices.add(index)
resFutures.add(node.sendSyncCommitteeMessage(msgs[index],
committeeIdx, true))
subcommitteeIdx, true))
(resFutures, resIndices)
await allFutures(pending)
@ -635,7 +635,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
genesis_validators_root =
getStateField(node.dag.headState.data, genesis_validators_root)
for committee_index in get_committee_indices(epochRef):
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validator_index in committee:
@ -658,7 +658,7 @@ proc handleAttestations(node: BeaconNode, head: BlockRef, slot: Slot) =
signing_root)
if registered.isOk():
let subnet_id = compute_subnet_for_attestation(
committees_per_slot, data.slot, data.index.CommitteeIndex)
committees_per_slot, data.slot, committee_index)
asyncSpawn createAndSendAttestation(
node, fork, genesis_validators_root, validator, data,
committee.len(), index_in_committee, subnet_id)
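The duty loops now take their indices from `get_committee_indices`, which hands out already-typed `CommitteeIndex` values, so the `.CommitteeIndex` casts at each call site disappear. Such an iterator could be sketched as follows; the definitions are stand-ins rather than the spec module's code:

```nim
type CommitteeIndex = distinct uint64

iterator get_committee_indices(committees_per_slot: uint64): CommitteeIndex =
  ## One typed index per committee in the slot.
  for idx in 0'u64 ..< committees_per_slot:
    yield CommitteeIndex(idx)

for committee_index in get_committee_indices(4):
  echo "committee ", uint64(committee_index)
```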
@ -722,14 +722,14 @@ proc handleSyncCommitteeMessages(node: BeaconNode, head: BlockRef, slot: Slot) =
# TODO Use a view type to avoid the copy
var syncCommittee = node.dag.syncCommitteeParticipants(slot + 1)
for committeeIdx in allSyncSubcommittees():
for valIdx in syncSubcommittee(syncCommittee, committeeIdx):
for subcommitteeIdx in SyncSubcommitteeIndex:
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
let validator = node.getAttachedValidator(
getStateField(node.dag.headState.data, validators), valIdx)
if isNil(validator) or validator.index.isNone():
continue
asyncSpawn createAndSendSyncCommitteeMessage(node, slot, validator,
committeeIdx, head)
subcommitteeIdx, head)
proc signAndSendContribution(node: BeaconNode,
validator: AttachedValidator,
@ -776,7 +776,7 @@ proc handleSyncCommitteeContributions(node: BeaconNode,
var selectionProofs: seq[Future[SignatureResult]]
var time = timeIt:
for subcommitteeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
# TODO Hoist outside of the loop with a view type
# to avoid the repeated offset calculations
for valIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
@ -862,7 +862,6 @@ proc makeAggregateAndProof*(
pool: var AttestationPool, epochRef: EpochRef, slot: Slot, index: CommitteeIndex,
validatorIndex: ValidatorIndex, slot_signature: ValidatorSig): Option[AggregateAndProof] =
doAssert validatorIndex in get_beacon_committee(epochRef, slot, index)
doAssert index.uint64 < get_committee_count_per_slot(epochRef)
# TODO for testing purposes, refactor this into the condition check
# and just calculation
@ -902,46 +901,48 @@ proc sendAggregatedAttestations(
var
slotSigs: seq[Future[SignatureResult]] = @[]
slotSigsData: seq[tuple[committee_index: uint64,
validator_idx: ValidatorIndex,
slotSigsData: seq[tuple[committee_index: CommitteeIndex,
validator_index: ValidatorIndex,
v: AttachedValidator]] = @[]
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
epochRef, slot, committee_index.CommitteeIndex)
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validatorIdx in committee:
let validator = node.getAttachedValidator(epochRef, validatorIdx)
for index_in_committee, validator_index in committee:
let validator = node.getAttachedValidator(epochRef, validator_index)
if validator != nil:
# the validator index and private key pair.
slotSigs.add getSlotSig(validator, fork,
genesis_validators_root, slot)
slotSigsData.add (committee_index, validatorIdx, validator)
slotSigsData.add (committee_index, validator_index, validator)
await allFutures(slotSigs)
for curr in zip(slotSigsData, slotSigs):
let slotSig = curr[1].read()
doAssert slotSigsData.len == slotSigs.len
for i in 0..<slotSigs.len:
let
slotSig = slotSigs[i].read()
data = slotSigsData[i]
if slotSig.isErr():
error "Unable to create slot signature using remote signer",
validator = shortLog(curr[0].v),
validator = shortLog(data.v),
slot, error_msg = slotSig.error()
continue
let aggregateAndProof =
makeAggregateAndProof(node.attestationPool[], epochRef, slot,
curr[0].committee_index.CommitteeIndex,
curr[0].validator_idx,
data.committee_index,
data.validator_index,
slotSig.get())
# Don't broadcast when, e.g., this node isn't aggregator
if aggregateAndProof.isSome:
let sig =
block:
let res = await signAggregateAndProof(curr[0].v,
let res = await signAggregateAndProof(data.v,
aggregateAndProof.get, fork, genesis_validators_root)
if res.isErr():
error "Unable to sign aggregated attestation using remote signer",
validator = shortLog(curr[0].v), error_msg = res.error()
validator = shortLog(data.v), error_msg = res.error()
return
res.get()
var signedAP = SignedAggregateAndProof(
@ -950,13 +951,12 @@ proc sendAggregatedAttestations(
node.network.broadcastAggregateAndProof(signedAP)
# The subnet on which the attestations (should have) arrived
let subnet_id = compute_subnet_for_attestation(
committees_per_slot, signedAP.message.aggregate.data.slot,
signedAP.message.aggregate.data.index.CommitteeIndex)
committees_per_slot, slot, data.committee_index)
notice "Aggregated attestation sent",
attestation = shortLog(signedAP.message.aggregate),
aggregator_index = signedAP.message.aggregator_index,
signature = shortLog(signedAP.signature),
validator = shortLog(curr[0].v),
validator = shortLog(data.v),
subnet_id
proc updateValidatorMetrics*(node: BeaconNode) =
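The aggregation loop also drops `zip` in favour of indexing two parallel seqs, avoiding an intermediate seq of tuples, which is the commit message's "few bytes". In isolation the pattern looks like this, with fabricated data:

```nim
let
  slotSigsData = @["duty A", "duty B"]  # stand-in duty records
  slotSigs = @[100, 200]                # stand-in slot signatures

doAssert slotSigsData.len == slotSigs.len
for i in 0 ..< slotSigs.len:
  echo slotSigsData[i], " -> ", slotSigs[i]
```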
@ -1145,28 +1145,35 @@ proc sendAttestation*(node: BeaconNode,
attestation: Attestation): Future[SendResult] {.async.} =
# REST/JSON-RPC API helper procedure.
let
blck =
target =
block:
let res = node.dag.getRef(attestation.data.beacon_block_root)
let res = node.dag.getRef(attestation.data.target.root)
if isNil(res):
notice "Attempt to send attestation for unknown block",
notice "Attempt to send attestation for unknown target",
attestation = shortLog(attestation)
return SendResult.err(
"Attempt to send attestation for unknown block")
res
epochRef = block:
let tmp = node.dag.getEpochRef(
blck, attestation.data.target.epoch, false)
target, attestation.data.target.epoch, false)
if tmp.isErr(): # Shouldn't happen
warn "Cannot construct EpochRef for attestation, skipping send - report bug",
blck = shortLog(blck),
target = shortLog(target),
attestation = shortLog(attestation)
return
tmp.get()
committee_index = block:
let v = epochRef.get_committee_index(attestation.data.index)
if v.isErr():
notice "Invalid committee index in attestation",
attestation = shortLog(attestation)
return SendResult.err("Invalid committee index in attestation")
v.get()
subnet_id = compute_subnet_for_attestation(
get_committee_count_per_slot(epochRef), attestation.data.slot,
attestation.data.index.CommitteeIndex)
committee_index)
res = await node.sendAttestation(attestation, subnet_id,
checkSignature = true)
if not res.isOk():
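`sendAttestation` now looks up the block by the attestation's target root and validates the wire-level `uint64` committee index before computing the subnet. A stdlib-only sketch of such an ingress check; the real code goes through `EpochRef.get_committee_index` and returns a `Result`, whereas the name and the `Option` return type here are illustrative:

```nim
import std/options

type CommitteeIndex = distinct uint64

func checkedCommitteeIndex(index, committees_per_slot: uint64):
    Option[CommitteeIndex] =
  ## Accept the raw index only if a committee with that index exists.
  if index < committees_per_slot:
    some(CommitteeIndex(index))
  else:
    none(CommitteeIndex)

doAssert checkedCommitteeIndex(2, 4).isSome
doAssert checkedCommitteeIndex(64, 4).isNone
```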
@ -1322,16 +1329,15 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
fork = node.dag.forkAtEpoch(slot.epoch)
committees_per_slot = get_committee_count_per_slot(epochRef)
for committee_index in 0'u64..<committees_per_slot:
let committee = get_beacon_committee(
epochRef, slot, committee_index.CommitteeIndex)
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(epochRef, slot, committee_index)
for index_in_committee, validatorIdx in committee:
let validator = node.getAttachedValidator(epochRef, validatorIdx)
for index_in_committee, validator_index in committee:
let validator = node.getAttachedValidator(epochRef, validator_index)
if validator != nil:
let
subnet_id = compute_subnet_for_attestation(
committees_per_slot, slot, committee_index.CommitteeIndex)
committees_per_slot, slot, committee_index)
let slotSigRes = await getSlotSig(validator, fork,
genesis_validators_root, slot)
if slotSigRes.isErr():
@ -1341,4 +1347,4 @@ proc registerDuties*(node: BeaconNode, wallSlot: Slot) {.async.} =
continue
let isAggregator = is_aggregator(committee.lenu64, slotSigRes.get())
node.registerDuty(slot, subnet_id, validatorIdx, isAggregator)
node.registerDuty(slot, subnet_id, validator_index, isAggregator)

View File

@ -615,12 +615,12 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
state[].data.get_block_root_at_slot(penultimate_epoch_end_slot)
let first_slot_attesters = block:
let committee_count = state[].data.get_committee_count_per_slot(
let committees_per_slot = state[].data.get_committee_count_per_slot(
prev_epoch_target_slot.epoch, cache)
var indices = HashSet[ValidatorIndex]()
for committee_index in 0..<committee_count:
for committee_index in get_committee_indices(committees_per_slot):
for validator_index in state[].data.get_beacon_committee(
prev_epoch_target_slot, committee_index.CommitteeIndex, cache):
prev_epoch_target_slot, committee_index, cache):
indices.incl(validator_index)
indices
case info.kind
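The perf tool collects the slot's attesters by walking every typed committee index and folding the members into a `HashSet`. A toy version of that accumulation step, with fabricated committees:

```nim
import std/sets

var indices = initHashSet[int]()
for committee in @[@[1, 2, 3], @[3, 4]]:  # fabricated committee contents
  for validator_index in committee:
    indices.incl(validator_index)

doAssert indices.len == 4  # validator 3 sits in two committees but counts once
```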

View File

@ -1460,9 +1460,9 @@
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"status": {"operator": "equals", "value": "400"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"], "value": [{"index": "", "slot": "", "validators": [""]}]}]
"body": [{"operator": "jstructcmpns", "value": {"code": "", "message": ""}}]
}
},
{
@ -2512,9 +2512,9 @@
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"status": {"operator": "equals", "value": "400"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"],"value": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}]}]
"body": [{"operator": "jstructcmpns", "value": {"code": "", "message": ""}}]
}
},
{
@ -2572,9 +2572,9 @@
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "200"},
"status": {"operator": "equals", "value": "400"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmps", "start": ["data"],"value": [{"aggregation_bits": "", "signature": "", "data": {"slot": "", "index": "", "beacon_block_root": "", "source": {"epoch": "", "root": ""}, "target": {"epoch": "", "root": ""}}}]}]
"body": [{"operator": "jstructcmpns", "value": {"code": "", "message": ""}}]
}
},
{
@ -3017,7 +3017,7 @@
"headers": {"Accept": "application/json"}
},
"response": {
"status": {"operator": "equals", "value": "503"},
"status": {"operator": "equals", "value": "400"},
"headers": [{"key": "Content-Type", "value": "application/json", "operator": "equals"}],
"body": [{"operator": "jstructcmpns", "value": {"code": "", "message": ""}}]
}

View File

@ -116,19 +116,19 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
let committees_per_slot =
get_committee_count_per_slot(stateData.data, slot.epoch, cache)
for committee_index in 0'u64..<committees_per_slot:
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(
stateData.data, slot, committee_index.CommitteeIndex, cache)
stateData.data, slot, committee_index, cache)
for index_in_committee, validatorIdx in committee:
for index_in_committee, validator_index in committee:
if rand(r, 1.0) <= attesterRatio:
let
data = makeAttestationData(
stateData.data, slot, committee_index.CommitteeIndex, blck.root)
stateData.data, slot, committee_index, blck.root)
sig =
get_attestation_signature(getStateField(stateData.data, fork),
getStateField(stateData.data, genesis_validators_root),
data, MockPrivKeys[validatorIdx])
data, MockPrivKeys[validator_index])
var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
aggregation_bits.setBit index_in_committee
@ -137,7 +137,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
data: data,
aggregation_bits: aggregation_bits,
signature: sig.toValidatorSig()
), [validatorIdx], sig, data.slot.toBeaconTime)
), [validator_index], sig, data.slot.toBeaconTime)
do:
raiseAssert "withUpdatedState failed"
@ -157,7 +157,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
var aggregators: seq[Aggregator]
for subcommitteeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
for validatorIdx in syncSubcommittee(syncCommittee, subcommitteeIdx):
if rand(r, 1.0) > syncCommitteeRatio:
continue

View File

@ -206,8 +206,8 @@ proc stepOnAttestation(
dag.getEpochRef(
dag.head, time.slotOrZero().compute_epoch_at_slot(),
false).expect("no pruning in test")
let attesters = epochRef.get_attesting_indices(att.data, att.aggregation_bits)
let attesters = epochRef.get_attesting_indices(
att.data.slot, CommitteeIndex(att.data.index), att.aggregation_bits)
let status = fkChoice[].on_attestation(
dag,
att.data.slot, att.data.beacon_block_root, attesters,

View File

@ -42,11 +42,9 @@ func addMockAttestations*(
start_slot = compute_start_slot_at_epoch(epoch)
committees_per_slot = get_committee_count_per_slot(state, epoch, cache)
# for-loop of distinct type is broken: https://github.com/nim-lang/Nim/issues/12074
for slot in start_slot.uint64 ..< start_slot.uint64 + SLOTS_PER_EPOCH:
for index in 0'u64 ..< committees_per_slot:
let committee = get_beacon_committee(
state, slot.Slot, index.CommitteeIndex, cache)
for slot in start_slot ..< start_slot + SLOTS_PER_EPOCH:
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(state, slot, committee_index, cache)
# Create a bitfield filled with the given count per attestation,
# exactly on the right-most part of the committee field.
@ -70,7 +68,7 @@ func addMockAttestations*(
aggregation_bits: aggregation_bits,
data: AttestationData(
slot: slot.Slot,
index: index,
index: committee_index.uint64,
beacon_block_root: [byte 0xFF] * 32, # Irrelevant for testing
source: source,
target: target,

View File

@ -566,9 +566,9 @@ suite "Attestation pool processing" & preset():
pruneAtFinalization(dag, pool[])
attestations.setlen(0)
for index in 0'u64 ..< committees_per_slot:
for committee_index in get_committee_indices(committees_per_slot):
let committee = get_beacon_committee(
state[].data, getStateField(state.data, slot), index.CommitteeIndex,
state[].data, getStateField(state.data, slot), committee_index,
cache)
# Create a bitfield filled with the given count per attestation,
@ -581,7 +581,7 @@ suite "Attestation pool processing" & preset():
aggregation_bits: aggregation_bits,
data: makeAttestationData(
state[].data, getStateField(state.data, slot),
index.CommitteeIndex, blockRef.get().root)
committee_index, blockRef.get().root)
# signature: ValidatorSig()
)

View File

@ -54,7 +54,7 @@ suite "Gossip validation " & preset():
defaultRuntimeConfig, state.data, getStateField(state.data, slot) + 1,
cache, info, {})
test "Any committee index is valid":
test "Empty committee when no committee for slot":
template committee(idx: uint64): untyped =
get_beacon_committee(
dag.headState.data, dag.head.slot, idx.CommitteeIndex, cache)
@ -65,13 +65,11 @@ suite "Gossip validation " & preset():
check:
committee(0).len > 0
committee(10000).len == 0
committee(uint64.high).len == 0
committee(63).len == 0
check:
committeeLen(2) > 0
committeeLen(10000) == 0
committeeLen(uint64.high) == 0
committeeLen(63) == 0
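The renamed test pins down the boundary behaviour: with `MAX_COMMITTEES_PER_SLOT = 64`, an index of 63 is still representable as a `CommitteeIndex`, so asking for its committee in a slot that has fewer committees should simply return an empty committee rather than trip validation. A toy model of that behaviour, with illustrative names and values:

```nim
const MAX_COMMITTEES_PER_SLOT = 64'u64

proc toyCommittee(index, committees_per_slot: uint64): seq[int] =
  ## Empty when the index is representable but unused in this slot.
  doAssert index < MAX_COMMITTEES_PER_SLOT
  if index < committees_per_slot:
    result = @[1, 2, 3]  # pretend validators

doAssert toyCommittee(2, 4).len == 3
doAssert toyCommittee(63, 4).len == 0
```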
test "validateAttestation":
var

View File

@ -190,7 +190,7 @@ func makeAttestationData*(
func makeAttestation*(
state: ForkedHashedBeaconState, beacon_block_root: Eth2Digest,
committee: seq[ValidatorIndex], slot: Slot, index: CommitteeIndex,
committee: seq[ValidatorIndex], slot: Slot, committee_index: CommitteeIndex,
validator_index: ValidatorIndex, cache: var StateCache,
flags: UpdateFlags = {}): Attestation =
# Avoids state_sim silliness; as it's responsible for all validators,
@ -199,7 +199,7 @@ func makeAttestation*(
# want ValidatorIndex, so that's supported too.
let
sac_index = committee.find(validator_index)
data = makeAttestationData(state, slot, index, beacon_block_root)
data = makeAttestationData(state, slot, committee_index, beacon_block_root)
doAssert sac_index != -1, "find_beacon_committee should guarantee this"
@ -251,15 +251,10 @@ func makeFullAttestations*(
flags: UpdateFlags = {}): seq[Attestation] =
# Create attestations in which the full committee participates for each shard
# that should be attested to during a particular slot
let committees_per_slot =
get_committee_count_per_slot(state, slot.epoch, cache)
for index in 0'u64..<committees_per_slot:
for committee_index in get_committee_indices(state, slot.epoch, cache):
let
committee = get_beacon_committee(
state, slot, index.CommitteeIndex, cache)
data = makeAttestationData(
state, slot, index.CommitteeIndex, beacon_block_root)
committee = get_beacon_committee(state, slot, committee_index, cache)
data = makeAttestationData(state, slot, committee_index, beacon_block_root)
doAssert committee.len() >= 1
# Initial attestation
@ -320,7 +315,7 @@ proc makeSyncAggregate(
validatorIdx: ValidatorIndex
selectionProof: ValidatorSig
var aggregators: seq[Aggregator]
for subcommitteeIdx in allSyncSubcommittees():
for subcommitteeIdx in SyncSubcommitteeIndex:
let
firstKeyIdx = subcommitteeIdx.int * SYNC_SUBCOMMITTEE_SIZE
lastKeyIdx = firstKeyIdx + SYNC_SUBCOMMITTEE_SIZE - 1