ShufflingRef approach to next-epoch validator duty calculation/prediction (#5414)

* ShufflingRef approach to next-epoch validator duty calculation/prediction

* refactor action_tracker.updateActions to take ShufflingRef + beacon_proposers; refactor maybeUpdateActionTrackerNextEpoch into a separate, reusable function; add actual fallback logic

* document one possible set of conditions

* check epoch participation flags and inactivity scores to ensure no penalties, and MAX_EFFECTIVE_BALANCE to ensure rewards don't matter (see the participation-flag sketch after this list)

* correctly (un)shuffle each proposer index

* remove debugging assertion
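A minimal spec-style Python sketch of the Altair participation-flag check the bullet above refers to; this is illustrative only, not code from this repository:

# Illustrative spec-style Python: Altair participation flags are single bits
# in a per-validator byte. A validator with both the timely source and target
# flags set incurs no flag-related penalties for that epoch.
TIMELY_SOURCE_FLAG_INDEX = 0
TIMELY_TARGET_FLAG_INDEX = 1
TIMELY_HEAD_FLAG_INDEX = 2

def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2**flag_index
    return flags & flag == flag

flags = 2**TIMELY_SOURCE_FLAG_INDEX | 2**TIMELY_TARGET_FLAG_INDEX
assert has_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
assert has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert not has_flag(flags, TIMELY_HEAD_FLAG_INDEX)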
tersec 2023-10-10 00:02:07 +00:00 committed by GitHub
parent a4cf203849
commit 447786518f
9 changed files with 227 additions and 77 deletions

View File

@@ -261,13 +261,14 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Honest validator
 ```diff
 + General pubsub topics OK
++ Index shuffling and unshuffling invert OK
 + Liveness failsafe conditions OK
 + Mainnet attestation topics OK
 + Stability subnets OK
 + isNearSyncCommitteePeriod OK
 + is_aggregator OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 7/7 Fail: 0/7 Skip: 0/7
 ## ImportKeystores requests [Beacon Node] [Preset: mainnet]
 ```diff
 + ImportKeystores/ListKeystores/DeleteKeystores [Beacon Node] [Preset: mainnet] OK
@@ -704,4 +705,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 OK: 9/9 Fail: 0/9 Skip: 0/9

 ---TOTAL---
-OK: 397/402 Fail: 0/402 Skip: 5/402
+OK: 398/403 Fail: 0/403 Skip: 5/403

View File

@@ -544,11 +544,8 @@ func putEpochRef(dag: ChainDAGRef, epochRef: EpochRef) =
 func init*(
     T: type ShufflingRef, state: ForkedHashedBeaconState,
     cache: var StateCache, epoch: Epoch): T =
-  let
-    dependent_epoch =
-      if epoch < 1: Epoch(0) else: epoch - 1
-    attester_dependent_root =
-      withState(state): forkyState.dependent_root(dependent_epoch)
+  let attester_dependent_root =
+    withState(state): forkyState.dependent_root(epoch.get_previous_epoch)

   ShufflingRef(
     epoch: epoch,
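As a reminder of why the simplification above is equivalent, a tiny illustrative Python sketch of the clamping that a bare-epoch get_previous_epoch performs, matching the removed explicit branch (names here are hypothetical, not the Nim API):

# Illustrative only: the previous epoch of a bare epoch number is clamped at
# genesis, exactly what "if epoch < 1: Epoch(0) else: epoch - 1" computed.
GENESIS_EPOCH = 0

def get_previous_epoch(epoch: int) -> int:
    return GENESIS_EPOCH if epoch <= GENESIS_EPOCH else epoch - 1

assert get_previous_epoch(0) == 0
assert get_previous_epoch(5) == 4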

View File

@@ -1061,6 +1061,83 @@ proc doppelgangerChecked(node: BeaconNode, epoch: Epoch) =
   for validator in node.attachedValidators[]:
     validator.doppelgangerChecked(epoch - 1)

+from ./spec/state_transition_epoch import effective_balance_might_update
+
+proc maybeUpdateActionTrackerNextEpoch(
+    node: BeaconNode, forkyState: ForkyHashedBeaconState, nextEpoch: Epoch) =
+  if node.consensusManager[].actionTracker.needsUpdate(
+      forkyState, nextEpoch):
+    template epochRefFallback() =
+      let epochRef =
+        node.dag.getEpochRef(node.dag.head, nextEpoch, false).expect(
+          "Getting head EpochRef should never fail")
+      node.consensusManager[].actionTracker.updateActions(
+        epochRef.shufflingRef, epochRef.beacon_proposers)
+
+    when forkyState is phase0.HashedBeaconState:
+      # The previous_epoch_participation-based logic requires Altair or newer
+      epochRefFallback()
+    else:
+      let
+        shufflingRef = node.dag.getShufflingRef(node.dag.head, nextEpoch, false).valueOr:
+          # epochRefFallback() won't work in this case either
+          return
+        nextEpochProposers = get_beacon_proposer_indices(
+          forkyState.data, shufflingRef.shuffled_active_validator_indices,
+          nextEpoch)
+        nextEpochFirstProposer = nextEpochProposers[0].valueOr:
+          # All proposers except the first can be more straightforwardly and
+          # efficiently (re)computed correctly once in that epoch.
+          epochRefFallback()
+          return
+
+      # Has to account for potential epoch transition TIMELY_SOURCE_FLAG_INDEX,
+      # TIMELY_TARGET_FLAG_INDEX, and inactivity penalties, resulting from spec
+      # functions get_flag_index_deltas() and get_inactivity_penalty_deltas().
+      #
+      # There are no penalties associated with TIMELY_HEAD_FLAG_INDEX, but a
+      # reward exists. effective_balance == MAX_EFFECTIVE_BALANCE ensures that,
+      # even so, the effective balance cannot change as a result.
+      #
+      # It's not truly necessary to avoid all rewards and penalties, but only
+      # to bound them, to ensure they won't unexpectedly alter the effective
+      # balance during the upcoming epoch transition.
+      #
+      # During the genesis epoch, the check for epoch participation is against
+      # the current, not previous, epoch, so it would amount to checking
+      # whether a validator has participated in an epoch that has not yet
+      # happened.
+      #
+      # Because process_rewards_and_penalties() in epoch processing happens
+      # before the current/previous participation swap, previous is correct
+      # even here, and consistent with what the epoch transition uses.
+      #
+      # Whilst slashing, proposal, and sync committee rewards and penalties do
+      # update the balances as they occur, they don't update effective_balance
+      # until the end of epoch, so detect via effective_balance_might_update.
+      #
+      # On EF mainnet epoch 233906, this matches 99.5% of active validators;
+      # on Holesky epoch 2041, 83% of active validators.
+      let
+        participation_flags =
+          forkyState.data.previous_epoch_participation.item(
+            nextEpochFirstProposer)
+        effective_balance = forkyState.data.validators.item(
+          nextEpochFirstProposer).effective_balance
+
+      if participation_flags.has_flag(TIMELY_SOURCE_FLAG_INDEX) and
+          participation_flags.has_flag(TIMELY_TARGET_FLAG_INDEX) and
+          effective_balance == MAX_EFFECTIVE_BALANCE and
+          forkyState.data.slot.epoch != GENESIS_EPOCH and
+          forkyState.data.inactivity_scores.item(
+            nextEpochFirstProposer) == 0 and
+          not effective_balance_might_update(
+            forkyState.data.balances.item(nextEpochFirstProposer),
+            effective_balance):
+        node.consensusManager[].actionTracker.updateActions(
+          shufflingRef, nextEpochProposers)
+      else:
+        epochRefFallback()
+
 proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
   ## Subscribe to subnets that we are providing stability for or aggregating
   ## and unsubscribe from the ones that are no longer relevant.
@@ -1133,13 +1210,10 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
           forkyState, slot.epoch):
         let epochRef = node.dag.getEpochRef(head, slot.epoch, false).expect(
           "Getting head EpochRef should never fail")
-        node.consensusManager[].actionTracker.updateActions(epochRef)
+        node.consensusManager[].actionTracker.updateActions(
+          epochRef.shufflingRef, epochRef.beacon_proposers)

-      if node.consensusManager[].actionTracker.needsUpdate(
-          forkyState, slot.epoch + 1):
-        let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
-          "Getting head EpochRef should never fail")
-        node.consensusManager[].actionTracker.updateActions(epochRef)
+      node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1)

   if node.gossipState.card > 0 and targetGossipState.card == 0:
     debug "Disabling topic subscriptions",
@@ -1260,11 +1334,19 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
   let head = node.dag.head
   if node.isSynced(head) and head.executionValid:
     withState(node.dag.headState):
-      if node.consensusManager[].actionTracker.needsUpdate(
-          forkyState, slot.epoch + 1):
-        let epochRef = node.dag.getEpochRef(head, slot.epoch + 1, false).expect(
-          "Getting head EpochRef should never fail")
-        node.consensusManager[].actionTracker.updateActions(epochRef)
+      # maybeUpdateActionTrackerNextEpoch might not account for balance changes
+      # from the process_rewards_and_penalties() epoch transition, but only
+      # from process_block() and other per-slot sources. This mainly matters
+      # insofar as it might trigger process_effective_balance_updates() changes
+      # in that same epoch transition, which this function is therefore
+      # potentially blind to, but which might then affect beacon proposers.
+      #
+      # Because this runs every slot, it can account naturally for slashings,
+      # which affect balances via slash_validator() when they happen, and any
+      # missed sync committee participation via process_sync_aggregate(), but
+      # attestation penalties, for example, need specific handling, which is
+      # checked by maybeUpdateActionTrackerNextEpoch.
+      node.maybeUpdateActionTrackerNextEpoch(forkyState, slot.epoch + 1)

   let
     nextAttestationSlot =
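Putting the comments above together, here is a hedged, spec-style Python sketch of the gate that decides between the ShufflingRef fast path and the EpochRef fallback; constants are mainnet preset values, and the function name is illustrative, not an identifier from this codebase:

MAX_EFFECTIVE_BALANCE = 32 * 10**9           # Gwei, mainnet preset
EFFECTIVE_BALANCE_INCREMENT = 10**9          # Gwei
HYSTERESIS_QUOTIENT = 4
HYSTERESIS_DOWNWARD_MULTIPLIER = 1
HYSTERESIS_UPWARD_MULTIPLIER = 5
TIMELY_SOURCE_FLAG_INDEX = 0
TIMELY_TARGET_FLAG_INDEX = 1
GENESIS_EPOCH = 0

def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2**flag_index
    return flags & flag == flag

def effective_balance_might_update(balance: int, effective_balance: int) -> bool:
    hysteresis_increment = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT
    downward_threshold = hysteresis_increment * HYSTERESIS_DOWNWARD_MULTIPLIER
    upward_threshold = hysteresis_increment * HYSTERESIS_UPWARD_MULTIPLIER
    return (balance + downward_threshold < effective_balance
            or effective_balance + upward_threshold < balance)

def proposer_prediction_is_stable(  # illustrative name, not a codebase API
        flags: int, effective_balance: int, balance: int,
        inactivity_score: int, current_epoch: int) -> bool:
    # Mirrors the condition guarding the ShufflingRef fast path: no source or
    # target penalties, capped effective balance, not the genesis epoch, no
    # inactivity leak, and a balance inside the hysteresis band.
    return (has_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
            and has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
            and effective_balance == MAX_EFFECTIVE_BALANCE
            and current_epoch != GENESIS_EPOCH
            and inactivity_score == 0
            and not effective_balance_might_update(balance, effective_balance))

# A fully participating 32 ETH validator is predictable; one whose balance has
# drifted out of the hysteresis band is not, so the EpochRef fallback is used.
assert proposer_prediction_is_stable(0b11, 32 * 10**9, 32 * 10**9, 0, 5)
assert not proposer_prediction_is_stable(0b11, 32 * 10**9, 31 * 10**9, 0, 5)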

View File

@@ -357,7 +357,7 @@ func partialBeaconBlock*(
     parent_root: state.latest_block_root,
     body: consensusFork.BeaconBlockBody(
       randao_reveal: randao_reveal,
-      eth1_data: eth1data,
+      eth1_data: eth1_data,
       graffiti: graffiti,
       proposer_slashings: validator_changes.proposer_slashings,
       attester_slashings: validator_changes.attester_slashings,

View File

@@ -936,20 +936,24 @@ func process_eth1_data_reset*(state: var ForkyBeaconState) =
   if next_epoch mod EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
     state.eth1_data_votes = default(type state.eth1_data_votes)

+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#effective-balances-updates
+template effective_balance_might_update*(
+    balance: Gwei, effective_balance: Gwei): bool =
+  const
+    HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT div HYSTERESIS_QUOTIENT
+    DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
+    UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
+  balance + DOWNWARD_THRESHOLD < effective_balance or
+    effective_balance + UPWARD_THRESHOLD < balance
+
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#effective-balances-updates
 func process_effective_balance_updates*(state: var ForkyBeaconState) =
   # Update effective balances with hysteresis
   for vidx in state.validators.vindices:
-    let balance = state.balances.item(vidx)
-    const
-      HYSTERESIS_INCREMENT =
-        EFFECTIVE_BALANCE_INCREMENT div HYSTERESIS_QUOTIENT
-      DOWNWARD_THRESHOLD =
-        HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
-      UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
-    let effective_balance = state.validators.item(vidx).effective_balance
-    if balance + DOWNWARD_THRESHOLD < effective_balance or
-        effective_balance + UPWARD_THRESHOLD < balance:
+    let
+      balance = state.balances.item(vidx)
+      effective_balance = state.validators.item(vidx).effective_balance
+    if effective_balance_might_update(balance, effective_balance):
       let new_effective_balance =
         min(
           balance - balance mod EFFECTIVE_BALANCE_INCREMENT,
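For reference, a short worked example of the hysteresis bounds the new template factors out, using the mainnet preset values (EFFECTIVE_BALANCE_INCREMENT = 1 ETH, HYSTERESIS_QUOTIENT = 4, HYSTERESIS_DOWNWARD_MULTIPLIER = 1, HYSTERESIS_UPWARD_MULTIPLIER = 5, so the thresholds are 0.25 ETH down and 1.25 ETH up); this is an illustrative Python sketch, not the Nim code:

GWEI_PER_ETH = 10**9
DOWNWARD_THRESHOLD = GWEI_PER_ETH // 4    # HYSTERESIS_INCREMENT * 1 = 0.25 ETH
UPWARD_THRESHOLD = 5 * GWEI_PER_ETH // 4  # HYSTERESIS_INCREMENT * 5 = 1.25 ETH

def might_update(balance: int, effective_balance: int) -> bool:
    # Same predicate as the effective_balance_might_update template above.
    return (balance + DOWNWARD_THRESHOLD < effective_balance
            or effective_balance + UPWARD_THRESHOLD < balance)

effective_balance = 32 * GWEI_PER_ETH
assert not might_update(31_800_000_000, effective_balance)  # inside the band
assert might_update(31_700_000_000, effective_balance)      # below 31.75 ETH
assert might_update(33_300_000_000, effective_balance)      # above 33.25 ETH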

View File

@@ -8,10 +8,7 @@

 {.push raises: [].}

-import
-  ./datatypes/[phase0, altair, bellatrix],
-  ./helpers
+import ./helpers

 export helpers

 const
@@ -23,8 +20,7 @@ const

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#compute_shuffled_index
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#compute_committee
-# Port of https://github.com/protolambda/zrnt/blob/master/eth2/beacon/shuffle.go
-# Shuffles or unshuffles, depending on the `dir` (true for shuffling, false for unshuffling
+# Port of https://github.com/protolambda/zrnt/blob/v0.14.0/eth2/beacon/shuffle.go
 func shuffle_list*(input: var seq[ValidatorIndex], seed: Eth2Digest) =
   let list_size = input.lenu64
@@ -302,8 +298,9 @@ func get_beacon_committee_len*(
     get_beacon_committee_len(forkyState.data, slot, index, cache)

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#compute_shuffled_index
-func compute_shuffled_index*(
-    index: uint64, index_count: uint64, seed: Eth2Digest): uint64 =
+template compute_shuffled_index_aux(
+    index: uint64, index_count: uint64, seed: Eth2Digest, iter: untyped):
+    uint64 =
   ## Return the shuffled index corresponding to ``seed`` (and ``index_count``).
   doAssert index < index_count
@@ -315,7 +312,7 @@ func compute_shuffled_index*(

   # Swap or not (https://link.springer.com/content/pdf/10.1007%2F978-3-642-32009-5_1.pdf)
   # See the 'generalized domain' algorithm on page 3
-  for current_round in 0'u8 ..< SHUFFLE_ROUND_COUNT.uint8:
+  for current_round in iter:
     source_buffer[32] = current_round

     let
@@ -336,32 +333,58 @@ func compute_shuffled_index*(

     cur_idx_permuted

+func compute_shuffled_index*(
+    index: uint64, index_count: uint64, seed: Eth2Digest): uint64 =
+  ## Return the shuffled index corresponding to ``seed`` (and ``index_count``).
+  compute_shuffled_index_aux(index, index_count, seed) do:
+    0'u8 ..< SHUFFLE_ROUND_COUNT.uint8
+
+func compute_inverted_shuffled_index*(
+    index: uint64, index_count: uint64, seed: Eth2Digest): uint64 =
+  ## Return the inverse of the shuffled index corresponding to ``seed`` (and
+  ## ``index_count``).
+  compute_shuffled_index_aux(index, index_count, seed) do:
+    countdown(SHUFFLE_ROUND_COUNT.uint8 - 1, 0'u8, 1)
+
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#compute_proposer_index
-func compute_proposer_index(state: ForkyBeaconState,
-    indices: seq[ValidatorIndex], seed: Eth2Digest): Opt[ValidatorIndex] =
+template compute_proposer_index(state: ForkyBeaconState,
+    indices: openArray[ValidatorIndex], seed: Eth2Digest,
+    unshuffleTransform: untyped): Opt[ValidatorIndex] =
   ## Return from ``indices`` a random index sampled by effective balance.
   const MAX_RANDOM_BYTE = 255

   if len(indices) == 0:
-    return Opt.none(ValidatorIndex)
-
-  let seq_len = indices.lenu64
-
-  var
-    i = 0'u64
-    buffer: array[32+8, byte]
-
-  buffer[0..31] = seed.data
-  while true:
-    buffer[32..39] = uint_to_bytes(i div 32)
-    let
-      candidate_index =
-        indices[compute_shuffled_index(i mod seq_len, seq_len, seed)]
-      random_byte = (eth2digest(buffer).data)[i mod 32]
-      effective_balance = state.validators[candidate_index].effective_balance
-    if effective_balance * MAX_RANDOM_BYTE >=
-        MAX_EFFECTIVE_BALANCE * random_byte:
-      return Opt.some(candidate_index)
-    i += 1
+    Opt.none(ValidatorIndex)
+  else:
+    let seq_len {.inject.} = indices.lenu64
+    var
+      i = 0'u64
+      buffer: array[32+8, byte]
+      res: Opt[ValidatorIndex]
+    buffer[0..31] = seed.data
+    while true:
+      buffer[32..39] = uint_to_bytes(i div 32)
+      let
+        shuffled_index {.inject.} =
+          compute_shuffled_index(i mod seq_len, seq_len, seed)
+        candidate_index = indices[unshuffleTransform]
+        random_byte = (eth2digest(buffer).data)[i mod 32]
+        effective_balance = state.validators[candidate_index].effective_balance
+      if effective_balance * MAX_RANDOM_BYTE >=
+          MAX_EFFECTIVE_BALANCE * random_byte:
+        res = Opt.some(candidate_index)
+        break
+      i += 1
+    doAssert res.isSome
+    res
+
+func compute_proposer_index(state: ForkyBeaconState,
+    indices: openArray[ValidatorIndex], seed: Eth2Digest):
+    Opt[ValidatorIndex] =
+  ## Return from ``indices`` a random index sampled by effective balance.
+  compute_proposer_index(state, indices, seed, shuffled_index)

 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#get_beacon_proposer_index
 func get_beacon_proposer_index*(
@@ -378,19 +401,16 @@ func get_beacon_proposer_index*(
   cache.beacon_proposer_indices.withValue(slot, proposer) do:
     return proposer[]
   do:
-    # Return the beacon proposer index at the current slot.
+    ## Return the beacon proposer index at the current slot.
     var buffer: array[32 + 8, byte]
     buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data

     # There's exactly one beacon proposer per slot - the same validator may
     # however propose several times in the same epoch (however unlikely)
-    let
-      # active validator indices are kept in cache but sorting them takes
-      # quite a while
-      indices = get_active_validator_indices(state, epoch)
+    let indices = get_active_validator_indices(state, epoch)
     var res: Opt[ValidatorIndex]
     for epoch_slot in epoch.slots():
       buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64)
       let seed = eth2digest(buffer)
@@ -401,6 +421,28 @@ func get_beacon_proposer_index*(

   return res

+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#get_beacon_proposer_index
+func get_beacon_proposer_indices*(
+    state: ForkyBeaconState, shuffled_indices: openArray[ValidatorIndex], epoch: Epoch):
+    seq[Opt[ValidatorIndex]] =
+  ## Return the beacon proposer indices at the current epoch, using shuffled
+  ## rather than sorted active validator indices.
+  var
+    buffer {.noinit.}: array[32 + 8, byte]
+    res: seq[Opt[ValidatorIndex]]
+
+  buffer[0..31] = get_seed(state, epoch, DOMAIN_BEACON_PROPOSER).data
+  let epoch_shuffle_seed = get_seed(state, epoch, DOMAIN_BEACON_ATTESTER)
+
+  for epoch_slot in epoch.slots():
+    buffer[32..39] = uint_to_bytes(epoch_slot.asUInt64)
+    res.add (
+      compute_proposer_index(state, shuffled_indices, eth2digest(buffer)) do:
+        compute_inverted_shuffled_index(
+          shuffled_index, seq_len, epoch_shuffle_seed))
+
+  res
+
 # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/beacon-chain.md#get_beacon_proposer_index
 func get_beacon_proposer_index*(state: ForkyBeaconState, cache: var StateCache):
     Opt[ValidatorIndex] =
@@ -413,7 +455,7 @@ func get_beacon_proposer_index*(state: ForkedHashedBeaconState,
   withState(state):
     get_beacon_proposer_index(forkyState.data, cache, slot)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/validator.md#aggregation-selection
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/validator.md#aggregation-selection
 func is_aggregator*(committee_len: uint64, slot_signature: ValidatorSig): bool =
   let modulo = max(1'u64, committee_len div TARGET_AGGREGATORS_PER_COMMITTEE)
   bytes_to_uint64(eth2digest(
@@ -471,7 +513,7 @@ func livenessFailsafeInEffect*(

   false

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#attestation-subnet-subscription
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/p2p-interface.md#attestation-subnet-subscription
 func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64):
     SubnetId =
   # Ensure neither `truncate` loses information
@@ -495,7 +537,7 @@ func compute_subscribed_subnet(node_id: UInt256, epoch: Epoch, index: uint64):
   )
   SubnetId((permutated_prefix + index) mod ATTESTATION_SUBNET_COUNT)

-# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.1/specs/phase0/p2p-interface.md#attestation-subnet-subscription
+# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.2/specs/phase0/p2p-interface.md#attestation-subnet-subscription
 iterator compute_subscribed_subnets*(node_id: UInt256, epoch: Epoch): SubnetId =
   for index in 0'u64 ..< SUBNETS_PER_NODE:
     yield compute_subscribed_subnet(node_id, epoch, index)
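To make the new unshuffling primitive concrete, here is a hedged, self-contained Python sketch of the phase0 swap-or-not shuffle (compute_shuffled_index) and its inverse: because every round is a self-inverse conditional swap, running the rounds in reverse order undoes the permutation, which is all compute_inverted_shuffled_index above does. SHUFFLE_ROUND_COUNT uses the mainnet preset value; helper names are illustrative, not identifiers from this codebase.

from hashlib import sha256

SHUFFLE_ROUND_COUNT = 90  # mainnet preset

def _shuffled_index(index: int, index_count: int, seed: bytes, rounds) -> int:
    # Swap-or-not network, as in the phase0 spec's compute_shuffled_index.
    assert index < index_count
    for current_round in rounds:
        round_byte = current_round.to_bytes(1, "little")
        pivot = int.from_bytes(
            sha256(seed + round_byte).digest()[:8], "little") % index_count
        flip = (pivot + index_count - index) % index_count
        position = max(index, flip)
        source = sha256(
            seed + round_byte + (position // 256).to_bytes(4, "little")).digest()
        bit = (source[(position % 256) // 8] >> (position % 8)) & 1
        if bit:
            index = flip
    return index

def compute_shuffled_index(index, index_count, seed):
    return _shuffled_index(index, index_count, seed, range(SHUFFLE_ROUND_COUNT))

def compute_inverted_shuffled_index(index, index_count, seed):
    # Each round is an involution, so reversing the round order inverts the
    # whole permutation.
    return _shuffled_index(
        index, index_count, seed, reversed(range(SHUFFLE_ROUND_COUNT)))

seed = bytes(32)
for n in (1, 4, 52, 321):
    for i in range(n):
        assert compute_inverted_shuffled_index(
            compute_shuffled_index(i, n, seed), n, seed) == i

Assuming ShufflingRef lays its list out as shuffled[k] == sorted[compute_shuffled_index(k, n, attester_seed)], the element at sorted position j is shuffled[compute_inverted_shuffled_index(j, n, attester_seed)], which is how get_beacon_proposer_indices can sample proposers straight from the shuffled list without re-sorting the active validator set.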

View File

@@ -10,8 +10,9 @@
   ../spec/forks

 from ../spec/validator import compute_subscribed_subnets
+from ../consensus_object_pools/block_pools_types import ShufflingRef
 from ../consensus_object_pools/spec_cache import
-  EpochRef, epoch, get_committee_assignments
+  epoch, get_committee_assignments

 export forks, tables, sets
@@ -214,19 +215,19 @@ func needsUpdate*(
 from std/sequtils import toSeq

 func updateActions*(
-    tracker: var ActionTracker, epochRef: EpochRef) =
-  # Updates the schedule for upcoming attestation and proposal work
-  let
-    epoch = epochRef.epoch
+    tracker: var ActionTracker, shufflingRef: ShufflingRef,
+    beaconProposers: openArray[Opt[ValidatorIndex]]) =
+  let epoch = shufflingRef.epoch

-  tracker.attesterDepRoot = epochRef.shufflingRef.attester_dependent_root
+  # Updates the schedule for upcoming attestation and proposal work
+  tracker.attesterDepRoot = shufflingRef.attester_dependent_root
   tracker.lastCalculatedEpoch = epoch

   let validatorIndices = toHashSet(toSeq(tracker.knownValidators.keys()))

   # Update proposals
   tracker.proposingSlots[epoch mod 2] = 0
-  for i, proposer in epochRef.beacon_proposers:
+  for i, proposer in beacon_proposers:
     if proposer.isSome and proposer.get() in validatorIndices:
       tracker.proposingSlots[epoch mod 2] =
         tracker.proposingSlots[epoch mod 2] or (1'u32 shl i)
@@ -237,8 +238,7 @@ func updateActions*(
   static: doAssert SLOTS_PER_EPOCH <= 32

   for (committeeIndex, subnet_id, slot) in
-      get_committee_assignments(epochRef.shufflingRef, validatorIndices):
+      get_committee_assignments(shufflingRef, validatorIndices):
     doAssert epoch(slot) == epoch

     # Each get_committee_assignments() call here is on the next epoch. At any
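A small illustrative Python sketch of the proposingSlots bookkeeping that updateActions performs above: one 32-bit mask per epoch parity, where bit i is set when one of the tracker's known validators proposes at slot i of that epoch (hence the static assert that SLOTS_PER_EPOCH <= 32). Names here are hypothetical, not the Nim API.

SLOTS_PER_EPOCH = 32  # mainnet preset; hence the static assert above

def build_proposing_mask(beacon_proposers, known_validators):
    # beacon_proposers[i] is the proposer of slot i of the epoch, or None.
    mask = 0
    for i, proposer in enumerate(beacon_proposers):
        if proposer is not None and proposer in known_validators:
            mask |= 1 << i
    return mask

# e.g. locally attached validator 7 proposes at slots 3 and 12 of the epoch:
proposers = [None] * SLOTS_PER_EPOCH
proposers[3] = 7
proposers[12] = 7
assert build_proposing_mask(proposers, {7}) == (1 << 3) | (1 << 12)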

View File

@@ -186,8 +186,18 @@ suite "Block pool processing" & preset():
       nextEpochSlot = nextEpoch.start_slot()
       parentBsi = dag.head.parent.atSlot(nextEpochSlot).toBlockSlotId().get()
       stateCheckpoint = dag.stateCheckpoint(parentBsi)
+      shufflingRef = dag.getShufflingRef(dag.head, nextEpoch, false).valueOr:
+        raiseAssert "false"
+      nextEpochProposers = withState(dag.headState):
+        get_beacon_proposer_indices(
+          forkyState.data, shufflingRef.shuffled_active_validator_indices,
+          nextEpoch)

     check:
+      # get_beacon_proposer_indices based on ShufflingRef matches EpochRef
+      nextEpochProposers == dag.getEpochRef(
+        dag.head, nextEpoch, true).get.beacon_proposers
+
       parentBsi.bid == dag.head.parent.bid
       parentBsi.slot == nextEpochSlot
       # Pre-heated caches
@@ -200,6 +210,7 @@
       # this is required for the test to work - it's not a "public"
       # post-condition of getEpochRef
       getStateField(dag.epochRefState, slot) == nextEpochSlot
+
     assign(state[], dag.epochRefState)

     let

View File

@@ -10,8 +10,7 @@

 import
   unittest2,
   ./testutil,
-  ../beacon_chain/spec/[network, validator],
-  ../beacon_chain/spec/datatypes/[base, altair]
+  ../beacon_chain/spec/[network, validator]

 from std/sequtils import toSeq
@@ -309,3 +308,17 @@ suite "Honest validator":
         @[16.SubnetId, 17.SubnetId]
       toSeq(compute_subscribed_subnets(default(UInt256), 400.Epoch)) ==
         @[16.SubnetId, 17.SubnetId]
+
+  test "Index shuffling and unshuffling invert":
+    const seed = Eth2Digest.fromHex(
+      "0xa0054f8b4dead1ac88bd2c50cf13eab88f86d020362708a97a13012a402c57d3")
+
+    for index_count in [1'u64, 4'u64, 52'u64, 2121'u64, 42616'u64]:
+      for index in 0'u64 ..< index_count:
+        check:
+          compute_shuffled_index(
+            compute_inverted_shuffled_index(
+              index, index_count, seed), index_count, seed) == index
+          compute_inverted_shuffled_index(
+            compute_shuffled_index(
+              index, index_count, seed), index_count, seed) == index