dynamic sync committee subscriptions (#3308)

* dynamic sync committee subscriptions

* fast-path the trivial case rather than relying on the RNG for a probability-1 outcome

Co-authored-by: zah <zahary@gmail.com>

* use func instead of template; avoid calling async function unnecessarily

* avoid unnecessary sync committee topic computation; use correct epoch lookahead; enforce exception/effect tracking

* don't over-optimistically update ENR syncnets; non-looping version of nearSyncCommitteePeriod

* allow separately setting --subscribe-all-{sub,att,sync}nets

* remove unnecessary async

Co-authored-by: zah <zahary@gmail.com>
tersec 2022-01-24 20:40:59 +00:00 committed by GitHub
parent 062275461c
commit 00a347457a
12 changed files with 268 additions and 74 deletions


@@ -1,34 +1,5 @@
AllTests-mainnet
===
##
```diff
+ Slashing test: duplicate_pubkey_not_slashable.json OK
+ Slashing test: multiple_interchanges_single_validator_single_message_gap.json OK
+ Slashing test: multiple_interchanges_single_validator_single_message_out_of_order.json OK
+ Slashing test: multiple_validators_multiple_blocks_and_attestations.json OK
+ Slashing test: multiple_validators_same_slot_blocks.json OK
+ Slashing test: single_validator_genesis_attestation.json OK
+ Slashing test: single_validator_import_only.json OK
+ Slashing test: single_validator_multiple_block_attempts.json OK
+ Slashing test: single_validator_multiple_blocks_and_attestations.json OK
+ Slashing test: single_validator_out_of_order_attestations.json OK
+ Slashing test: single_validator_out_of_order_blocks.json OK
+ Slashing test: single_validator_resign_attestation.json OK
+ Slashing test: single_validator_resign_block.json OK
+ Slashing test: single_validator_single_attestation.json OK
+ Slashing test: single_validator_single_block.json OK
+ Slashing test: single_validator_single_block_and_attestation.json OK
+ Slashing test: single_validator_single_block_and_attestation_signing_root.json OK
+ Slashing test: single_validator_slashable_attestations_double_vote.json OK
+ Slashing test: single_validator_slashable_attestations_surrounded_by_existing.json OK
+ Slashing test: single_validator_slashable_attestations_surrounds_existing.json OK
+ Slashing test: single_validator_slashable_blocks.json OK
+ Slashing test: single_validator_slashable_blocks_no_root.json OK
+ Slashing test: single_validator_source_greater_than_target.json OK
+ Slashing test: single_validator_two_blocks_no_signing_root.json OK
+ Slashing test: wrong_genesis_validators_root.json OK
```
OK: 25/25 Fail: 0/25 Skip: 0/25
## Attestation pool processing [Preset: mainnet]
```diff
+ Attestations may arrive in any order [Preset: mainnet] OK
@@ -222,9 +193,10 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
```diff
+ General pubsub topics OK
+ Mainnet attestation topics OK
+ isNearSyncCommitteePeriod OK
+ is_aggregator OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
OK: 4/4 Fail: 0/4 Skip: 0/4
## ImportKeystores requests [Preset: mainnet]
```diff
+ Invalid Authorization Header [Preset: mainnet] OK
@@ -293,17 +265,54 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ deletePeer() test OK
```
OK: 12/12 Fail: 0/12 Skip: 0/12
## Slashing Interchange tests [Preset: mainnet]
```diff
+ Slashing test: duplicate_pubkey_not_slashable.json OK
+ Slashing test: duplicate_pubkey_slashable_attestation.json OK
+ Slashing test: duplicate_pubkey_slashable_block.json OK
+ Slashing test: multiple_interchanges_multiple_validators_repeat_idem.json OK
+ Slashing test: multiple_interchanges_overlapping_validators_merge_stale.json OK
+ Slashing test: multiple_interchanges_overlapping_validators_repeat_idem.json OK
+ Slashing test: multiple_interchanges_single_validator_fail_iff_imported.json OK
+ Slashing test: multiple_interchanges_single_validator_first_surrounds_second.json OK
Slashing test: multiple_interchanges_single_validator_multiple_blocks_out_of_order.json Skip
+ Slashing test: multiple_interchanges_single_validator_second_surrounds_first.json OK
+ Slashing test: multiple_interchanges_single_validator_single_att_out_of_order.json OK
+ Slashing test: multiple_interchanges_single_validator_single_block_out_of_order.json OK
+ Slashing test: multiple_interchanges_single_validator_single_message_gap.json OK
+ Slashing test: multiple_validators_multiple_blocks_and_attestations.json OK
+ Slashing test: multiple_validators_same_slot_blocks.json OK
+ Slashing test: single_validator_genesis_attestation.json OK
+ Slashing test: single_validator_import_only.json OK
+ Slashing test: single_validator_multiple_block_attempts.json OK
+ Slashing test: single_validator_multiple_blocks_and_attestations.json OK
+ Slashing test: single_validator_out_of_order_attestations.json OK
+ Slashing test: single_validator_out_of_order_blocks.json OK
+ Slashing test: single_validator_resign_attestation.json OK
+ Slashing test: single_validator_resign_block.json OK
+ Slashing test: single_validator_single_attestation.json OK
+ Slashing test: single_validator_single_block.json OK
+ Slashing test: single_validator_single_block_and_attestation.json OK
+ Slashing test: single_validator_single_block_and_attestation_signing_root.json OK
+ Slashing test: single_validator_slashable_attestations_double_vote.json OK
+ Slashing test: single_validator_slashable_attestations_surrounded_by_existing.json OK
+ Slashing test: single_validator_slashable_attestations_surrounds_existing.json OK
+ Slashing test: single_validator_slashable_blocks.json OK
+ Slashing test: single_validator_slashable_blocks_no_root.json OK
+ Slashing test: single_validator_source_greater_than_target.json OK
+ Slashing test: single_validator_source_greater_than_target_sensible_iff_minified.json OK
+ Slashing test: single_validator_source_greater_than_target_surrounded.json OK
Slashing test: single_validator_source_greater_than_target_surrounding.json Skip
+ Slashing test: single_validator_two_blocks_no_signing_root.json OK
+ Slashing test: wrong_genesis_validators_root.json OK
```
OK: 36/38 Fail: 0/38 Skip: 2/38
## Slashing Protection DB - Interchange [Preset: mainnet]
```diff
+ Smoke test - Complete format - Invalid database is refused [Preset: mainnet] OK
+ Smoke test - Complete format [Preset: mainnet] OK
```
OK: 2/2 Fail: 0/2 Skip: 0/2
## Slashing Protection DB - v1 and v2 migration [Preset: mainnet]
```diff
+ Minimal format migration [Preset: mainnet] OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Slashing Protection DB [Preset: mainnet]
```diff
+ Attestation ordering #1698 OK
@@ -442,4 +451,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 238/240 Fail: 0/240 Skip: 2/240
OK: 249/253 Fail: 0/253 Skip: 4/253


@@ -160,9 +160,19 @@ type
desc: "Node agent string which is used as identifier in network"
name: "agent-string" }: string
subscribeAllSubnets* {.
subscribeAllAttnets* {.
defaultValue: false,
desc: "Subscribe to all attestation subnet topics when gossiping"
name: "subscribe-all-attnets" }: bool
subscribeAllSyncnets* {.
defaultValue: false,
desc: "Subscribe to all sync subnet topics when gossiping"
name: "subscribe-all-syncnets" }: bool
subscribeAllSubnets* {.
defaultValue: false,
desc: "Subscribe to all subnet topics when gossiping; implies subscribe-all-attnets and subscribe-all-syncnets"
name: "subscribe-all-subnets" }: bool
slashingDbKind* {.
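
The three subscription flags are now independent in the config layer; the umbrella `--subscribe-all-subnets` is only folded in at the call sites elsewhere in this diff (`config.subscribeAllAttnets or config.subscribeAllSubnets` when initializing the ActionTracker, and the matching syncnet check in `hasSyncPubKey`). A minimal sketch of that combination, with hypothetical `SubnetFlags`/`effective*` names that are not part of this commit:

```nim
type SubnetFlags = object
  subscribeAllAttnets: bool
  subscribeAllSyncnets: bool
  subscribeAllSubnets: bool   # umbrella flag; implies both of the above

func effectiveAttnets(flags: SubnetFlags): bool =
  # Mirrors `config.subscribeAllAttnets or config.subscribeAllSubnets`.
  flags.subscribeAllAttnets or flags.subscribeAllSubnets

func effectiveSyncnets(flags: SubnetFlags): bool =
  # Mirrors `node.config.subscribeAllSubnets or node.config.subscribeAllSyncnets`.
  flags.subscribeAllSyncnets or flags.subscribeAllSubnets

when isMainModule:
  let flags = SubnetFlags(subscribeAllSubnets: true)
  doAssert flags.effectiveAttnets and flags.effectiveSyncnets
```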


@@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -10,6 +10,8 @@
import
std/[sets, tables],
stew/shims/hashes,
bearssl,
eth/p2p/discoveryv5/random2,
chronicles,
../spec/[crypto, digest],
../spec/datatypes/altair
@@ -53,13 +55,17 @@ type
bestContributions*: Table[Eth2Digest, BestSyncSubcommitteeContributions]
onContributionReceived*: OnSyncContributionCallback
rng: ref BrHmacDrbgContext
syncCommitteeSubscriptions*: Table[ValidatorPubKey, Epoch]
func hash*(x: SyncCommitteeMsgKey): Hash =
hashAllFields(x)
func init*(T: type SyncCommitteeMsgPool,
rng: ref BrHmacDrbgContext,
onSyncContribution: OnSyncContributionCallback = nil
): SyncCommitteeMsgPool =
T(onContributionReceived: onSyncContribution)
T(rng: rng, onContributionReceived: onSyncContribution)
func pruneData*(pool: var SyncCommitteeMsgPool, slot: Slot) =
## This should be called at the end of the slot.
@@ -271,3 +277,14 @@ proc produceSyncAggregate*(
raiseAssert "We have checked for the key upfront"
else:
SyncAggregate.init()
proc isEpochLeadTime*(
pool: SyncCommitteeMsgPool, epochsToSyncPeriod: uint64): bool =
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/validator.md#sync-committee-subnet-stability
# This ensures a uniform distribution without requiring additional state:
# (1/4)                         = 1/4, 4 epochs out
# (3/4) * (1/3)                 = 1/4, 3 epochs out
# (3/4) * (2/3) * (1/2)         = 1/4, 2 epochs out
# (3/4) * (2/3) * (1/2) * (1/1) = 1/4, 1 epoch out
doAssert epochsToSyncPeriod > 0
epochsToSyncPeriod == 1 or pool.rng[].rand(epochsToSyncPeriod - 1) == 0
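
To see why this gives a uniform spread over the lookahead window, the sketch below replays the per-epoch lottery using `std/random` in place of the pool's `BrHmacDrbgContext`; `pickLeadEpoch` is illustrative only and not part of the commit:

```nim
import std/random

proc pickLeadEpoch(r: var Rand, lookahead: int): int =
  ## At k epochs before the next sync committee period, subscribe with
  ## probability 1/k; this mirrors isEpochLeadTime(pool, k) being consulted
  ## once per epoch as the boundary approaches.
  var k = lookahead
  while k > 1:
    if r.rand(k - 1) == 0:   # uniform over 0 .. k-1, i.e. probability 1/k
      return k
    dec k
  1                          # k == 1: the probability-1 fast path, no RNG call

when isMainModule:
  var
    r = initRand(1234)
    counts: array[1 .. 4, int]
  for _ in 0 ..< 100_000:
    inc counts[r.pickLeadEpoch(4)]
  echo counts                # each of the four buckets lands near 25_000
```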


@@ -434,7 +434,7 @@ proc init*(T: type BeaconNode,
AttestationPool.init(dag, quarantine, onAttestationReceived)
)
syncCommitteeMsgPool = newClone(
SyncCommitteeMsgPool.init(onSyncContribution)
SyncCommitteeMsgPool.init(rng, onSyncContribution)
)
exitPool = newClone(ExitPool.init(dag, onVoluntaryExitAdded))
@@ -528,7 +528,8 @@ proc init*(T: type BeaconNode,
requestManager: RequestManager.init(network, blockVerifier),
syncManager: syncManager,
backfiller: backfiller,
actionTracker: ActionTracker.init(rng, config.subscribeAllSubnets),
actionTracker: ActionTracker.init(
rng, config.subscribeAllAttnets or config.subscribeAllSubnets),
processor: processor,
blockProcessor: blockProcessor,
consensusManager: consensusManager,
@@ -586,6 +587,13 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
func subnetLog(v: BitArray): string =
$toSeq(v.oneIndices())
func forkDigests(node: BeaconNode): auto =
let forkDigestsArray: array[BeaconStateFork, auto] = [
node.dag.forkDigests.phase0,
node.dag.forkDigests.altair,
node.dag.forkDigests.bellatrix]
forkDigestsArray
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/phase0/validator.md#phase-0-attestation-subnet-stability
proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
if node.gossipState.card == 0:
@@ -610,11 +618,7 @@ proc updateAttestationSubnetHandlers(node: BeaconNode, slot: Slot) =
# Remember what we subscribed to, so we can unsubscribe later
node.actionTracker.subscribedSubnets = subnets
let forkDigests: array[BeaconStateFork, auto] = [
node.dag.forkDigests.phase0,
node.dag.forkDigests.altair,
node.dag.forkDigests.bellatrix
]
let forkDigests = node.forkDigests()
for gossipFork in node.gossipState:
let forkDigest = forkDigests[gossipFork]
@@ -705,22 +709,41 @@ proc removePhase0MessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
node.actionTracker.subscribedSubnets = default(AttnetBits)
func hasSyncPubKey(node: BeaconNode, epoch: Epoch): auto =
# Only used to determine which gossip topics to subscribe to
if node.config.subscribeAllSubnets or node.config.subscribeAllSyncnets:
(func(pubkey: ValidatorPubKey): bool {.closure.} = true)
else:
(func(pubkey: ValidatorPubKey): bool =
node.syncCommitteeMsgPool.syncCommitteeSubscriptions.getOrDefault(
pubkey, GENESIS_EPOCH) >= epoch or
pubkey in node.attachedValidators.validators)
proc addAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
node.addPhase0MessageHandlers(forkDigest, slot)
var syncnets: SyncnetBits
# If this comes online near a sync committee period, it'll immediately get
# replaced as usual by trackSyncCommitteeTopics, which runs at slot end.
let
syncCommittee =
withState(node.dag.headState.data):
when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee
else:
default(SyncCommittee)
currentSyncCommitteeSubnets = getSyncSubnets(
node.hasSyncPubKey(slot.epoch), syncCommittee)
# TODO: What are the best topic params for this?
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = subcommitteeIdx
# TODO This should be done in dynamic way in trackSyncCommitteeTopics
node.network.subscribe(getSyncCommitteeTopic(forkDigest, idx), basicParams)
syncnets.setBit(idx.asInt)
if currentSyncCommitteeSubnets[subcommitteeIdx]:
node.network.subscribe(
getSyncCommitteeTopic(forkDigest, subcommitteeIdx), basicParams)
node.network.subscribe(
getSyncCommitteeContributionAndProofTopic(forkDigest), basicParams)
node.network.updateSyncnetsMetadata(syncnets)
node.network.updateSyncnetsMetadata(currentSyncCommitteeSubnets)
proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
node.removePhase0MessageHandlers(forkDigest)
@@ -728,15 +751,104 @@ proc removeAltairMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = subcommitteeIdx
# TODO This should be done in dynamic way in trackSyncCommitteeTopics
node.network.unsubscribe(getSyncCommitteeTopic(forkDigest, idx))
node.network.unsubscribe(
getSyncCommitteeContributionAndProofTopic(forkDigest))
proc trackSyncCommitteeTopics*(node: BeaconNode) =
# TODO
discard
proc trackCurrentSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
# Unlike trackNextSyncCommitteeTopics, just snap to the currently correct
# set of subscriptions, and use current_sync_committee. Furthermore, this
# is potentially useful at arbitrary times, so don't guard it by checking
# for epoch alignment.
let
syncCommittee =
withState(node.dag.headState.data):
when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee
else:
default(SyncCommittee)
currentSyncCommitteeSubnets =
getSyncSubnets(node.hasSyncPubKey(slot.epoch), syncCommittee)
debug "trackCurrentSyncCommitteeTopics: aligning with sync committee subnets",
currentSyncCommitteeSubnets,
metadata_syncnets = node.network.metadata.syncnets,
gossipState = node.gossipState
# Assume that different gossip fork sync committee setups are in sync; this
# only remains relevant, currently, for one gossip transition epoch, so the
# consequences of this not being true aren't exceptionally dire, while this
# allows for bookkeeping simplification.
if currentSyncCommitteeSubnets == node.network.metadata.syncnets:
return
let
newSyncSubnets =
currentSyncCommitteeSubnets - node.network.metadata.syncnets
oldSyncSubnets =
node.network.metadata.syncnets - currentSyncCommitteeSubnets
forkDigests = node.forkDigests()
for subcommitteeIdx in SyncSubcommitteeIndex:
doAssert not (newSyncSubnets[subcommitteeIdx] and
oldSyncSubnets[subcommitteeIdx])
for gossipFork in node.gossipState:
template topic(): auto =
getSyncCommitteeTopic(forkDigests[gossipFork], subcommitteeIdx)
if oldSyncSubnets[subcommitteeIdx]:
node.network.unsubscribe(topic)
elif newSyncSubnets[subcommitteeIdx]:
node.network.subscribe(topic, basicParams)
node.network.updateSyncnetsMetadata(currentSyncCommitteeSubnets)
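
The reconciliation above touches only the symmetric difference between what the node currently advertises and what `current_sync_committee` implies; subnets that are already correct are left alone. A toy restatement using plain Nim sets in place of `SyncnetBits` (names and types are illustrative):

```nim
type SubnetId = range[0 .. 3]

func reconcile(current, desired: set[SubnetId]): tuple[unsub, sub: set[SubnetId]] =
  # Subnets only in `current` get unsubscribed, subnets only in `desired` get
  # subscribed, and the intersection stays as-is.
  (unsub: current - desired, sub: desired - current)

when isMainModule:
  let
    advertised: set[SubnetId] = {0, 2}   # what metadata.syncnets currently says
    wanted: set[SubnetId] = {1, 2}       # what current_sync_committee implies
    (toUnsub, toSub) = reconcile(advertised, wanted)
  doAssert toUnsub == {0.SubnetId} and toSub == {1.SubnetId}
```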
proc trackNextSyncCommitteeTopics(node: BeaconNode, slot: Slot) =
let
epoch = slot.epoch
epochToSyncPeriod = nearSyncCommitteePeriod(epoch)
if epochToSyncPeriod.isNone or
forkVersionAtEpoch(node.dag.cfg, epoch + epochToSyncPeriod.get) ==
node.dag.cfg.GENESIS_FORK_VERSION:
return
if epochToSyncPeriod.get == 0:
node.trackCurrentSyncCommitteeTopics(slot)
return
let
syncCommittee =
withState(node.dag.headState.data):
when stateFork >= BeaconStateFork.Altair:
state.data.next_sync_committee
else:
default(SyncCommittee)
nextSyncCommitteeSubnets = getSyncSubnets(
node.hasSyncPubKey(epoch + epochToSyncPeriod.get), syncCommittee)
forkDigests = node.forkDigests()
var newSubcommittees: SyncnetBits
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/validator.md#sync-committee-subnet-stability
for subcommitteeIdx in SyncSubcommitteeIndex:
if (not node.network.metadata.syncnets[subcommitteeIdx]) and
nextSyncCommitteeSubnets[subcommitteeIdx] and
node.syncCommitteeMsgPool[].isEpochLeadTime(epochToSyncPeriod.get):
for gossipFork in node.gossipState:
node.network.subscribe(getSyncCommitteeTopic(
forkDigests[gossipFork], subcommitteeIdx), basicParams)
newSubcommittees.setBit(distinctBase(subcommitteeIdx))
debug "trackNextSyncCommitteeTopics: subscribing to sync committee subnets",
metadata_syncnets = node.network.metadata.syncnets,
nextSyncCommitteeSubnets,
gossipState = node.gossipState,
epochsToSyncPeriod = epochToSyncPeriod.get,
newSubcommittees
node.network.updateSyncnetsMetadata(
node.network.metadata.syncnets + newSubcommittees)
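
Note the asymmetry with `trackCurrentSyncCommitteeTopics`: the lookahead path only ever adds bits to the advertised syncnets (`metadata.syncnets + newSubcommittees`), while the snap-to-current path replaces the whole set. A toy contrast, again with plain sets standing in for `SyncnetBits`:

```nim
type SubnetId = range[0 .. 3]

func advertiseLookahead(advertised, newlyJoined: set[SubnetId]): set[SubnetId] =
  # Early subscriptions are additive; existing syncnet bits are never cleared.
  advertised + newlyJoined

func advertiseCurrent(wanted: set[SubnetId]): set[SubnetId] =
  # At the period boundary the advertised set is simply replaced.
  wanted

when isMainModule:
  doAssert advertiseLookahead({0.SubnetId}, {2.SubnetId}) == {0.SubnetId, 2.SubnetId}
  doAssert advertiseCurrent({2.SubnetId}) == {2.SubnetId}
```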
proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
## Subscribe to subnets that we are providing stability for or aggregating
@@ -815,12 +927,7 @@ proc updateGossipStatus(node: BeaconNode, slot: Slot) {.async.} =
headSlot = head.slot,
headDistance
# These depend on forks.BeaconStateFork being properly ordered
let forkDigests: array[BeaconStateFork, auto] = [
node.dag.forkDigests.phase0,
node.dag.forkDigests.altair,
node.dag.forkDigests.bellatrix
]
let forkDigests = node.forkDigests()
const removeMessageHandlers: array[BeaconStateFork, auto] = [
removePhase0MessageHandlers,
@@ -877,6 +984,8 @@ proc onSlotEnd(node: BeaconNode, slot: Slot) {.async.} =
node.db.checkpoint()
node.syncCommitteeMsgPool[].pruneData(slot)
if slot.is_epoch:
node.trackNextSyncCommitteeTopics(slot)
# Update upcoming actions - we do this every slot in case a reorg happens
if node.isSynced(node.dag.head) and


@@ -594,6 +594,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
let validator_pubkey = getStateField(
node.dag.headState.data, validators).asSeq()[item.validator_index].pubkey
node.syncCommitteeMsgPool.syncCommitteeSubscriptions[validator_pubkey] =
item.until_epoch
node.validatorMonitor[].addAutoMonitor(
validator_pubkey, ValidatorIndex(item.validator_index))
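
Entries recorded here are what the `hasSyncPubKey` closure in the beacon node later consults: a remote validator client's subscription stays relevant until its `until_epoch` passes, alongside locally attached validators. A toy model of that bookkeeping, with string keys and bare uint64 epochs standing in for `ValidatorPubKey`/`Epoch`:

```nim
import std/tables

var subscriptions = initTable[string, uint64]()

proc recordSubscription(pubkey: string, untilEpoch: uint64) =
  # What the handler above does with item.until_epoch, keyed by pubkey.
  subscriptions[pubkey] = untilEpoch

proc isRelevant(pubkey: string, epoch: uint64, locallyAttached: bool): bool =
  # The shape of the check made by hasSyncPubKey's closure.
  subscriptions.getOrDefault(pubkey, 0'u64) >= epoch or locallyAttached

when isMainModule:
  recordSubscription("validator-a", 10)
  doAssert isRelevant("validator-a", 9, locallyAttached = false)
  doAssert not isRelevant("validator-a", 11, locallyAttached = false)
```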


@@ -166,3 +166,30 @@ func getTargetGossipState*(
{BeaconStateFork.Phase0, BeaconStateFork.Altair}
else:
raiseAssert "Unknown target gossip state"
func nearSyncCommitteePeriod*(epoch: Epoch): Option[uint64] =
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/validator.md#sync-committee-subnet-stability
if epoch.is_sync_committee_period():
return some 0'u64
let epochsBefore =
EPOCHS_PER_SYNC_COMMITTEE_PERIOD - epoch.since_sync_committee_period_start()
if epoch.is_sync_committee_period() or epochsBefore <= SYNC_COMMITTEE_SUBNET_COUNT:
return some epochsBefore
none(uint64)
func getSyncSubnets*(
nodeHasPubkey: proc(pubkey: ValidatorPubKey):
bool {.noSideEffect, raises: [Defect].},
syncCommittee: SyncCommittee): SyncnetBits =
var res: SyncnetBits
for i, pubkey in syncCommittee.pubkeys:
if not nodeHasPubKey(pubkey):
continue
# https://github.com/ethereum/consensus-specs/blob/v1.1.8/specs/altair/validator.md#broadcast-sync-committee-message
# The first quarter of the pubkeys map to subnet 0, the second quarter to
# subnet 1, the third quarter to subnet 2 and the final quarter to subnet
# 3.
res.setBit(i div (SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT))
res
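
A worked example, assuming the mainnet constants `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`: each block of 128 committee positions maps to one subnet. The `subnetFor` helper below is illustrative, not part of the commit:

```nim
const
  SYNC_COMMITTEE_SIZE = 512
  SYNC_COMMITTEE_SUBNET_COUNT = 4

func subnetFor(committeePosition: int): int =
  # Same arithmetic as the setBit call in getSyncSubnets.
  committeePosition div (SYNC_COMMITTEE_SIZE div SYNC_COMMITTEE_SUBNET_COUNT)

when isMainModule:
  doAssert subnetFor(0) == 0     # first quarter  -> subnet 0
  doAssert subnetFor(127) == 0
  doAssert subnetFor(128) == 1   # second quarter -> subnet 1
  doAssert subnetFor(511) == 3   # final quarter  -> subnet 3
```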


@@ -32,7 +32,7 @@ type
ActionTracker* = object
rng: ref BrHmacDrbgContext
subscribeAllSubnets*: bool
subscribeAllAttnets*: bool
currentSlot*: Slot ##\
## Duties that we accept are limited to a range around the current slot
@@ -109,7 +109,7 @@ func aggregateSubnets*(tracker: ActionTracker, wallSlot: Slot): AttnetBits =
res
func stabilitySubnets*(tracker: ActionTracker, slot: Slot): AttnetBits =
if tracker.subscribeAllSubnets:
if tracker.subscribeAllAttnets:
allSubnetBits
else:
var res: AttnetBits
@@ -240,8 +240,10 @@ proc updateActions*(tracker: var ActionTracker, epochRef: EpochRef) =
tracker.attestingSlots[epoch mod 2] or
(1'u32 shl (slot mod SLOTS_PER_EPOCH))
proc init*(T: type ActionTracker, rng: ref BrHmacDrbgContext, subscribeAllSubnets: bool): T =
proc init*(
T: type ActionTracker, rng: ref BrHmacDrbgContext,
subscribeAllAttnets: bool): T =
T(
rng: rng,
subscribeAllSubnets: subscribeAllSubnets
subscribeAllAttnets: subscribeAllAttnets
)


@@ -93,7 +93,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
attPool = AttestationPool.init(dag, quarantine)
batchCrypto = BatchCrypto.new(
keys.newRng(), eager = proc(): bool = true, taskpool)
syncCommitteePool = newClone SyncCommitteeMsgPool.init()
syncCommitteePool = newClone SyncCommitteeMsgPool.init(keys.newRng())
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)


@@ -227,7 +227,7 @@ suite "Gossip validation - Extra": # Not based on preset config
state[].root)
msg = resMsg.get()
syncCommitteeMsgPool = newClone(SyncCommitteeMsgPool.init())
syncCommitteeMsgPool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))
res = waitFor validateSyncCommitteeMessage(
dag, batchCrypto, syncCommitteeMsgPool, msg, subcommitteeIdx,
slot.start_beacon_time(), true)


@@ -173,3 +173,18 @@ suite "Honest validator":
is_aggregator(132, ValidatorSig.fromHex(
"0xa1e0546d5acaf84e5e108e9e23d5d2854c543142afaab5992c7544dd8934709c8c6252f9d23ce04207a1e9fca6716c660f950a9b27e1c591255f00ba2830ad7dba0d2595ae6b27106fadeff2059a6d70c32514db0d878b1dbc924058465e313d")[])
test "isNearSyncCommitteePeriod":
check:
nearSyncCommitteePeriod(0.Epoch).get == 0
for i in 1'u64 .. 20'u64:
for j in 0'u64 .. SYNC_COMMITTEE_SUBNET_COUNT:
check: nearSyncCommitteePeriod((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * i - j).Epoch).get == j
# Smaller values of EPOCHS_PER_SYNC_COMMITTEE_PERIOD would mean the wrap-around
# causes false test failures
static: doAssert EPOCHS_PER_SYNC_COMMITTEE_PERIOD >= 8
for i in 1'u64 .. 20'u64:
for j in (SYNC_COMMITTEE_SUBNET_COUNT + 1'u64) .. 7'u64:
check: nearSyncCommitteePeriod((EPOCHS_PER_SYNC_COMMITTEE_PERIOD * i - j).Epoch).isNone


@@ -2,6 +2,7 @@
import
unittest2,
eth/keys,
../beacon_chain/spec/[beaconstate, helpers, signatures],
../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
./testblockutil
@@ -15,7 +16,7 @@ suite "Sync committee pool":
suite "Sync committee pool":
setup:
var pool = SyncCommitteeMsgPool.init()
var pool = SyncCommitteeMsgPool.init(keys.newRng())
test "An empty pool is safe to use":
let headRoot = eth2digest(@[1.byte, 2, 3])


@@ -6,9 +6,10 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
random,
std/[options, random],
chronicles,
options, stew/endians2,
eth/keys,
stew/endians2,
../beacon_chain/consensus_object_pools/sync_committee_msg_pool,
../beacon_chain/validators/validator_pool,
../beacon_chain/spec/datatypes/bellatrix,
@@ -321,7 +322,7 @@ proc makeSyncAggregate(
getStateField(state, slot)
latest_block_root =
withState(state): state.latest_block_root
syncCommitteePool = newClone(SyncCommitteeMsgPool.init())
syncCommitteePool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))
type
Aggregator = object
subcommitteeIdx: SyncSubcommitteeIndex