Merge branch 'unstable' into dev/etan/rd-shufflingacc

Etan Kissling 2023-05-17 14:06:31 +02:00
commit c70fd8fe97
No known key found for this signature in database
GPG Key ID: B21DA824C5A3D03D
34 changed files with 837 additions and 476 deletions

View File

@ -64,9 +64,10 @@ OK: 24/24 Fail: 0/24 Skip: 0/24
OK: 7/7 Fail: 0/7 Skip: 0/7
## Beacon time
```diff
+ Dependent slots OK
+ basics OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 2/2 Fail: 0/2 Skip: 0/2
## Block pool altair processing [Preset: mainnet]
```diff
+ Invalid signatures [Preset: mainnet] OK
@ -189,12 +190,12 @@ OK: 5/5 Fail: 0/5 Skip: 0/5
OK: 3/3 Fail: 0/3 Skip: 0/3
## Fee recipient management [Beacon Node] [Preset: mainnet]
```diff
+ Configuring the fee recpient [Beacon Node] [Preset: mainnet] OK
+ Configuring the fee recipient [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Header [Beacon Node] [Preset: mainnet] OK
+ Invalid Authorization Token [Beacon Node] [Preset: mainnet] OK
+ Missing Authorization header [Beacon Node] [Preset: mainnet] OK
+ Obtaining the fee recpient of a missing validator returns 404 [Beacon Node] [Preset: mainn OK
+ Obtaining the fee recpient of an unconfigured validator returns the suggested default [Bea OK
+ Obtaining the fee recipient of a missing validator returns 404 [Beacon Node] [Preset: main OK
+ Obtaining the fee recipient of an unconfigured validator returns the suggested default [Be OK
+ Setting the fee recipient on a missing validator creates a record for it [Beacon Node] [Pr OK
```
OK: 7/7 Fail: 0/7 Skip: 0/7
@ -537,8 +538,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
+ An empty pool is safe to prune OK
+ An empty pool is safe to prune 2 OK
+ An empty pool is safe to use OK
+ Missed slots across fork transition OK
+ Missed slots across sync committee period boundary OK
+ isSeen OK
```
OK: 4/4 Fail: 0/4 Skip: 0/4
OK: 7/7 Fail: 0/7 Skip: 0/7
## SyncManager test suite
```diff
+ Process all unviable blocks OK
@ -682,4 +686,4 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
OK: 9/9 Fail: 0/9 Skip: 0/9
---TOTAL---
OK: 387/392 Fail: 0/392 Skip: 5/392
OK: 391/396 Fail: 0/396 Skip: 5/396

View File

@ -1308,12 +1308,12 @@ proc loadEth2Network*(
template loadEth2Network*(config: BeaconNodeConf): Eth2NetworkMetadata =
loadEth2Network(config.eth2Network)
func defaultFeeRecipient*(conf: AnyConf): Eth1Address =
func defaultFeeRecipient*(conf: AnyConf): Opt[Eth1Address] =
if conf.suggestedFeeRecipient.isSome:
conf.suggestedFeeRecipient.get
Opt.some conf.suggestedFeeRecipient.get
else:
# https://github.com/nim-lang/Nim/issues/19802
(static(default(Eth1Address)))
(static(Opt.none Eth1Address))
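For orientation, the now-optional default above is later combined with a validator's withdrawal address via `getPerValidatorDefaultFeeRecipient` (called in the consensus_manager and keystore_management hunks below). A minimal sketch of that resolution order, assuming the configured default wins, then the withdrawal address, then the zero address; `Option` from std/options stands in for the `Opt` type used in the diff:

```nim
import std/options

type Eth1Address = array[20, byte]  # simplified stand-in for the real type

func getPerValidatorDefaultFeeRecipient(
    defaultFeeRecipient: Option[Eth1Address],
    withdrawalAddress: Option[Eth1Address]): Eth1Address =
  # Assumed precedence: configured --suggested-fee-recipient, else the
  # validator's withdrawal address, else the zero address.
  if defaultFeeRecipient.isSome:
    defaultFeeRecipient.get
  elif withdrawalAddress.isSome:
    withdrawalAddress.get
  else:
    default(Eth1Address)

when isMainModule:
  var withdrawal: Eth1Address
  withdrawal[0] = 0xab'u8
  doAssert getPerValidatorDefaultFeeRecipient(
    none(Eth1Address), some(withdrawal)) == withdrawal
```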
proc loadJwtSecret*(
rng: var HmacDrbgContext,

View File

@ -353,6 +353,12 @@ type
block_root* {.serializedFieldName: "block".}: Eth2Digest
optimistic* {.serializedFieldName: "execution_optimistic".}: Option[bool]
func proposer_dependent_slot*(epochRef: EpochRef): Slot =
epochRef.key.epoch.proposer_dependent_slot()
func attester_dependent_slot*(shufflingRef: ShufflingRef): Slot =
shufflingRef.epoch.attester_dependent_slot()
template head*(dag: ChainDAGRef): BlockRef = dag.headState.blck
template frontfill*(dagParam: ChainDAGRef): Opt[BlockId] =

View File

@ -486,9 +486,6 @@ func epochKey(dag: ChainDAGRef, bid: BlockId, epoch: Epoch): Opt[EpochKey] =
Opt.some(EpochKey(bid: bsi.bid, epoch: epoch))
func shufflingDependentSlot*(epoch: Epoch): Slot =
if epoch >= 2: (epoch - 1).start_slot() - 1 else: Slot(0)
func putShufflingRef*(dag: ChainDAGRef, shufflingRef: ShufflingRef) =
## Store shuffling in the cache
if shufflingRef.epoch < dag.finalizedHead.slot.epoch():
@ -503,7 +500,7 @@ func findShufflingRef*(
## Look up a shuffling in the cache, returning `none` if it's not present - see
## `getShufflingRef` for a version that creates a new instance if it's missing
let
dependent_slot = epoch.shufflingDependentSlot
dependent_slot = epoch.attester_dependent_slot()
dependent_bsi = ? dag.atSlot(bid, dependent_slot)
# Check `ShufflingRef` cache
@ -1344,7 +1341,7 @@ func ancestorSlotForShuffling*(
dag: ChainDAGRef, state: ForkyHashedBeaconState,
blck: BlockRef, epoch: Epoch): Opt[Slot] =
## Return slot of `blck` ancestor to which `state` can be rewound
## so that RANDAO at `epoch.shufflingDependentSlot` can be computed.
## so that RANDAO at `epoch.attester_dependent_slot` can be computed.
## Return `err` if `state` is unviable to compute shuffling for `blck@epoch`.
# A state must be somewhat recent so that `get_active_validator_indices`
@ -1395,7 +1392,7 @@ func ancestorSlotForShuffling*(
dag.finalizedHead.blck
else:
? commonAncestor(blck, stateBlck, lowSlot)
dependentSlot = epoch.shufflingDependentSlot
dependentSlot = epoch.attester_dependent_slot
doAssert dependentSlot >= lowSlot
ok min(min(stateBid.slot, ancestorBlck.slot), dependentSlot)
@ -1416,10 +1413,10 @@ proc computeRandaoMix*(
## `state` must have the correct `get_active_validator_indices` for `epoch`.
## RANDAO reveals of blocks from `state.data.slot` back to `ancestorSlot` are
## mixed out from `state.data.randao_mixes`, and RANDAO reveals from blocks
## up through `epoch.shufflingDependentSlot` are mixed in.
## up through `epoch.attester_dependent_slot` are mixed in.
let
stateSlot = state.data.slot
dependentSlot = epoch.shufflingDependentSlot
dependentSlot = epoch.attester_dependent_slot
# Check `state` has locked-in `get_active_validator_indices` for `epoch`
ancestorSlot = ? dag.ancestorSlotForShuffling(state, blck, epoch)
doAssert ancestorSlot <= stateSlot
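The mix-out/mix-in wording works because RANDAO mixes are an XOR accumulator of reveal hashes, so applying the same reveal a second time removes it again. A self-contained illustration with toy 4-byte digests in place of `Eth2Digest`:

```nim
type Digest = array[4, byte]  # toy stand-in for Eth2Digest

func mixIn(mix, reveal: Digest): Digest =
  # RANDAO-style accumulation: XOR the reveal (hash) into the current mix.
  for i in 0 ..< mix.len:
    result[i] = mix[i] xor reveal[i]

when isMainModule:
  let
    base: Digest = [0x11'u8, 0x22, 0x33, 0x44]
    reveal: Digest = [0xa0'u8, 0xb0, 0xc0, 0xd0]
    mixed = base.mixIn(reveal)
  # XOR-ing the same reveal again "mixes it out" and restores the old mix.
  doAssert mixed.mixIn(reveal) == base
```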
@ -1542,7 +1539,7 @@ proc computeShufflingRefFromDatabase*(
dag: ChainDAGRef, blck: BlockRef, epoch: Epoch): Opt[ShufflingRef] =
## Load state from DB, for when DAG states are unviable (up to ~500 ms)
let
dependentSlot = epoch.shufflingDependentSlot
dependentSlot = epoch.attester_dependent_slot
state = newClone(dag.headState)
var
e = dependentSlot.epoch

View File

@ -14,12 +14,14 @@ import
../el/el_manager,
../beacon_clock
from ../spec/beaconstate import get_expected_withdrawals
from ../spec/beaconstate import
get_expected_withdrawals, has_eth1_withdrawal_credential
from ../spec/datatypes/capella import Withdrawal
from ../spec/eth2_apis/dynamic_fee_recipients import
DynamicFeeRecipientsStore, getDynamicFeeRecipient
from ../validators/keystore_management import
KeymanagerHost, getSuggestedFeeRecipient, getSuggestedGasLimit
KeymanagerHost, getPerValidatorDefaultFeeRecipient, getSuggestedFeeRecipient,
getSuggestedGasLimit
from ../validators/action_tracker import ActionTracker, getNextProposalSlot
type
@ -48,7 +50,7 @@ type
# ----------------------------------------------------------------
dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore
validatorsDir: string
defaultFeeRecipient: Eth1Address
defaultFeeRecipient: Opt[Eth1Address]
defaultGasLimit: uint64
# Tracking last proposal forkchoiceUpdated payload information
@ -66,7 +68,7 @@ func new*(T: type ConsensusManager,
actionTracker: ActionTracker,
dynamicFeeRecipientsStore: ref DynamicFeeRecipientsStore,
validatorsDir: string,
defaultFeeRecipient: Eth1Address,
defaultFeeRecipient: Opt[Eth1Address],
defaultGasLimit: uint64
): ref ConsensusManager =
(ref ConsensusManager)(
@ -296,10 +298,28 @@ proc getFeeRecipient*(
Opt.none(Eth1Address)
dynFeeRecipient.valueOr:
let
withdrawalAddress =
if validatorIdx.isSome:
withState(self.dag.headState):
if validatorIdx.get < forkyState.data.validators.lenu64:
let validator = forkyState.data.validators.item(validatorIdx.get)
if has_eth1_withdrawal_credential(validator):
var address: distinctBase(Eth1Address)
address[0..^1] = validator.withdrawal_credentials.data[12..^1]
Opt.some Eth1Address address
else:
Opt.none Eth1Address
else:
Opt.none Eth1Address
else:
Opt.none Eth1Address
defaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
self.defaultFeeRecipient, withdrawalAddress)
self.validatorsDir.getSuggestedFeeRecipient(
pubkey, self.defaultFeeRecipient).valueOr:
pubkey, defaultFeeRecipient).valueOr:
# Ignore errors and use default - errors are logged in gsfr
self.defaultFeeRecipient
defaultFeeRecipient
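The withdrawal-address branch above takes the last 20 bytes of a 0x01 withdrawal credential as the execution address. A standalone sketch of that slicing (simplified types; the 0x01 prefix check is assumed to mirror `has_eth1_withdrawal_credential`):

```nim
import std/options

type
  WithdrawalCredentials = array[32, byte]
  Eth1Address = array[20, byte]

func eth1WithdrawalAddress(
    creds: WithdrawalCredentials): Option[Eth1Address] =
  # 0x01 credentials embed an execution address in bytes 12..31;
  # other credential types (e.g. 0x00 BLS) carry no address.
  if creds[0] == 0x01'u8:
    var address: Eth1Address
    address[0 .. ^1] = creds[12 .. ^1]
    some(address)
  else:
    none(Eth1Address)

when isMainModule:
  var creds: WithdrawalCredentials
  creds[0] = 0x01
  creds[31] = 0xff
  let recipient = eth1WithdrawalAddress(creds)
  doAssert recipient.isSome and recipient.get[19] == 0xff'u8
```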
proc getGasLimit*(
self: ConsensusManager, pubkey: ValidatorPubKey): uint64 =

View File

@ -8,11 +8,11 @@
{.push raises: [].}
import
std/[sets, tables],
std/[algorithm, sequtils, sets, tables],
stew/shims/hashes,
eth/p2p/discoveryv5/random2,
chronicles,
../spec/[crypto, digest],
../spec/[crypto, digest, forks],
../spec/datatypes/altair
export hashes, sets, tables, altair
@ -24,12 +24,11 @@ const
type
SyncCommitteeMsgKey = object
originator: uint64 # ValidatorIndex avoiding mess with invalid values
originator: uint64 # ValidatorIndex to avoid invalid values
slot: Slot
subcommitteeIdx: uint64 # SyncSubcommitteeIndex avoiding mess with invalid values
subcommitteeIdx: uint64 # SyncSubcommitteeIndex to avoid invalid values
TrustedSyncCommitteeMsg* = object
slot*: Slot
subcommitteeIdx*: SyncSubcommitteeIndex
positionInCommittee*: uint64
signature*: CookedSig
@ -40,32 +39,61 @@ type
signature*: CookedSig
BestSyncSubcommitteeContributions* = object
slot*: Slot
subnets*: array[SYNC_COMMITTEE_SUBNET_COUNT,
BestSyncSubcommitteeContribution]
OnSyncContributionCallback* =
proc(data: SignedContributionAndProof) {.gcsafe, raises: [Defect].}
# Messages from different slots / forks may sign the same beacon block root.
# Messages across slots are compatible, but not across forks (signing root).
# Messages from different periods have different signers, so are incompatible.
# Note that the sync committee is determined by `message.slot + 1`, the fork
# is determined by `message.slot`, and both can be different from `bid.slot`.
SyncMsgTarget = object
bid: BlockId # Based on message `beacon_block_root`
period: SyncCommitteePeriod # Based on message `slot + 1`
fork: ConsensusFork # Based on message `slot`
SyncCommitteeMsgPool* = object
seenSyncMsgByAuthor*: HashSet[SyncCommitteeMsgKey]
seenSyncMsgByAuthor*: Table[SyncCommitteeMsgKey, Eth2Digest]
seenContributionByAuthor*: HashSet[SyncCommitteeMsgKey]
syncMessages*: Table[Eth2Digest, seq[TrustedSyncCommitteeMsg]]
bestContributions*: Table[Eth2Digest, BestSyncSubcommitteeContributions]
syncMessages*: Table[SyncMsgTarget, seq[TrustedSyncCommitteeMsg]]
bestContributions*: Table[SyncMsgTarget, BestSyncSubcommitteeContributions]
onContributionReceived*: OnSyncContributionCallback
rng: ref HmacDrbgContext
cfg: RuntimeConfig
func hash*(x: SyncCommitteeMsgKey): Hash =
hashAllFields(x)
func toSyncMsgTarget(
cfg: RuntimeConfig, bid: BlockId, slot: Slot): SyncMsgTarget =
SyncMsgTarget(
bid: bid,
period: (slot + 1).sync_committee_period,
fork: cfg.consensusForkAtEpoch(slot.epoch))
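To see why the pool keys on `(bid, period, fork)` rather than the block root alone, here is a simplified stand-alone version of the keying; the period length and fork boundary are toy values, not the real `RuntimeConfig`:

```nim
type
  ConsensusFork = enum Altair, Bellatrix
  SyncMsgTarget = tuple[root: string, period: uint64, fork: ConsensusFork]

const
  slotsPerPeriod = 16'u64  # toy value; mainnet uses 256 epochs * 32 slots
  forkSlot = 48'u64        # toy fork boundary

func toSyncMsgTarget(root: string, slot: uint64): SyncMsgTarget =
  (root: root,
   period: (slot + 1) div slotsPerPeriod,                # committee of slot + 1
   fork: (if slot >= forkSlot: Bellatrix else: Altair))  # fork of slot

when isMainModule:
  # Same root, adjacent slots, but a period boundary in between:
  doAssert toSyncMsgTarget("r", 14) != toSyncMsgTarget("r", 15)
  # Same root, same period and fork: the messages share a bucket.
  doAssert toSyncMsgTarget("r", 16) == toSyncMsgTarget("r", 17)
```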
func hash(x: SyncMsgTarget): Hash =
hashAllFields(x)
func `<`(x, y: SyncMsgTarget): bool =
if x.bid.slot != y.bid.slot:
x.bid.slot < y.bid.slot
elif x.period != y.period:
x.period < y.period
else:
x.fork < y.fork
func init*(T: type SyncCommitteeMsgPool,
rng: ref HmacDrbgContext,
cfg: RuntimeConfig,
onSyncContribution: OnSyncContributionCallback = nil
): SyncCommitteeMsgPool =
T(rng: rng, onContributionReceived: onSyncContribution)
T(rng: rng, cfg: cfg, onContributionReceived: onSyncContribution)
func pruneData*(pool: var SyncCommitteeMsgPool, slot: Slot) =
func pruneData*(pool: var SyncCommitteeMsgPool, slot: Slot, force = false) =
## This should be called at the end of each slot.
clear pool.seenContributionByAuthor
clear pool.seenSyncMsgByAuthor
@ -73,60 +101,70 @@ func pruneData*(pool: var SyncCommitteeMsgPool, slot: Slot) =
if slot < syncCommitteeMsgsRetentionSlots:
return
let minSlotToRetain = slot - syncCommitteeMsgsRetentionSlots
var syncMsgsToDelete: seq[Eth2Digest]
var contributionsToDelete: seq[Eth2Digest]
# Messages signing a `beacon_block_root` may remain valid over multiple slots.
# Therefore, we filter by the targeted `BlockId` instead of message `slot`.
let
minSlotToRetain = slot - syncCommitteeMsgsRetentionSlots
minEntriesToKeep = if force: 0 else: syncCommitteeMsgsRetentionSlots
for blockRoot, msgs in pool.syncMessages:
if msgs[0].slot < minSlotToRetain:
syncMsgsToDelete.add blockRoot
template pruneTable(table: untyped) =
if table.len > minEntriesToKeep:
var targets = table.keys().toSeq()
targets.sort(order = SortOrder.Descending)
for i in minEntriesToKeep ..< targets.len:
if targets[i].bid.slot < minSlotToRetain:
table.del targets[i]
for blockRoot in syncMsgsToDelete:
pool.syncMessages.del blockRoot
for blockRoot, bestContributions in pool.bestContributions:
if bestContributions.slot < minSlotToRetain:
contributionsToDelete.add blockRoot
for blockRoot in contributionsToDelete:
pool.bestContributions.del blockRoot
pruneTable pool.syncMessages
pruneTable pool.bestContributions
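The `minEntriesToKeep`/`force` handling keeps the newest targets even when they fall outside the retention window, unless a forced prune is requested. A toy version of that policy over a plain table (arbitrary slot numbers):

```nim
import std/[algorithm, sequtils, tables]

proc prune(table: var Table[uint64, string],
           minSlotToRetain: uint64, minEntriesToKeep: int) =
  # Delete entries older than the retention window, but always keep the
  # `minEntriesToKeep` newest ones (a forced prune passes 0 here).
  if table.len > minEntriesToKeep:
    var slots = toSeq(table.keys)
    slots.sort(order = SortOrder.Descending)
    for i in minEntriesToKeep ..< slots.len:
      if slots[i] < minSlotToRetain:
        table.del slots[i]

when isMainModule:
  var t = {1'u64: "a", 2'u64: "b", 9'u64: "c"}.toTable
  prune(t, minSlotToRetain = 5, minEntriesToKeep = 2)  # regular prune
  doAssert t.len == 2 and 9'u64 in t and 2'u64 in t
  prune(t, minSlotToRetain = 5, minEntriesToKeep = 0)  # forced prune
  doAssert t.len == 1 and 9'u64 in t
```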
func isSeen*(
pool: SyncCommitteeMsgPool,
msg: SyncCommitteeMessage,
subcommitteeIdx: SyncSubcommitteeIndex): bool =
subcommitteeIdx: SyncSubcommitteeIndex,
headBid: BlockId): bool =
let seenKey = SyncCommitteeMsgKey(
originator: msg.validator_index, # Might be unvalidated at this point
originator: msg.validator_index, # Might be unvalidated at this point
slot: msg.slot,
subcommitteeIdx: subcommitteeIdx.uint64)
seenKey in pool.seenSyncMsgByAuthor
return
if seenKey notin pool.seenSyncMsgByAuthor:
false
elif msg.beacon_block_root == headBid.root:
pool.seenSyncMsgByAuthor.getOrDefault(seenKey) == headBid.root
else:
true
proc addSyncCommitteeMessage*(
pool: var SyncCommitteeMsgPool,
slot: Slot,
blockRoot: Eth2Digest,
bid: BlockId,
validatorIndex: uint64,
signature: CookedSig,
subcommitteeIdx: SyncSubcommitteeIndex,
positionsInCommittee: openArray[uint64]) =
positionsInCommittee: seq[uint64]) =
let seenKey = SyncCommitteeMsgKey(
originator: validatorIndex,
slot: slot,
subcommitteeIdx: subcommitteeIdx.uint64)
pool.seenSyncMsgByAuthor[seenKey] = bid.root
let
seenKey = SyncCommitteeMsgKey(
originator: validatorIndex,
slot: slot,
subcommitteeIdx: subcommitteeIdx.uint64)
pool.seenSyncMsgByAuthor.incl seenKey
for position in positionsInCommittee:
pool.syncMessages.mgetOrPut(blockRoot, @[]).add TrustedSyncCommitteeMsg(
slot: slot,
subcommitteeIdx: subcommitteeIdx,
positionInCommittee: position,
signature: signature)
func registerVotes(votes: var seq[TrustedSyncCommitteeMsg]) =
for position in positionsInCommittee:
block addVote:
for vote in votes:
if vote.subcommitteeIdx == subcommitteeIdx and
vote.positionInCommittee == position:
break addVote
votes.add TrustedSyncCommitteeMsg(
subcommitteeIdx: subcommitteeIdx,
positionInCommittee: position,
signature: signature)
let target = pool.cfg.toSyncMsgTarget(bid, slot)
pool.syncMessages.mgetOrPut(target, @[]).registerVotes()
debug "Sync committee message resolved",
slot = slot, blockRoot = shortLog(blockRoot), validatorIndex
slot = slot, blockRoot = shortLog(target.bid.root), validatorIndex
func computeAggregateSig(votes: seq[TrustedSyncCommitteeMsg],
subcommitteeIdx: SyncSubcommitteeIndex,
@ -135,6 +173,7 @@ func computeAggregateSig(votes: seq[TrustedSyncCommitteeMsg],
aggregateSig {.noinit.}: AggregateSignature
initialized = false
contribution.aggregation_bits.reset()
for vote in votes:
if vote.subcommitteeIdx != subcommitteeIdx:
continue
@ -150,21 +189,24 @@ func computeAggregateSig(votes: seq[TrustedSyncCommitteeMsg],
if initialized:
contribution.signature = aggregateSig.finish.toValidatorSig
else:
contribution.signature = ValidatorSig.infinity
initialized
func produceContribution*(
pool: SyncCommitteeMsgPool,
slot: Slot,
headRoot: Eth2Digest,
headBid: BlockId,
subcommitteeIdx: SyncSubcommitteeIndex,
outContribution: var SyncCommitteeContribution): bool =
if headRoot in pool.syncMessages:
let target = pool.cfg.toSyncMsgTarget(headBid, slot)
if target in pool.syncMessages:
outContribution.slot = slot
outContribution.beacon_block_root = headRoot
outContribution.beacon_block_root = headBid.root
outContribution.subcommittee_index = subcommitteeIdx.asUInt64
try:
computeAggregateSig(pool.syncMessages[headRoot],
computeAggregateSig(pool.syncMessages[target],
subcommitteeIdx,
outContribution)
except KeyError:
@ -203,11 +245,13 @@ func covers(
func covers*(
pool: var SyncCommitteeMsgPool,
contribution: SyncCommitteeContribution): bool =
contribution: SyncCommitteeContribution,
bid: BlockId): bool =
## Return true iff the given contribution brings no new information compared
## to the contributions already seen in the pool, i.e. if the contribution is a
## subset of the best contribution so far
pool.bestContributions.withValue(contribution.beacon_block_root, best):
let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot)
pool.bestContributions.withValue(target, best):
return best[].covers(contribution)
return false
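The "non-strict superset" condition amounts to a bitmask subset test: a contribution adds nothing if every participation bit it sets is already set in the best contribution. A minimal sketch with plain integers standing in for the aggregation bits:

```nim
func covered(best, candidate: uint64): bool =
  # True iff `candidate` sets no bit that `best` does not already have,
  # i.e. `best` is a non-strict superset of `candidate`.
  (candidate and not best) == 0

when isMainModule:
  let best = 0b1011'u64
  doAssert covered(best, 0b0011'u64)      # strict subset: duplicate
  doAssert covered(best, 0b1011'u64)      # equal: still no new information
  doAssert not covered(best, 0b0100'u64)  # brings a new participant: keep it
```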
@ -215,6 +259,7 @@ func covers*(
proc addContribution(pool: var SyncCommitteeMsgPool,
aggregator_index: uint64,
contribution: SyncCommitteeContribution,
bid: BlockId,
signature: CookedSig) =
let seenKey = SyncCommitteeMsgKey(
originator: aggregator_index,
@ -222,12 +267,10 @@ proc addContribution(pool: var SyncCommitteeMsgPool,
subcommitteeIdx: contribution.subcommittee_index)
pool.seenContributionByAuthor.incl seenKey
template blockRoot: auto = contribution.beacon_block_root
if blockRoot notin pool.bestContributions:
let target = pool.cfg.toSyncMsgTarget(bid, contribution.slot)
if target notin pool.bestContributions:
let totalParticipants = countOnes(contribution.aggregation_bits)
var initialBestContributions = BestSyncSubcommitteeContributions(
slot: contribution.slot)
var initialBestContributions = BestSyncSubcommitteeContributions()
initialBestContributions.subnets[contribution.subcommittee_index] =
BestSyncSubcommitteeContribution(
@ -235,59 +278,66 @@ proc addContribution(pool: var SyncCommitteeMsgPool,
participationBits: contribution.aggregation_bits,
signature: signature)
pool.bestContributions[blockRoot] = initialBestContributions
pool.bestContributions[target] = initialBestContributions
else:
try:
addAggregateAux(pool.bestContributions[blockRoot], contribution)
addAggregateAux(pool.bestContributions[target], contribution)
except KeyError:
raiseAssert "We have checked for the key upfront"
proc addContribution*(pool: var SyncCommitteeMsgPool,
scproof: SignedContributionAndProof,
bid: BlockId,
signature: CookedSig) =
pool.addContribution(
scproof.message.aggregator_index, scproof.message.contribution, signature)
scproof.message.aggregator_index,
scproof.message.contribution,
bid, signature)
if not(isNil(pool.onContributionReceived)):
pool.onContributionReceived(scproof)
proc produceSyncAggregateAux(
bestContributions: BestSyncSubcommitteeContributions): SyncAggregate =
contributions: BestSyncSubcommitteeContributions): SyncAggregate =
var
aggregateSig {.noinit.}: AggregateSignature
initialized = false
startTime = Moment.now
aggregate: SyncAggregate
for subcommitteeIdx in SyncSubcommitteeIndex:
if bestContributions.subnets[subcommitteeIdx].totalParticipants == 0:
if contributions.subnets[subcommitteeIdx].totalParticipants == 0:
continue
for pos, value in bestContributions.subnets[subcommitteeIdx].participationBits:
for pos, value in contributions.subnets[subcommitteeIdx].participationBits:
if value:
let globalPos = subcommitteeIdx.asInt * SYNC_SUBCOMMITTEE_SIZE + pos
result.sync_committee_bits.setBit globalPos
aggregate.sync_committee_bits.setBit globalPos
if not initialized:
initialized = true
aggregateSig.init(bestContributions.subnets[subcommitteeIdx].signature)
aggregateSig.init(contributions.subnets[subcommitteeIdx].signature)
else:
aggregateSig.aggregate(bestContributions.subnets[subcommitteeIdx].signature)
aggregateSig.aggregate(contributions.subnets[subcommitteeIdx].signature)
if initialized:
result.sync_committee_signature = aggregateSig.finish.toValidatorSig
aggregate.sync_committee_signature = aggregateSig.finish.toValidatorSig
else:
result.sync_committee_signature = ValidatorSig.infinity
aggregate.sync_committee_signature = ValidatorSig.infinity
let duration = Moment.now - startTime
debug "SyncAggregate produced", duration,
bits = result.sync_committee_bits
bits = aggregate.sync_committee_bits
aggregate
proc produceSyncAggregate*(
pool: SyncCommitteeMsgPool,
targetRoot: Eth2Digest): SyncAggregate =
if targetRoot in pool.bestContributions:
bid: BlockId,
slot: Slot): SyncAggregate =
let target = pool.cfg.toSyncMsgTarget(bid, slot)
if target in pool.bestContributions:
try:
produceSyncAggregateAux(pool.bestContributions[targetRoot])
produceSyncAggregateAux(pool.bestContributions[target])
except KeyError:
raiseAssert "We have checked for the key upfront"
else:

View File

@ -91,6 +91,10 @@ const
# https://github.com/ethereum/execution-apis/blob/v1.0.0-beta.3/src/engine/experimental/blob-extension.md#request-2
GETBLOBS_TIMEOUT = 1.seconds
connectionStateChangeHysteresisThreshold = 15
## How many unsuccessful/successful requests we must see
## before declaring the connection as degraded/restored
type
Eth1BlockNumber* = uint64
Eth1BlockTimestamp* = uint64
@ -207,6 +211,7 @@ type
## exchange.
state: ConnectionState
hysteresisCounter: int
depositContractSyncStatus: DepositContractSyncStatus
## Are we sure that this EL has synced the deposit contract?
@ -280,29 +285,56 @@ declareCounter engine_api_last_minute_forkchoice_updates_sent,
"Number of last minute requests to the forkchoiceUpdated Engine API end-point just before block proposals",
labels = ["url"]
proc close(connection: ELConnection): Future[void] {.async.} =
if connection.web3.isSome:
awaitWithTimeout(connection.web3.get.close(), 30.seconds):
debug "Failed to close data provider in time"
proc increaseCounterTowardsStateChange(connection: ELConnection): bool =
result = connection.hysteresisCounter >= connectionStateChangeHysteresisThreshold
if result:
connection.hysteresisCounter = 0
else:
inc connection.hysteresisCounter
proc decreaseCounterTowardsStateChange(connection: ELConnection) =
if connection.hysteresisCounter > 0:
# While we increase the counter by 1, we decrease it to 20% in order
# to require a steady and affirmative change instead of allowing
# the counter to drift very slowly in one direction when the ratio
# between success and failure is roughly 50:50
connection.hysteresisCounter = connection.hysteresisCounter div 5
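The hysteresis only flips the connection state after a sustained run of failures (or successes), because each event in the opposite direction cuts the counter to 20% of its value. A toy standalone model; the threshold is the value from this diff, everything else is simplified:

```nim
const threshold = 15  # connectionStateChangeHysteresisThreshold in the diff

type Conn = object
  counter: int

proc towardsChange(c: var Conn): bool =
  # True once enough consecutive evidence accumulated to flip the state.
  result = c.counter >= threshold
  if result:
    c.counter = 0
  else:
    inc c.counter

proc awayFromChange(c: var Conn) =
  if c.counter > 0:
    c.counter = c.counter div 5  # keep 20%, so 50:50 noise cannot drift it up

when isMainModule:
  var c = Conn()
  # Alternating failure/success never reaches the threshold...
  for _ in 0 ..< 100:
    doAssert not c.towardsChange()
    c.awayFromChange()
  # ...but a sustained run of failures does.
  var flipped = false
  for _ in 0 ..< threshold + 1:
    if c.towardsChange(): flipped = true
  doAssert flipped
```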
proc setDegradedState(connection: ELConnection,
requestName: string,
statusCode: int, errMsg: string) =
debug "Failed EL Request", requestName, statusCode, err = errMsg
case connection.state
of NeverTested, Working:
warn "Connection to EL node degraded",
url = url(connection.engineUrl),
failedRequest = requestName,
statusCode, err = errMsg
of Degraded:
discard
if connection.increaseCounterTowardsStateChange():
warn "Connection to EL node degraded",
url = url(connection.engineUrl),
failedRequest = requestName,
statusCode, err = errMsg
reset connection.web3
connection.state = Degraded
connection.state = Degraded
asyncSpawn connection.close()
connection.web3 = none[Web3]()
of Degraded:
connection.decreaseCounterTowardsStateChange()
proc setWorkingState(connection: ELConnection) =
case connection.state
of Degraded:
info "Connection to EL node restored",
url = url(connection.engineUrl)
if connection.increaseCounterTowardsStateChange():
info "Connection to EL node restored",
url = url(connection.engineUrl)
connection.state = Working
of NeverTested, Working:
discard
connection.state = Working
connection.decreaseCounterTowardsStateChange()
proc trackEngineApiRequest(connection: ELConnection,
request: FutureBase, requestName: string,
@ -658,11 +690,6 @@ func toVoteData(blk: Eth1Block): Eth1Data =
func hash*(x: Eth1Data): Hash =
hash(x.block_hash)
proc close(connection: ELConnection): Future[void] {.async.} =
if connection.web3.isSome:
awaitWithTimeout(connection.web3.get.close(), 30.seconds):
debug "Failed to close data provider in time"
func isConnected(connection: ELConnection): bool =
connection.web3.isSome

View File

@ -587,15 +587,15 @@ proc processSyncCommitteeMessage*(
# Now proceed to validation
let v = await validateSyncCommitteeMessage(
self.dag, self.batchCrypto, self.syncCommitteeMsgPool,
self.dag, self.quarantine, self.batchCrypto, self.syncCommitteeMsgPool,
syncCommitteeMsg, subcommitteeIdx, wallTime, checkSignature)
return if v.isOk():
trace "Sync committee message validated"
let (positions, cookedSig) = v.get()
let (bid, cookedSig, positions) = v.get()
self.syncCommitteeMsgPool[].addSyncCommitteeMessage(
syncCommitteeMsg.slot,
syncCommitteeMsg.beacon_block_root,
bid,
syncCommitteeMsg.validator_index,
cookedSig,
subcommitteeIdx,
@ -633,16 +633,19 @@ proc processSignedContributionAndProof*(
# Now proceed to validation
let v = await validateContribution(
self.dag, self.batchCrypto, self.syncCommitteeMsgPool,
self.dag, self.quarantine, self.batchCrypto, self.syncCommitteeMsgPool,
contributionAndProof, wallTime, checkSignature)
return if v.isOk():
trace "Contribution validated"
let (bid, sig, participants) = v.get
self.syncCommitteeMsgPool[].addContribution(
contributionAndProof, v.get()[0])
contributionAndProof, bid, sig)
self.validatorMonitor[].registerSyncContribution(
src, wallTime, contributionAndProof.message, v.get()[1])
src, wallTime, contributionAndProof.message, participants)
beacon_sync_committee_contributions_received.inc()

View File

@ -1024,13 +1024,15 @@ proc validateVoluntaryExit*(
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
proc validateSyncCommitteeMessage*(
dag: ChainDAGRef,
quarantine: ref Quarantine,
batchCrypto: ref BatchCrypto,
syncCommitteeMsgPool: ref SyncCommitteeMsgPool,
msg: SyncCommitteeMessage,
subcommitteeIdx: SyncSubcommitteeIndex,
wallTime: BeaconTime,
checkSignature: bool):
Future[Result[(seq[uint64], CookedSig), ValidationError]] {.async.} =
Future[Result[
(BlockId, CookedSig, seq[uint64]), ValidationError]] {.async.} =
block:
# [IGNORE] The message's slot is for the current slot (with a
# `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e.
@ -1051,6 +1053,19 @@ proc validateSyncCommitteeMessage*(
return dag.checkedReject(
"SyncCommitteeMessage: originator not part of sync committee")
# [IGNORE] The block being signed (`sync_committee_message.beacon_block_root`)
# has been seen (via both gossip and non-gossip sources) (a client MAY queue
# sync committee messages for processing once block is received)
# [REJECT] The block being signed (`sync_committee_message.beacon_block_root`)
# passes validation.
let
blockRoot = msg.beacon_block_root
blck = dag.getBlockRef(blockRoot).valueOr:
if blockRoot in quarantine[].unviable:
return dag.checkedReject("SyncCommitteeMessage: target invalid")
quarantine[].addMissing(blockRoot)
return errIgnore("SyncCommitteeMessage: target not found")
block:
# [IGNORE] There has been no other valid sync committee message for the
# declared `slot` for the validator referenced by
@ -1059,14 +1074,12 @@ proc validateSyncCommitteeMessage*(
# Note this validation is per topic so that for a given slot, multiple
# messages could be forwarded with the same validator_index as long as
# the subnet_ids are distinct.
if syncCommitteeMsgPool[].isSeen(msg, subcommitteeIdx):
if syncCommitteeMsgPool[].isSeen(msg, subcommitteeIdx, dag.head.bid):
return errIgnore("SyncCommitteeMessage: duplicate message")
# [REJECT] The signature is valid for the message beacon_block_root for the
# validator referenced by validator_index.
let
epoch = msg.slot.epoch
fork = dag.forkAtEpoch(epoch)
senderPubKey = dag.validatorKey(msg.validator_index).valueOr:
return dag.checkedReject("SyncCommitteeMessage: invalid validator index")
@ -1075,7 +1088,8 @@ proc validateSyncCommitteeMessage*(
# Attestation signatures are batch-verified
let deferredCrypto = batchCrypto
.scheduleSyncCommitteeMessageCheck(
fork, msg.slot, msg.beacon_block_root,
dag.forkAtEpoch(msg.slot.epoch),
msg.slot, msg.beacon_block_root,
senderPubKey, msg.signature)
if deferredCrypto.isErr():
return dag.checkedReject(deferredCrypto.error)
@ -1098,17 +1112,19 @@ proc validateSyncCommitteeMessage*(
return dag.checkedReject(
"SyncCommitteeMessage: unable to load signature")
return ok((positionsInSubcommittee, sig))
return ok((blck.bid, sig, positionsInSubcommittee))
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
proc validateContribution*(
dag: ChainDAGRef,
quarantine: ref Quarantine,
batchCrypto: ref BatchCrypto,
syncCommitteeMsgPool: ref SyncCommitteeMsgPool,
msg: SignedContributionAndProof,
wallTime: BeaconTime,
checkSignature: bool
): Future[Result[(CookedSig, seq[ValidatorIndex]), ValidationError]] {.async.} =
): Future[Result[
(BlockId, CookedSig, seq[ValidatorIndex]), ValidationError]] {.async.} =
let
syncCommitteeSlot = msg.message.contribution.slot
@ -1124,16 +1140,19 @@ proc validateContribution*(
# i.e. contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT.
let subcommitteeIdx = SyncSubcommitteeIndex.init(
msg.message.contribution.subcommittee_index).valueOr:
return dag.checkedReject(
"SignedContributionAndProof: subcommittee index too high")
return dag.checkedReject("Contribution: subcommittee index too high")
# [REJECT] The contribution has participants
# that is, any(contribution.aggregation_bits).
if msg.message.contribution.aggregation_bits.isZeros:
return dag.checkedReject("Contribution: aggregation bits empty")
# [REJECT] contribution_and_proof.selection_proof selects the validator
# as an aggregator for the slot
# i.e. is_sync_committee_aggregator(contribution_and_proof.selection_proof)
# returns True.
if not is_sync_committee_aggregator(msg.message.selection_proof):
return dag.checkedReject(
"SignedContributionAndProof: invalid selection_proof")
return dag.checkedReject("Contribution: invalid selection_proof")
# [IGNORE] The sync committee contribution is the first valid
# contribution received for the aggregator with index
@ -1142,37 +1161,48 @@ proc validateContribution*(
# (this requires maintaining a cache of size SYNC_COMMITTEE_SIZE for this
# topic that can be flushed after each slot).
if syncCommitteeMsgPool[].isSeen(msg.message):
return errIgnore("SignedContributionAndProof: duplicate contribution")
return errIgnore("Contribution: duplicate contribution")
# [REJECT] The aggregator's validator index is in the declared subcommittee
# of the current sync committee.
# i.e. state.validators[contribution_and_proof.aggregator_index].pubkey in
# get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index).
let
epoch = msg.message.contribution.slot.epoch
fork = dag.forkAtEpoch(epoch)
aggregator_index =
ValidatorIndex.init(msg.message.aggregator_index).valueOr:
return dag.checkedReject("Contribution: invalid aggregator index")
# TODO we take a copy of the participants to avoid the data going stale
# between validation and use - nonetheless, a design that avoids it and
# stays safe would be nice
participants = dag.syncCommitteeParticipants(
msg.message.contribution.slot, subcommitteeIdx)
if aggregator_index notin participants:
return dag.checkedReject("Contribution: aggregator not in subcommittee")
if msg.message.contribution.aggregation_bits.countOnes() == 0:
# [REJECT] The contribution has participants
# that is, any(contribution.aggregation_bits).
return dag.checkedReject(
"SignedContributionAndProof: aggregation bits empty")
# [IGNORE] The block being signed
# (`contribution_and_proof.contribution.beacon_block_root`) has been seen
# (via both gossip and non-gossip sources) (a client MAY queue sync committee
# contributions for processing once block is received)
# [REJECT] The block being signed
# (`contribution_and_proof.contribution.beacon_block_root`) passes validation.
let
blockRoot = msg.message.contribution.beacon_block_root
blck = dag.getBlockRef(blockRoot).valueOr:
if blockRoot in quarantine[].unviable:
return dag.checkedReject("Contribution: target invalid")
quarantine[].addMissing(blockRoot)
return errIgnore("Contribution: target not found")
# [IGNORE] A valid sync committee contribution with equal `slot`,
# `beacon_block_root` and `subcommittee_index` whose `aggregation_bits`
# is non-strict superset has _not_ already been seen.
if syncCommitteeMsgPool[].covers(msg.message.contribution):
return errIgnore("SignedContributionAndProof: duplicate contribution")
# TODO we take a copy of the participants to avoid the data going stale
# between validation and use - nonetheless, a design that avoids it and
# stays safe would be nice
let participants = dag.syncCommitteeParticipants(
msg.message.contribution.slot, subcommitteeIdx)
if syncCommitteeMsgPool[].covers(msg.message.contribution, blck.bid):
return errIgnore("Contribution: duplicate contribution")
let sig = if checkSignature:
let deferredCrypto = batchCrypto.scheduleContributionChecks(
fork, msg, subcommitteeIdx, dag)
dag.forkAtEpoch(msg.message.contribution.slot.epoch),
msg, subcommitteeIdx, dag)
if deferredCrypto.isErr():
return dag.checkedReject(deferredCrypto.error)
@ -1186,11 +1216,11 @@ proc validateContribution*(
case x
of BatchResult.Invalid:
return dag.checkedReject(
"SignedContributionAndProof: invalid aggregator signature")
"Contribution: invalid aggregator signature")
of BatchResult.Timeout:
beacon_contributions_dropped_queue_full.inc()
return errIgnore(
"SignedContributionAndProof: timeout checking aggregator signature")
"Contribution: timeout checking aggregator signature")
of BatchResult.Valid:
discard
@ -1202,10 +1232,10 @@ proc validateContribution*(
let x = await proofFut
case x
of BatchResult.Invalid:
return dag.checkedReject("SignedContributionAndProof: invalid proof")
return dag.checkedReject("Contribution: invalid proof")
of BatchResult.Timeout:
beacon_contributions_dropped_queue_full.inc()
return errIgnore("SignedContributionAndProof: timeout checking proof")
return errIgnore("Contribution: timeout checking proof")
of BatchResult.Valid:
discard
@ -1217,12 +1247,12 @@ proc validateContribution*(
let x = await contributionFut
case x
of BatchResult.Invalid:
return errReject( # TODO Triggers in local tests around fork transition
"SignedContributionAndProof: invalid contribution signature")
return dag.checkedReject(
"Contribution: invalid contribution signature")
of BatchResult.Timeout:
beacon_contributions_dropped_queue_full.inc()
return errIgnore(
"SignedContributionAndProof: timeout checking contribution signature")
"Contribution: timeout checking contribution signature")
of BatchResult.Valid:
discard
sig
@ -1230,7 +1260,7 @@ proc validateContribution*(
msg.message.contribution.signature.load().valueOr:
return dag.checkedReject("SyncCommitteeMessage: unable to load signature")
return ok((sig, participants))
return ok((blck.bid, sig, participants))
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/light-client/p2p-interface.md#light_client_finality_update
proc validateLightClientFinalityUpdate*(

View File

@ -353,14 +353,20 @@ proc installMessageValidators*(
contextFork = consensusFork # Avoid capturing `Deneb` (Nim 1.6)
digest = forkDigests[].atConsensusFork(contextFork)
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/light-client/p2p-interface.md#light_client_finality_update
lightClient.network.addValidator(
getLightClientFinalityUpdateTopic(digest),
proc(msg: lcDataFork.LightClientFinalityUpdate): ValidationResult =
getLightClientFinalityUpdateTopic(digest), proc (
msg: lcDataFork.LightClientFinalityUpdate
): ValidationResult =
validate(msg, contextFork, processLightClientFinalityUpdate))
# light_client_optimistic_update
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/light-client/p2p-interface.md#light_client_optimistic_update
lightClient.network.addValidator(
getLightClientOptimisticUpdateTopic(digest),
proc(msg: lcDataFork.LightClientOptimisticUpdate): ValidationResult =
getLightClientOptimisticUpdateTopic(digest), proc (
msg: lcDataFork.LightClientOptimisticUpdate
): ValidationResult =
validate(msg, contextFork, processLightClientOptimisticUpdate))
proc updateGossipStatus*(

View File

@ -312,7 +312,7 @@ proc initFullNode(
attestationPool = newClone(
AttestationPool.init(dag, quarantine, onAttestationReceived))
syncCommitteeMsgPool = newClone(
SyncCommitteeMsgPool.init(rng, onSyncContribution))
SyncCommitteeMsgPool.init(rng, dag.cfg, onSyncContribution))
lightClientPool = newClone(
LightClientPool())
validatorChangePool = newClone(
@ -1435,160 +1435,130 @@ proc installRestHandlers(restServer: RestServerRef, node: BeaconNode) =
from ./spec/datatypes/capella import SignedBeaconBlock
proc installMessageValidators(node: BeaconNode) =
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attestations-and-aggregation
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync-committees-and-aggregation
# These validators stay around the whole time, regardless of which specific
# subnets are subscribed to during any given epoch.
let forkDigests = node.dag.forkDigests
node.network.addValidator(
getBeaconBlocksTopic(forkDigests.phase0),
proc (signedBlock: phase0.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
for fork in ConsensusFork:
withConsensusFork(fork):
let digest = forkDigests[].atConsensusFork(consensusFork)
template installPhase0Validators(digest: auto) =
for it in SubnetId:
closureScope:
let subnet_id = it
node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id),
# This proc needs to be within closureScope; don't lift out of loop.
proc(attestation: Attestation): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id)))
# beacon_block
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_block
node.network.addValidator(
getBeaconBlocksTopic(digest), proc (
signedBlock: consensusFork.SignedBeaconBlock
): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(
signedBlock))
else:
toValidationResult(
node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
node.network.addAsyncValidator(
getAggregateAndProofsTopic(digest),
proc(signedAggregateAndProof: SignedAggregateAndProof):
Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processSignedAggregateAndProof(
MsgSource.gossip, signedAggregateAndProof, false)))
# beacon_attestation_{subnet_id}
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id
for it in SubnetId:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let subnet_id = it
node.network.addAsyncValidator(
getAttestationTopic(digest, subnet_id), proc (
attestation: Attestation
): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processAttestation(
MsgSource.gossip, attestation, subnet_id)))
node.network.addValidator(
getAttesterSlashingsTopic(digest),
proc (attesterSlashing: AttesterSlashing): ValidationResult =
toValidationResult(
node.processor[].processAttesterSlashing(
MsgSource.gossip, attesterSlashing)))
# beacon_aggregate_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_aggregate_and_proof
node.network.addAsyncValidator(
getAggregateAndProofsTopic(digest), proc (
signedAggregateAndProof: SignedAggregateAndProof
): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processSignedAggregateAndProof(
MsgSource.gossip, signedAggregateAndProof)))
node.network.addValidator(
getProposerSlashingsTopic(digest),
proc (proposerSlashing: ProposerSlashing): ValidationResult =
toValidationResult(
node.processor[].processProposerSlashing(
MsgSource.gossip, proposerSlashing)))
node.network.addValidator(
getVoluntaryExitsTopic(digest),
proc (signedVoluntaryExit: SignedVoluntaryExit): ValidationResult =
toValidationResult(
node.processor[].processSignedVoluntaryExit(
MsgSource.gossip, signedVoluntaryExit)))
installPhase0Validators(forkDigests.phase0)
# Validators introduced in phase0 are also used in Altair and Bellatrix, but
# with different fork digests
installPhase0Validators(forkDigests.altair)
installPhase0Validators(forkDigests.bellatrix)
installPhase0Validators(forkDigests.capella)
if node.dag.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
installPhase0Validators(forkDigests.deneb)
node.network.addValidator(
getBeaconBlocksTopic(forkDigests.altair),
proc (signedBlock: altair.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
node.network.addValidator(
getBeaconBlocksTopic(forkDigests.bellatrix),
proc (signedBlock: bellatrix.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
node.network.addValidator(
getBeaconBlocksTopic(forkDigests.capella),
proc (signedBlock: capella.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
if node.dag.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
node.network.addValidator(
getBeaconBlocksTopic(forkDigests.deneb),
proc (signedBlock: deneb.SignedBeaconBlock): ValidationResult =
if node.shouldSyncOptimistically(node.currentSlot):
# attester_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#attester_slashing
node.network.addValidator(
getAttesterSlashingsTopic(digest), proc (
attesterSlashing: AttesterSlashing
): ValidationResult =
toValidationResult(
node.optimisticProcessor.processSignedBeaconBlock(signedBlock))
else:
toValidationResult(node.processor[].processSignedBeaconBlock(
MsgSource.gossip, signedBlock)))
node.processor[].processAttesterSlashing(
MsgSource.gossip, attesterSlashing)))
for i in 0 ..< MAX_BLOBS_PER_BLOCK:
closureScope:
let idx = i
node.network.addValidator(
getBlobSidecarTopic(forkDigests.deneb, idx),
proc (signedBlobSidecar: deneb.SignedBlobSidecar): ValidationResult =
toValidationResult(node.processor[].processSignedBlobSidecar(
MsgSource.gossip, signedBlobSidecar, idx)))
# proposer_slashing
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#proposer_slashing
node.network.addValidator(
getProposerSlashingsTopic(digest), proc (
proposerSlashing: ProposerSlashing
): ValidationResult =
toValidationResult(
node.processor[].processProposerSlashing(
MsgSource.gossip, proposerSlashing)))
template installSyncCommitteeeValidators(digest: auto) =
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope:
let idx = subcommitteeIdx
# voluntary_exit
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#voluntary_exit
node.network.addValidator(
getVoluntaryExitsTopic(digest), proc (
signedVoluntaryExit: SignedVoluntaryExit
): ValidationResult =
toValidationResult(
node.processor[].processSignedVoluntaryExit(
MsgSource.gossip, signedVoluntaryExit)))
when consensusFork >= ConsensusFork.Altair:
# sync_committee_{subnet_id}
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync_committee_subnet_id
for subcommitteeIdx in SyncSubcommitteeIndex:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let idx = subcommitteeIdx
node.network.addAsyncValidator(
getSyncCommitteeTopic(digest, idx), proc (
msg: SyncCommitteeMessage
): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processSyncCommitteeMessage(
MsgSource.gossip, msg, idx)))
# sync_committee_contribution_and_proof
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
node.network.addAsyncValidator(
getSyncCommitteeTopic(digest, idx),
# This proc needs to be within closureScope; don't lift out of loop.
proc(msg: SyncCommitteeMessage): Future[ValidationResult] {.async.} =
getSyncCommitteeContributionAndProofTopic(digest), proc (
msg: SignedContributionAndProof
): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processSyncCommitteeMessage(
MsgSource.gossip, msg, idx)))
await node.processor.processSignedContributionAndProof(
MsgSource.gossip, msg)))
node.network.addAsyncValidator(
getSyncCommitteeContributionAndProofTopic(digest),
proc(msg: SignedContributionAndProof): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processSignedContributionAndProof(
MsgSource.gossip, msg)))
when consensusFork >= ConsensusFork.Capella:
# bls_to_execution_change
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/capella/p2p-interface.md#bls_to_execution_change
node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest), proc (
msg: SignedBLSToExecutionChange
): Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processBlsToExecutionChange(
MsgSource.gossip, msg)))
installSyncCommitteeeValidators(forkDigests.altair)
installSyncCommitteeeValidators(forkDigests.bellatrix)
installSyncCommitteeeValidators(forkDigests.capella)
if node.dag.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
installSyncCommitteeeValidators(forkDigests.deneb)
template installBlsToExecutionChangeValidators(digest: auto) =
node.network.addAsyncValidator(
getBlsToExecutionChangeTopic(digest),
proc(msg: SignedBLSToExecutionChange):
Future[ValidationResult] {.async.} =
return toValidationResult(
await node.processor.processBlsToExecutionChange(
MsgSource.gossip, msg)))
installBlsToExecutionChangeValidators(forkDigests.capella)
if node.dag.cfg.DENEB_FORK_EPOCH != FAR_FUTURE_EPOCH:
installBlsToExecutionChangeValidators(forkDigests.deneb)
when consensusFork >= ConsensusFork.Deneb:
# blob_sidecar_{index}
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/deneb/p2p-interface.md#blob_sidecar_index
for i in 0 ..< MAX_BLOBS_PER_BLOCK:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let idx = i
node.network.addValidator(
getBlobSidecarTopic(digest, idx), proc (
signedBlobSidecar: SignedBlobSidecar
): ValidationResult =
toValidationResult(
node.processor[].processSignedBlobSidecar(
MsgSource.gossip, signedBlobSidecar, idx)))
node.installLightClientMessageValidators()
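The refactor above collapses the per-fork copies into a single loop over `ConsensusFork`, with `withConsensusFork` injecting the fork as a compile-time constant so `when consensusFork >= ...` can gate Altair/Capella/Deneb-only topics. A stripped-down sketch of that dispatch pattern (toy enum and template, not the real spec helpers):

```nim
type ConsensusFork = enum Phase0, Altair, Capella

template withConsensusFork(x: ConsensusFork, body: untyped) =
  # Each runtime branch re-exposes the fork as a static constant for `when`.
  case x
  of Phase0:
    const consensusFork {.inject.} = ConsensusFork.Phase0
    body
  of Altair:
    const consensusFork {.inject.} = ConsensusFork.Altair
    body
  of Capella:
    const consensusFork {.inject.} = ConsensusFork.Capella
    body

when isMainModule:
  var installed: seq[string]
  for fork in ConsensusFork:
    withConsensusFork(fork):
      installed.add "beacon_block/" & $consensusFork
      when consensusFork >= ConsensusFork.Altair:
        installed.add "sync_committee/" & $consensusFork
  doAssert installed == @[
    "beacon_block/Phase0",
    "beacon_block/Altair", "sync_committee/Altair",
    "beacon_block/Capella", "sync_committee/Capella"]
```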

View File

@ -1,4 +1,4 @@
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -126,7 +126,8 @@ proc handleAddRemoteValidatorReq(host: KeymanagerHost,
keystore: RemoteKeystore): RequestItemStatus =
let res = importKeystore(host.validatorPool[], host.validatorsDir, keystore)
if res.isOk:
host.addValidator(res.get())
host.addValidator(
res.get(), host.getValidatorWithdrawalAddress(keystore.pubkey))
RequestItemStatus(status: $KeystoreStatus.imported)
else:
@ -193,7 +194,8 @@ proc installKeymanagerHandlers*(router: var RestRouter, host: KeymanagerHost) =
response.data.add(
RequestItemStatus(status: $KeystoreStatus.duplicate))
else:
host.addValidator(res.get())
host.addValidator(
res.get(), host.getValidatorWithdrawalAddress(res.get.pubkey))
response.data.add(
RequestItemStatus(status: $KeystoreStatus.imported))
@ -333,7 +335,11 @@ proc installKeymanagerHandlers*(router: var RestRouter, host: KeymanagerHost) =
let
pubkey = pubkey.valueOr:
return keymanagerApiError(Http400, InvalidValidatorPublicKey)
ethaddress = host.getSuggestedFeeRecipient(pubkey)
perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
host.defaultFeeRecipient,
host.getValidatorWithdrawalAddress(pubkey))
ethaddress = host.getSuggestedFeeRecipient(
pubkey, perValidatorDefaultFeeRecipient)
return if ethaddress.isOk:
RestApiResponse.jsonResponse(ListFeeRecipientResponse(

View File

@ -272,6 +272,17 @@ func keysToIndices*(cacheTable: var Table[ValidatorPubKey, ValidatorIndex],
indices[listIndex[]] = some(ValidatorIndex(validatorIndex))
indices
proc getShufflingOptimistic*(node: BeaconNode,
dependentSlot: Slot,
dependentRoot: Eth2Digest): Option[bool] =
if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH:
if dependentSlot <= node.dag.finalizedHead.slot:
some[bool](false)
else:
some[bool](node.dag.is_optimistic(dependentRoot))
else:
none[bool]()
proc getStateOptimistic*(node: BeaconNode,
state: ForkedHashedBeaconState): Option[bool] =
if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH:

View File

@ -65,11 +65,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
"Cannot request duties past next epoch")
res
let (qhead, qoptimistic) =
let (qhead, _) =
block:
let res = node.getSyncedHead(qepoch)
if res.isErr():
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
$res.error())
res.get()
let shufflingRef = node.dag.getShufflingRef(qhead, qepoch, true).valueOr:
return RestApiResponse.jsonError(Http400, PrunedStateError)
@ -102,11 +103,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
)
res
let optimistic =
if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH:
some(qoptimistic)
else:
none[bool]()
let optimistic = node.getShufflingOptimistic(
shufflingRef.attester_dependent_slot,
shufflingRef.attester_dependent_root)
return RestApiResponse.jsonResponseWRoot(
duties, shufflingRef.attester_dependent_root, optimistic)
@ -127,11 +126,12 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http400, InvalidEpochValueError,
"Cannot request duties past next epoch")
res
let (qhead, qoptimistic) =
let (qhead, _) =
block:
let res = node.getSyncedHead(qepoch)
if res.isErr():
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
$res.error())
res.get()
let epochRef = node.dag.getEpochRef(qhead, qepoch, true).valueOr:
return RestApiResponse.jsonError(Http400, PrunedStateError, $error)
@ -155,11 +155,9 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
)
res
let optimistic =
if node.currentSlot().epoch() >= node.dag.cfg.BELLATRIX_FORK_EPOCH:
some(qoptimistic)
else:
none[bool]()
let optimistic = node.getShufflingOptimistic(
epochRef.proposer_dependent_slot,
epochRef.proposer_dependent_root)
return RestApiResponse.jsonResponseWRoot(
duties, epochRef.proposer_dependent_root, optimistic)
@ -245,8 +243,22 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
headEpoch = node.dag.head.slot.epoch
headSyncPeriod = sync_committee_period(headEpoch)
dependentSlot = max(
node.dag.cfg.ALTAIR_FORK_EPOCH.start_slot,
if qSyncPeriod >= 2.SyncCommitteePeriod:
(qSyncPeriod - 1).start_slot
else:
GENESIS_SLOT + 1) - 1
dependentRoot =
if dependentSlot <= node.dag.finalizedHead.slot:
node.dag.finalizedHead.blck.root # No need to look up the actual root
else:
let bsi = node.dag.head.atSlot(dependentSlot)
doAssert bsi.blck != nil, "Non-finalized block has `BlockRef`"
bsi.blck.root
optimistic = node.getShufflingOptimistic(dependentSlot, dependentRoot)
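With mainnet presets (32 slots per epoch, 256 epochs per sync committee period, Altair at epoch 74240) the expression above resolves to the last slot before the queried period starts, clamped to the slot just before the Altair fork. A small numeric check with plain integers (toy helpers, not the real `Slot`/`Epoch` types):

```nim
const
  slotsPerEpoch = 32'u64
  epochsPerPeriod = 256'u64
  slotsPerPeriod = slotsPerEpoch * epochsPerPeriod  # 8192
  altairStartSlot = 74240'u64 * slotsPerEpoch       # mainnet ALTAIR_FORK_EPOCH

func dependentSlot(period: uint64): uint64 =
  # Mirrors the expression above: duties of period p depend on the last
  # slot of period p - 1, clamped to the slot just before the Altair fork.
  let lowerBound =
    if period >= 2: (period - 1) * slotsPerPeriod
    else: 1'u64  # GENESIS_SLOT + 1
  max(altairStartSlot, lowerBound) - 1

when isMainModule:
  # Early periods clamp to the slot just before the Altair fork activates.
  doAssert dependentSlot(0) == altairStartSlot - 1
  # Later periods depend on the last slot of the previous period.
  let p = altairStartSlot div slotsPerPeriod + 5
  doAssert dependentSlot(p) == (p - 1) * slotsPerPeriod - 1
```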
if qSyncPeriod == headSyncPeriod:
let optimistic = node.getStateOptimistic(node.dag.headState)
let res = withState(node.dag.headState):
when consensusFork >= ConsensusFork.Altair:
produceResponse(indexList,
@ -256,7 +268,6 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
emptyResponse()
return RestApiResponse.jsonResponseWOpt(res, optimistic)
elif qSyncPeriod == (headSyncPeriod + 1):
let optimistic = node.getStateOptimistic(node.dag.headState)
let res = withState(node.dag.headState):
when consensusFork >= ConsensusFork.Altair:
produceResponse(indexList,
@ -288,7 +299,6 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
return RestApiResponse.jsonError(Http404, StateNotFoundError)
node.withStateForBlockSlotId(bsi):
let optimistic = node.getStateOptimistic(state)
let res = withState(state):
when consensusFork >= ConsensusFork.Altair:
produceResponse(indexList,
@ -817,13 +827,21 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
res.get()
# Check if node is fully synced.
let sres = node.getSyncedHead(qslot)
if sres.isErr() or sres.get().optimistic:
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
block:
let res = node.getSyncedHead(qslot)
if res.isErr():
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError,
$res.error())
let tres = res.get()
if tres.optimistic:
return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
var contribution = SyncCommitteeContribution()
let res = node.syncCommitteeMsgPool[].produceContribution(
qslot, qroot, qindex, contribution)
let
blck = node.dag.getBlockRef(qroot).valueOr:
return RestApiResponse.jsonError(Http404, BlockNotFoundError)
res = node.syncCommitteeMsgPool[].produceContribution(
qslot, blck.bid, qindex, contribution)
if not(res):
return RestApiResponse.jsonError(Http400, ProduceContributionError)
return RestApiResponse.jsonResponse(contribution)

View File

@ -254,6 +254,12 @@ template start_slot*(period: SyncCommitteePeriod): Slot =
if period >= maxPeriod: FAR_FUTURE_SLOT
else: Slot(period * SLOTS_PER_SYNC_COMMITTEE_PERIOD)
func proposer_dependent_slot*(epoch: Epoch): Slot =
if epoch >= 1: epoch.start_slot() - 1 else: Slot(0)
func attester_dependent_slot*(epoch: Epoch): Slot =
if epoch >= 2: (epoch - 1).start_slot() - 1 else: Slot(0)
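These helpers pin the shuffling for `epoch` to a specific slot: the proposer shuffling can be influenced by blocks up to one slot before the epoch starts, the attester shuffling only by blocks up to one slot before the previous epoch starts. A quick numeric check assuming mainnet's 32 slots per epoch (plain integers, not the real `Slot`/`Epoch` types):

```nim
const slotsPerEpoch = 32'u64

func startSlot(epoch: uint64): uint64 = epoch * slotsPerEpoch

func proposerDependentSlot(epoch: uint64): uint64 =
  if epoch >= 1: startSlot(epoch) - 1 else: 0

func attesterDependentSlot(epoch: uint64): uint64 =
  if epoch >= 2: startSlot(epoch - 1) - 1 else: 0

when isMainModule:
  # Epoch 10 starts at slot 320: proposer shuffling depends on slot 319,
  # attester shuffling on slot 287 (the last slot of epoch 8).
  doAssert proposerDependentSlot(10) == 319
  doAssert attesterDependentSlot(10) == 287
  doAssert attesterDependentSlot(1) == 0  # clamped near genesis
```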
func `$`*(t: BeaconTime): string =
if t.ns_since_genesis >= 0:
$(timer.nanoseconds(t.ns_since_genesis))

View File

@ -278,6 +278,20 @@ template BeaconBlockType*(fork: static ConsensusFork): auto =
template BeaconBlockBodyType*(fork: static ConsensusFork): auto =
getSymbolFromForkModule(fork, "BeaconBlockBody")
template SignedBeaconBlock*(kind: static ConsensusFork): auto =
when kind == ConsensusFork.Deneb:
typedesc[deneb.SignedBeaconBlock]
elif kind == ConsensusFork.Capella:
typedesc[capella.SignedBeaconBlock]
elif kind == ConsensusFork.Bellatrix:
typedesc[bellatrix.SignedBeaconBlock]
elif kind == ConsensusFork.Altair:
typedesc[altair.SignedBeaconBlock]
elif kind == ConsensusFork.Phase0:
typedesc[phase0.SignedBeaconBlock]
else:
static: raiseAssert "Unreachable"
template ExecutionPayloadForSigning*(kind: static ConsensusFork): auto =
when kind == ConsensusFork.Deneb:
typedesc[deneb.ExecutionPayloadForSigning]

View File

@ -699,14 +699,6 @@ proc readValue*(reader: var JsonReader, value: var RemoteKeystore)
if remoteType.isSome:
reader.raiseUnexpectedField("Multiple `type` fields found",
"RemoteKeystore")
if version.isNone:
reader.raiseUnexpectedField(
"The `type` field should be specified after the `version` field of the keystore",
"RemoteKeystore")
if version.get < 2:
reader.raiseUnexpectedField(
"The `type` field is valid only past version 2 of the remote keystore format",
"RemoteKeystore")
let remoteTypeValue = case reader.readValue(string).toLowerAscii()
of "web3signer":
RemoteSignerType.Web3Signer

View File

@ -66,7 +66,10 @@ const
# https://github.com/ethereum/builder-specs/blob/v0.3.0/specs/bellatrix/validator.md#constants
EPOCHS_PER_VALIDATOR_REGISTRATION_SUBMISSION* = 1
BUILDER_PROPOSAL_DELAY_TOLERANCE* = 1.seconds
# Spec is 1 second, but mev-boost indirection can induce delay when the relay
# itself has already consumed the entire second.
BUILDER_PROPOSAL_DELAY_TOLERANCE* = 1500.milliseconds
func shortLog*(v: BlindedBeaconBlock): auto =
(

View File

@ -388,7 +388,7 @@ proc collectSignatureSets*(
# ----------------------------------------------------
withState(state):
when consensusFork >= ConsensusFork.Altair:
if signed_block.message.body.sync_aggregate.sync_committee_bits.countOnes() == 0:
if signed_block.message.body.sync_aggregate.sync_committee_bits.isZeros:
if signed_block.message.body.sync_aggregate.sync_committee_signature != ValidatorSig.infinity():
return err("collectSignatureSets: empty sync aggregates need signature of point at infinity")
else:

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -693,9 +693,16 @@ proc currentSlot*(vc: ValidatorClientRef): Slot =
proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
let
slot = vc.currentSlot()
withdrawalAddress =
if vc.keymanagerHost.isNil:
Opt.none Eth1Address
else:
vc.keymanagerHost[].getValidatorWithdrawalAddress(keystore.pubkey)
perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
vc.config.defaultFeeRecipient, withdrawalAddress)
feeRecipient = vc.config.validatorsDir.getSuggestedFeeRecipient(
keystore.pubkey, vc.config.defaultFeeRecipient).valueOr(
vc.config.defaultFeeRecipient)
keystore.pubkey, perValidatorDefaultFeeRecipient).valueOr(
perValidatorDefaultFeeRecipient)
gasLimit = vc.config.validatorsDir.getSuggestedGasLimit(
keystore.pubkey, vc.config.suggestedGasLimit).valueOr(
vc.config.suggestedGasLimit)
@ -730,8 +737,16 @@ proc getFeeRecipient*(vc: ValidatorClientRef, pubkey: ValidatorPubKey,
if dynamicRecipient.isSome():
Opt.some(dynamicRecipient.get())
else:
let staticRecipient = getSuggestedFeeRecipient(
vc.config.validatorsDir, pubkey, vc.config.defaultFeeRecipient)
let
withdrawalAddress =
if vc.keymanagerHost.isNil:
Opt.none Eth1Address
else:
vc.keymanagerHost[].getValidatorWithdrawalAddress(pubkey)
perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
vc.config.defaultFeeRecipient, withdrawalAddress)
staticRecipient = getSuggestedFeeRecipient(
vc.config.validatorsDir, pubkey, perValidatorDefaultFeeRecipient)
if staticRecipient.isOk():
Opt.some(staticRecipient.get())
else:

View File

@ -75,7 +75,7 @@ type
keymanagerToken*: string
validatorsDir*: string
secretsDir*: string
defaultFeeRecipient*: Eth1Address
defaultFeeRecipient*: Opt[Eth1Address]
defaultGasLimit*: uint64
getValidatorAndIdxFn*: ValidatorPubKeyToDataFn
getBeaconTimeFn*: GetBeaconTimeFn
@ -101,7 +101,7 @@ func init*(T: type KeymanagerHost,
keymanagerToken: string,
validatorsDir: string,
secretsDir: string,
defaultFeeRecipient: Eth1Address,
defaultFeeRecipient: Opt[Eth1Address],
defaultGasLimit: uint64,
getValidatorAndIdxFn: ValidatorPubKeyToDataFn,
getBeaconTimeFn: GetBeaconTimeFn): T =
@ -744,9 +744,9 @@ func gasLimitPath(validatorsDir: string,
validatorsDir.validatorKeystoreDir(pubkey) / GasLimitFilename
proc getSuggestedFeeRecipient*(
validatorsDir: string,
pubkey: ValidatorPubKey,
defaultFeeRecipient: Eth1Address): Result[Eth1Address, ValidatorConfigFileStatus] =
validatorsDir: string, pubkey: ValidatorPubKey,
defaultFeeRecipient: Eth1Address):
Result[Eth1Address, ValidatorConfigFileStatus] =
# In this particular case, an error might be by design. If the file exists,
# but doesn't load or parse, that's a more urgent matter to fix. Many people
# might prefer, however, not to override their default suggested fee
@ -1386,20 +1386,60 @@ proc setGasLimit*(host: KeymanagerHost,
io2.writeFile(validatorKeystoreDir / GasLimitFilename, $gasLimit)
.mapErr(proc(e: auto): string = "Failed to write gas limit file: " & $e)
from ".."/spec/beaconstate import has_eth1_withdrawal_credential
proc getValidatorWithdrawalAddress*(
host: KeymanagerHost, pubkey: ValidatorPubKey): Opt[Eth1Address] =
if host.getValidatorAndIdxFn.isNil:
Opt.none Eth1Address
else:
let validatorAndIndex = host.getValidatorAndIdxFn(pubkey)
if validatorAndIndex.isNone:
Opt.none Eth1Address
else:
template validator: auto = validatorAndIndex.get.validator
if has_eth1_withdrawal_credential(validator):
var address: distinctBase(Eth1Address)
address[0..^1] =
validator.withdrawal_credentials.data[12..^1]
Opt.some Eth1Address address
else:
Opt.none Eth1Address
func getPerValidatorDefaultFeeRecipient*(
defaultFeeRecipient: Opt[Eth1Address],
withdrawalAddress: Opt[Eth1Address]): Eth1Address =
defaultFeeRecipient.valueOr:
withdrawalAddress.valueOr:
(static(default(Eth1Address)))
proc getSuggestedFeeRecipient*(
host: KeymanagerHost,
pubkey: ValidatorPubKey): Result[Eth1Address, ValidatorConfigFileStatus] =
host.validatorsDir.getSuggestedFeeRecipient(pubkey, host.defaultFeeRecipient)
host: KeymanagerHost, pubkey: ValidatorPubKey,
defaultFeeRecipient: Eth1Address):
Result[Eth1Address, ValidatorConfigFileStatus] {.deprecated.} =
host.validatorsDir.getSuggestedFeeRecipient(pubkey, defaultFeeRecipient)
proc getSuggestedFeeRecipient(
host: KeymanagerHost, pubkey: ValidatorPubKey,
withdrawalAddress: Opt[Eth1Address]): Eth1Address =
# Enforce the getSuggestedFeeRecipient(foo).valueOr(foo) pattern where feasible
let perValidatorDefaultFeeRecipient = getPerValidatorDefaultFeeRecipient(
host.defaultFeeRecipient, withdrawalAddress)
host.getSuggestedFeeRecipient(
pubkey, perValidatorDefaultFeeRecipient).valueOr:
perValidatorDefaultFeeRecipient
proc getSuggestedGasLimit*(
host: KeymanagerHost,
pubkey: ValidatorPubKey): Result[uint64, ValidatorConfigFileStatus] =
host.validatorsDir.getSuggestedGasLimit(pubkey, host.defaultGasLimit)
proc addValidator*(host: KeymanagerHost, keystore: KeystoreData) =
proc addValidator*(
host: KeymanagerHost, keystore: KeystoreData,
withdrawalAddress: Opt[Eth1Address]) =
let
feeRecipient = host.getSuggestedFeeRecipient(keystore.pubkey).valueOr(
host.defaultFeeRecipient)
feeRecipient = host.getSuggestedFeeRecipient(
keystore.pubkey, withdrawalAddress)
gasLimit = host.getSuggestedGasLimit(keystore.pubkey).valueOr(
host.defaultGasLimit)
v = host.validatorPool[].addValidator(keystore, feeRecipient, gasLimit)

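The new `getPerValidatorDefaultFeeRecipient` resolves the default in a fixed order: the operator-configured default if present, otherwise the validator's 0x01 withdrawal address, otherwise the zero address, with `getValidatorWithdrawalAddress` pulling that address out of the last 20 bytes of the withdrawal credentials. Below is a minimal self-contained sketch of the same two steps; it uses std/options and a plain 20-byte array in place of the repo's `Opt`/`Eth1Address` types, an assumption made only to keep the snippet compilable on its own.

```nim
import std/options

type Address20 = array[20, byte]   # stand-in for Eth1Address

func perValidatorDefault(
    configuredDefault, withdrawalAddress: Option[Address20]): Address20 =
  # Precedence: configured default fee recipient, then the validator's
  # 0x01 withdrawal address, then the zero address.
  if configuredDefault.isSome: configuredDefault.get
  elif withdrawalAddress.isSome: withdrawalAddress.get
  else: default(Address20)

func withdrawalAddressOf(credentials: array[32, byte]): Option[Address20] =
  # A 0x01 ("eth1") credential embeds an execution address in bytes 12..31.
  if credentials[0] == 0x01'u8:
    var address: Address20
    address[0 .. ^1] = credentials[12 .. ^1]
    some address
  else:
    none Address20

when isMainModule:
  var creds: array[32, byte]
  creds[0] = 0x01
  creds[31] = 0xaa
  let addrOpt = withdrawalAddressOf(creds)
  doAssert addrOpt.isSome and addrOpt.get[19] == 0xaa
  # With no configured default, the withdrawal address is used as fallback.
  doAssert perValidatorDefault(none Address20, addrOpt) == addrOpt.get
```

The `if/elif/else` chain here plays the role of the nested `valueOr` calls in the diff above.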
View File

@ -408,10 +408,10 @@ proc makeBeaconBlockForHeadAndSlot*(
node.validatorChangePool[].getBeaconBlockValidatorChanges(
node.dag.cfg, forkyState.data)
syncAggregate =
if slot.epoch < node.dag.cfg.ALTAIR_FORK_EPOCH:
SyncAggregate.init()
if slot.epoch >= node.dag.cfg.ALTAIR_FORK_EPOCH:
node.syncCommitteeMsgPool[].produceSyncAggregate(head.bid, slot)
else:
node.syncCommitteeMsgPool[].produceSyncAggregate(head.root)
SyncAggregate.init()
payload = (await payloadFut).valueOr:
beacon_block_production_errors.inc()
warn "Unable to get execution payload. Skipping block proposal",
@ -1190,7 +1190,7 @@ proc signAndSendContribution(node: BeaconNode,
if not node.syncCommitteeMsgPool[].produceContribution(
slot,
head.root,
head.bid,
subcommitteeIdx,
msg.message.contribution):
return

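For context on the first hunk in this file: the branch order is flipped so the sync aggregate is only taken from the message pool once the proposal slot's epoch has reached `ALTAIR_FORK_EPOCH`, and earlier slots fall back to an empty aggregate. A small sketch of that gating, with toy types and the mainnet `SLOTS_PER_EPOCH` assumed (the real `SyncAggregate.init()` also carries the point-at-infinity signature, omitted here):

```nim
const SLOTS_PER_EPOCH = 32'u64   # mainnet preset assumed

type SyncAggregate = object
  participation: int             # stand-in for the 512-bit participation field

proc aggregateFromPool(slot: uint64): SyncAggregate =
  # placeholder for the pool's produceSyncAggregate(head.bid, slot)
  SyncAggregate(participation: 3)

proc syncAggregateForProposal(slot, altairForkEpoch: uint64): SyncAggregate =
  if slot div SLOTS_PER_EPOCH >= altairForkEpoch:
    aggregateFromPool(slot)
  else:
    SyncAggregate()              # pre-Altair: empty aggregate

when isMainModule:
  doAssert syncAggregateForProposal(10, altairForkEpoch = 1).participation == 0
  doAssert syncAggregateForProposal(64, altairForkEpoch = 1).participation == 3
```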
View File

@ -43,7 +43,7 @@ pip-tools==6.8.0
# via -r requirements.in
pygments==2.12.0
# via mkdocs-material
pymdown-extensions==9.5
pymdown-extensions==10.0
# via mkdocs-material
pyparsing==3.0.9
# via packaging
@ -52,6 +52,7 @@ python-dateutil==2.8.2
pyyaml==6.0
# via
# mkdocs
# pymdown-extensions
# pyyaml-env-tag
pyyaml-env-tag==0.1
# via mkdocs

View File

@ -325,7 +325,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
batchCrypto = BatchCrypto.new(
keys.newRng(), eager = func(): bool = true,
genesis_validators_root = dag.genesis_validators_root, taskpool)
syncCommitteePool = newClone SyncCommitteeMsgPool.init(keys.newRng())
syncCommitteePool = newClone SyncCommitteeMsgPool.init(keys.newRng(), cfg)
timers: array[Timers, RunningStat]
attesters: RunningStat
r = initRand(1)
@ -394,7 +394,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
let
validatorPrivKey = MockPrivKeys[validatorIdx]
signature = get_sync_committee_message_signature(
fork, genesis_validators_root, slot, dag.head.root, validatorPrivKey)
fork, genesis_validators_root,
slot, dag.head.root, validatorPrivKey)
msg = SyncCommitteeMessage(
slot: slot,
beacon_block_root: dag.head.root,
@ -402,6 +403,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
signature: signature.toValidatorSig)
let res = waitFor dag.validateSyncCommitteeMessage(
quarantine,
batchCrypto,
syncCommitteePool,
msg,
@ -411,11 +413,11 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
doAssert res.isOk
let (positions, cookedSig) = res.get()
let (bid, cookedSig, positions) = res.get()
syncCommitteePool[].addSyncCommitteeMessage(
msg.slot,
msg.beacon_block_root,
bid,
msg.validator_index,
cookedSig,
subcommitteeIdx,
@ -435,7 +437,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
for aggregator in aggregators:
var contribution: SyncCommitteeContribution
let contributionWasProduced = syncCommitteePool[].produceContribution(
slot, dag.head.root, aggregator.subcommitteeIdx, contribution)
slot, dag.head.bid, aggregator.subcommitteeIdx, contribution)
if contributionWasProduced:
let
@ -454,19 +456,20 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
validatorPrivKey).toValidatorSig)
res = waitFor dag.validateContribution(
quarantine,
batchCrypto,
syncCommitteePool,
signedContributionAndProof,
contributionsTime,
false)
if res.isOk():
let (bid, sig, _) = res.get
syncCommitteePool[].addContribution(
signedContributionAndProof, res.get()[0])
signedContributionAndProof, bid, sig)
else:
# We ignore duplicates / already-covered contributions
doAssert res.error()[0] == ValidationResult.Ignore
proc getNewBlock[T](
state: var ForkedHashedBeaconState, slot: Slot, cache: var StateCache): T =
let
@ -479,13 +482,10 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
finalizedEpochRef.eth1_data,
finalizedEpochRef.eth1_deposit_index)
sync_aggregate =
when T is phase0.SignedBeaconBlock:
SyncAggregate.init()
elif T is altair.SignedBeaconBlock or T is bellatrix.SignedBeaconBlock or
T is capella.SignedBeaconBlock or T is deneb.SignedBeaconBlock:
syncCommitteePool[].produceSyncAggregate(dag.head.root)
when T.toFork >= ConsensusFork.Altair:
syncCommitteePool[].produceSyncAggregate(dag.head.bid, slot)
else:
static: doAssert false
SyncAggregate.init()
hashedState =
when T is phase0.SignedBeaconBlock:
addr state.phase0Data

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2019-2022 Status Research & Development GmbH
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -129,7 +129,7 @@ cli do(slots = SLOTS_PER_EPOCH * 5,
if (rand(r, high(int)).float * attesterRatio).int <= high(int):
attestation.aggregation_bits.setBit index_in_committee
if attestation.aggregation_bits.countOnes() > 0:
if not attestation.aggregation_bits.isZeros:
if validate:
attestation.signature = makeAttestationSig(
forkyState.data.fork, genesis_validators_root,

View File

@ -240,13 +240,13 @@ cli do(validatorsDir: string, secretsDir: string,
signature = get_attestation_signature(
fork, genesis_validators_root, attestation.data,
validators[validator_index])
if attestation.aggregation_bits.countOnes() == 0:
if attestation.aggregation_bits.isZeros:
agg = AggregateSignature.init(signature)
else:
agg.aggregate(signature)
attestation.aggregation_bits.setBit(index_in_committee)
if attestation.aggregation_bits.countOnes() > 0:
if not attestation.aggregation_bits.isZeros:
attestation.signature = agg.finish().toValidatorSig()
if aggregates.len == 128:

View File

@ -54,3 +54,13 @@ suite "Beacon time":
counts += 1
check:
counts == 2
test "Dependent slots":
check:
Epoch(0).proposer_dependent_slot() == Slot(0)
Epoch(1).proposer_dependent_slot() == Epoch(1).start_slot() - 1
Epoch(2).proposer_dependent_slot() == Epoch(2).start_slot() - 1
Epoch(0).attester_dependent_slot() == Slot(0)
Epoch(1).attester_dependent_slot() == Slot(0)
Epoch(2).attester_dependent_slot() == Epoch(1).start_slot() - 1

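The values checked above follow directly from the dependent-slot arithmetic; here is a self-contained sketch of it with plain integers, assuming the mainnet `SLOTS_PER_EPOCH = 32` and ignoring the repo's distinct `Slot`/`Epoch` types:

```nim
const SLOTS_PER_EPOCH = 32'u64

func startSlot(epoch: uint64): uint64 =
  epoch * SLOTS_PER_EPOCH

func proposerDependentSlot(epoch: uint64): uint64 =
  # Proposer shuffling for `epoch` is fixed as of the last slot of `epoch - 1`.
  if epoch >= 1: startSlot(epoch) - 1 else: 0

func attesterDependentSlot(epoch: uint64): uint64 =
  # Attester shuffling looks one epoch further back.
  if epoch >= 2: startSlot(epoch - 1) - 1 else: 0

when isMainModule:
  doAssert proposerDependentSlot(0) == 0
  doAssert proposerDependentSlot(2) == startSlot(2) - 1
  doAssert attesterDependentSlot(1) == 0
  doAssert attesterDependentSlot(2) == startSlot(1) - 1
```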
View File

@ -49,7 +49,7 @@ suite "Block processor" & preset():
consensusManager = ConsensusManager.new(
dag, attestationPool, quarantine, elManager, actionTracker,
newClone(DynamicFeeRecipientsStore.init()), "",
default(Eth1Address), defaultGasLimit)
Opt.some default(Eth1Address), defaultGasLimit)
state = newClone(dag.headState)
cache = StateCache()
b1 = addTestBlock(state[], cache).phase0Data

View File

@ -226,7 +226,6 @@ suite "Gossip validation - Extra": # Not based on preset config
var
state = assignClone(dag.headState.altairData)
slot = state[].data.slot
beaconBlockRoot = state[].latest_block_root
subcommitteeIdx = 0.SyncSubcommitteeIndex
syncCommittee = @(dag.syncCommitteeParticipants(slot))
@ -241,47 +240,50 @@ suite "Gossip validation - Extra": # Not based on preset config
kind: ValidatorKind.Local, data: keystoreData, index: Opt.some index)
resMsg = waitFor getSyncCommitteeMessage(
validator, state[].data.fork, state[].data.genesis_validators_root,
slot, beaconBlockRoot)
slot, state[].latest_block_root)
msg = resMsg.get()
syncCommitteeMsgPool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))
syncCommitteePool = newClone(
SyncCommitteeMsgPool.init(keys.newRng(), cfg))
res = waitFor validateSyncCommitteeMessage(
dag, batchCrypto, syncCommitteeMsgPool, msg, subcommitteeIdx,
slot.start_beacon_time(), true)
(positions, cookedSig) = res.get()
dag, quarantine, batchCrypto, syncCommitteePool,
msg, subcommitteeIdx, slot.start_beacon_time(), true)
(bid, cookedSig, positions) = res.get()
syncCommitteeMsgPool[].addSyncCommitteeMessage(
syncCommitteePool[].addSyncCommitteeMessage(
msg.slot,
msg.beacon_block_root,
bid,
msg.validator_index,
cookedSig,
subcommitteeIdx,
positions)
let
contribution = block:
let contribution = (ref SignedContributionAndProof)()
contrib = block:
let contrib = (ref SignedContributionAndProof)()
check:
syncCommitteeMsgPool[].produceContribution(
slot, beaconBlockRoot, subcommitteeIdx,
contribution.message.contribution)
syncCommitteeMsgPool[].addContribution(
contribution[], contribution.message.contribution.signature.load.get)
syncCommitteePool[].produceContribution(
slot, bid, subcommitteeIdx,
contrib.message.contribution)
syncCommitteePool[].addContribution(
contrib[], bid,
contrib.message.contribution.signature.load.get)
let signRes = waitFor validator.getContributionAndProofSignature(
state[].data.fork, state[].data.genesis_validators_root,
contribution[].message)
contrib[].message)
doAssert(signRes.isOk())
contribution[].signature = signRes.get()
contribution
aggregate = syncCommitteeMsgPool[].produceSyncAggregate(beaconBlockRoot)
contrib[].signature = signRes.get()
contrib
aggregate = syncCommitteePool[].produceSyncAggregate(bid, slot)
check:
expectedCount > 1 # Cover edge case
res.isOk
contribution.message.contribution.aggregation_bits.countOnes == expectedCount
contrib.message.contribution.aggregation_bits.countOnes == expectedCount
aggregate.sync_committee_bits.countOnes == expectedCount
# Same message twice should be ignored
validateSyncCommitteeMessage(
dag, batchCrypto, syncCommitteeMsgPool, msg, subcommitteeIdx,
state[].data.slot.start_beacon_time(), true).waitFor().isErr()
dag, quarantine, batchCrypto, syncCommitteePool,
msg, subcommitteeIdx, state[].data.slot.start_beacon_time(), true
).waitFor().isErr()

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -109,13 +109,13 @@ const
func specifiedFeeRecipient(x: int): Eth1Address =
copyMem(addr result, unsafeAddr x, sizeof x)
proc contains*(keylist: openArray[KeystoreInfo], key: ValidatorPubKey): bool =
func contains*(keylist: openArray[KeystoreInfo], key: ValidatorPubKey): bool =
for item in keylist:
if item.validating_pubkey == key:
return true
false
proc contains*(keylist: openArray[KeystoreInfo], key: string): bool =
func contains*(keylist: openArray[KeystoreInfo], key: string): bool =
let pubkey = ValidatorPubKey.fromHex(key).tryGet()
contains(keylist, pubkey)
@ -363,7 +363,7 @@ proc listRemoteValidators(validatorsDir,
validatorsDir, secretsDir, err = err.msg
validators
proc `==`(a: seq[ValidatorPubKey],
func `==`(a: seq[ValidatorPubKey],
b: seq[KeystoreInfo | RemoteKeystoreInfo]): bool =
if len(a) != len(b):
return false
@ -507,7 +507,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
testFlavour = " [" & keymanager.ident & "]" & preset()
suite "Serialization/deserialization" & testFlavour:
proc `==`(a, b: Kdf): bool =
func `==`(a, b: Kdf): bool =
if (a.function != b.function) or (a.message != b.message):
return false
case a.function
@ -523,14 +523,14 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
(a.scryptParams.r == b.scryptParams.r) and
(seq[byte](a.scryptParams.salt) == seq[byte](b.scryptParams.salt))
proc `==`(a, b: Checksum): bool =
func `==`(a, b: Checksum): bool =
if a.function != b.function:
return false
case a.function
of ChecksumFunctionKind.sha256Checksum:
a.message.data == b.message.data
proc `==`(a, b: Cipher): bool =
func `==`(a, b: Cipher): bool =
if (a.function != b.function) or
(seq[byte](a.message) != seq[byte](b.message)):
return false
@ -538,11 +538,11 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
of CipherFunctionKind.aes128CtrCipher:
seq[byte](a.params.iv) == seq[byte](b.params.iv)
proc `==`(a, b: Crypto): bool =
func `==`(a, b: Crypto): bool =
(a.kdf == b.kdf) and (a.checksum == b.checksum) and
(a.cipher == b.cipher)
proc `==`(a, b: Keystore): bool =
func `==`(a, b: Keystore): bool =
(a.crypto == b.crypto) and (a.pubkey == b.pubkey) and
(string(a.path) == string(b.path)) and
(a.description == b.description) and (a.uuid == b.uuid) and
@ -1047,7 +1047,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
response.status == 403
responseJson["message"].getStr() == InvalidAuthorizationError
asyncTest "Obtaining the fee recpient of a missing validator returns 404" & testFlavour:
asyncTest "Obtaining the fee recipient of a missing validator returns 404" & testFlavour:
let
pubkey = ValidatorPubKey.fromHex(unusedPublicKeys[0]).expect("valid key")
response = await client.listFeeRecipientPlain(
@ -1068,7 +1068,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
check:
resultFromApi == feeRecipient
asyncTest "Obtaining the fee recpient of an unconfigured validator returns the suggested default" & testFlavour:
asyncTest "Obtaining the fee recipient of an unconfigured validator returns the suggested default" & testFlavour:
let
pubkey = ValidatorPubKey.fromHex(oldPublicKeys[0]).expect("valid key")
resultFromApi = await client.listFeeRecipient(pubkey, correctTokenValue)
@ -1076,7 +1076,7 @@ proc runTests(keymanager: KeymanagerToTest) {.async.} =
check:
resultFromApi == defaultFeeRecipient
asyncTest "Configuring the fee recpient" & testFlavour:
asyncTest "Configuring the fee recipient" & testFlavour:
let
pubkey = ValidatorPubKey.fromHex(oldPublicKeys[1]).expect("valid key")
firstFeeRecipient = specifiedFeeRecipient(2)

View File

@ -36,7 +36,7 @@ suite "Remove keystore testing suite":
check keystore.remotes[0].id == 0
check keystore.remotes[0].pubkey.toHex == "8b9c875fbe539c6429c4fc304675062579ce47fb6b2ac6b6a1ba1188ca123a80affbfe381dbbc8e7f2437709a4c3325c"
for version in [3]:
for version in [1, 3]:
let remoteKeyStores = """{
"version": """ & $version & """,
"type": "web3signer",

View File

@ -1,3 +1,10 @@
# beacon_chain
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
import
@ -16,21 +23,27 @@ func aggregate(sigs: openArray[CookedSig]): CookedSig =
suite "Sync committee pool":
setup:
var pool = SyncCommitteeMsgPool.init(keys.newRng())
let cfg = block:
var res = defaultRuntimeConfig
res.ALTAIR_FORK_EPOCH = 0.Epoch
res.BELLATRIX_FORK_EPOCH = 20.Epoch
res
var pool = SyncCommitteeMsgPool.init(keys.newRng(), cfg)
test "An empty pool is safe to use":
let headRoot = eth2digest(@[1.byte, 2, 3])
let headBid =
BlockId(slot: Slot(1), root: eth2digest(@[1.byte, 2, 3]))
var outContribution: SyncCommitteeContribution
let success = pool.produceContribution(
Slot(1),
headRoot,
headBid,
SyncSubcommitteeIndex(0),
outContribution)
check(success == false)
let aggregate = pool.produceSyncAggregate(headRoot)
let aggregate = pool.produceSyncAggregate(headBid, headBid.slot)
check:
aggregate.sync_committee_bits.isZeros
@ -42,9 +55,124 @@ suite "Sync committee pool":
test "An empty pool is safe to prune 2":
pool.pruneData(Slot(10000))
test "Missed slots across sync committee period boundary":
let
genesis_validators_root = eth2digest(@[5.byte, 6, 7])
privkey1 = MockPrivKeys[1.ValidatorIndex]
privkey2 = MockPrivKeys[2.ValidatorIndex]
nextPeriod = cfg.BELLATRIX_FORK_EPOCH.sync_committee_period + 1
bid1 = BlockId(
slot: Slot(nextPeriod.start_slot - 2), # Committee based on `slot + 1`
root: eth2digest(@[1.byte]))
sig1 = get_sync_committee_message_signature(
bellatrixFork(cfg), genesis_validators_root,
bid1.slot, bid1.root, privkey1)
sig2 = get_sync_committee_message_signature(
bellatrixFork(cfg), genesis_validators_root,
bid1.slot + 1, bid1.root, privkey2)
pool.addSyncCommitteeMessage(
bid1.slot, bid1, 1, sig1, SyncSubcommitteeIndex(0), @[1'u64])
pool.addSyncCommitteeMessage(
# Same participant index in next period represents a different validator
bid1.slot + 1, bid1, 2, sig2, SyncSubcommitteeIndex(0), @[1'u64])
var contribution: SyncCommitteeContribution
let success = pool.produceContribution(
bid1.slot + 1, bid1, SyncSubcommitteeIndex(0), contribution)
check:
success
contribution.slot == bid1.slot + 1
contribution.beacon_block_root == bid1.root
contribution.subcommittee_index == SyncSubcommitteeIndex(0).uint64
contribution.aggregation_bits.countOnes == 1
contribution.aggregation_bits[1] == true
contribution.signature == sig2.toValidatorSig
test "Missed slots across fork transition":
let
genesis_validators_root = eth2digest(@[5.byte, 6, 7])
privkey1 = MockPrivKeys[1.ValidatorIndex]
privkey2 = MockPrivKeys[1.ValidatorIndex]
bid1 = BlockId(
slot: Slot(cfg.BELLATRIX_FORK_EPOCH.start_slot - 1),
root: eth2digest(@[1.byte]))
sig1 = get_sync_committee_message_signature(
altairFork(cfg), genesis_validators_root,
bid1.slot, bid1.root, privkey1)
sig2 = get_sync_committee_message_signature(
bellatrixFork(cfg), genesis_validators_root,
bid1.slot + 1, bid1.root, privkey2)
pool.addSyncCommitteeMessage(
bid1.slot, bid1, 1, sig1, SyncSubcommitteeIndex(0), @[1'u64])
pool.addSyncCommitteeMessage(
bid1.slot + 1, bid1, 2, sig2, SyncSubcommitteeIndex(0), @[2'u64])
var contribution: SyncCommitteeContribution
let success = pool.produceContribution(
bid1.slot + 1, bid1, SyncSubcommitteeIndex(0), contribution)
check:
success
contribution.slot == bid1.slot + 1
contribution.beacon_block_root == bid1.root
contribution.subcommittee_index == SyncSubcommitteeIndex(0).uint64
contribution.aggregation_bits.countOnes == 1
contribution.aggregation_bits[2] == true
contribution.signature == sig2.toValidatorSig
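Both tests above turn on which committee a message is attributed to: as the inline comment notes, selection is based on `slot + 1`, so the last slot before a period (or fork) boundary is already judged by the next committee. A self-contained sketch of the period arithmetic, assuming the mainnet preset constants:

```nim
const
  SLOTS_PER_EPOCH = 32'u64
  EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256'u64
  SLOTS_PER_SYNC_COMMITTEE_PERIOD =
    SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD

func syncCommitteePeriod(slot: uint64): uint64 =
  slot div SLOTS_PER_SYNC_COMMITTEE_PERIOD

when isMainModule:
  let boundary = 1'u64 * SLOTS_PER_SYNC_COMMITTEE_PERIOD
  # A message signed for `boundary - 1` is judged by the committee of
  # `boundary - 1 + 1 = boundary`, i.e. the next period's committee.
  doAssert syncCommitteePeriod(boundary - 1) == 0
  doAssert syncCommitteePeriod(boundary - 1 + 1) == 1
```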
test "isSeen":
let
fork = altairFork(cfg)
genesis_validators_root = eth2digest(@[5.byte, 6, 7])
privkey1 = MockPrivKeys[1.ValidatorIndex]
bid1 = BlockId(slot: Slot(100), root: eth2digest(@[1.byte]))
bid2 = BlockId(slot: Slot(101), root: eth2digest(@[1.byte, 2]))
sig1 = get_sync_committee_message_signature(
fork, genesis_validators_root, bid2.slot, bid1.root, privkey1)
sig2 = get_sync_committee_message_signature(
fork, genesis_validators_root, bid2.slot, bid2.root, privkey1)
msg1 = SyncCommitteeMessage(
slot: bid2.slot,
beacon_block_root: bid1.root,
validator_index: 1,
signature: sig1.toValidatorSig)
msg2 = SyncCommitteeMessage(
slot: bid2.slot,
beacon_block_root: bid2.root,
validator_index: 1,
signature: sig2.toValidatorSig)
check:
not pool.isSeen(msg1, SyncSubcommitteeIndex(0), bid2)
not pool.isSeen(msg2, SyncSubcommitteeIndex(0), bid2)
pool.addSyncCommitteeMessage(
bid2.slot, bid1, 1, sig1, SyncSubcommitteeIndex(0), @[1'u64])
check:
pool.isSeen(msg1, SyncSubcommitteeIndex(0), bid2)
not pool.isSeen(msg2, SyncSubcommitteeIndex(0), bid2)
pool.addSyncCommitteeMessage(
bid2.slot, bid2, 1, sig1, SyncSubcommitteeIndex(0), @[1'u64])
check:
pool.isSeen(msg1, SyncSubcommitteeIndex(0), bid2)
pool.isSeen(msg2, SyncSubcommitteeIndex(0), bid2)
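The `isSeen` checks show that duplicate detection keys on the whole (slot, block root, validator index, subcommittee) combination rather than on the validator alone. The toy seen-set below illustrates that behaviour; the string key and `HashSet` are illustrative assumptions, not the pool's actual internal representation:

```nim
import std/sets

proc seenKey(slot: uint64, blockRoot: string,
             validatorIndex: uint64, subcommittee: int): string =
  $slot & ":" & blockRoot & ":" & $validatorIndex & ":" & $subcommittee

var seen = initHashSet[string]()
seen.incl seenKey(101, "root1", validatorIndex = 1, subcommittee = 0)

# The same vote is a duplicate; the same validator voting for another root
# at the same slot is not, matching the checks above.
doAssert seenKey(101, "root1", 1, 0) in seen
doAssert seenKey(101, "root2", 1, 0) notin seen
```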
test "Aggregating votes":
let
fork = altairFork(defaultRuntimeConfig)
fork = altairFork(cfg)
genesis_validators_root = eth2digest(@[5.byte, 6, 7])
privkey1 = MockPrivKeys[1.ValidatorIndex]
@ -52,35 +180,36 @@ suite "Sync committee pool":
privkey3 = MockPrivKeys[3.ValidatorIndex]
privkey4 = MockPrivKeys[4.ValidatorIndex]
root1 = eth2digest(@[1.byte])
root2 = eth2digest(@[1.byte, 2])
root3 = eth2digest(@[1.byte, 2, 3])
root1Slot = Slot(100)
root2Slot = Slot(101)
root3Slot = Slot(101)
bid1 = BlockId(slot: Slot(100), root: eth2digest(@[1.byte]))
bid2 = BlockId(slot: Slot(101), root: eth2digest(@[1.byte, 2]))
bid3 = BlockId(slot: Slot(101), root: eth2digest(@[1.byte, 2, 3]))
subcommittee1 = SyncSubcommitteeIndex(0)
subcommittee2 = SyncSubcommitteeIndex(1)
sig1 = get_sync_committee_message_signature(
fork, genesis_validators_root, root1Slot, root1, privkey1)
fork, genesis_validators_root, bid1.slot, bid1.root, privkey1)
sig2 = get_sync_committee_message_signature(
fork, genesis_validators_root, root2Slot, root2, privkey1)
fork, genesis_validators_root, bid2.slot, bid2.root, privkey1)
sig3 = get_sync_committee_message_signature(
fork, genesis_validators_root, root3Slot, root3, privkey1)
fork, genesis_validators_root, bid3.slot, bid3.root, privkey1)
sig4 = get_sync_committee_message_signature(
fork, genesis_validators_root, root3Slot, root2, privkey1)
fork, genesis_validators_root, bid3.slot, bid2.root, privkey1)
# Inserting sync committee messages
#
pool.addSyncCommitteeMessage(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])
pool.addSyncCommitteeMessage(root1Slot, root1, 2, sig2, subcommittee1, [10'u64])
pool.addSyncCommitteeMessage(root2Slot, root1, 3, sig3, subcommittee2, [7'u64])
pool.addSyncCommitteeMessage(root2Slot, root2, 4, sig4, subcommittee2, [3'u64])
pool.addSyncCommitteeMessage(
bid1.slot, bid1, 1, sig1, subcommittee1, @[1'u64])
pool.addSyncCommitteeMessage(
bid1.slot, bid1, 2, sig2, subcommittee1, @[10'u64])
pool.addSyncCommitteeMessage(
bid2.slot, bid1, 3, sig3, subcommittee2, @[7'u64])
pool.addSyncCommitteeMessage(
bid2.slot, bid2, 4, sig4, subcommittee2, @[3'u64])
# Insert a duplicate message (this should be handled gracefully)
pool.addSyncCommitteeMessage(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])
pool.addSyncCommitteeMessage(
bid1.slot, bid1, 1, sig1, subcommittee1, @[1'u64])
# Producing contributions
#
@ -88,8 +217,8 @@ suite "Sync committee pool":
# Checking a committee where there was no activity:
var outContribution: SyncCommitteeContribution
let success = pool.produceContribution(
root2Slot,
root2,
bid2.slot,
bid2,
subcommittee1,
outContribution)
@ -101,56 +230,56 @@ suite "Sync committee pool":
var outContribution: SignedContributionAndProof
template contribution: untyped = outContribution.message.contribution
let success = pool.produceContribution(
root1Slot,
root1,
bid1.slot,
bid1,
subcommittee1,
contribution)
let expectedSig = aggregate [sig1, sig2]
let sig = aggregate [sig1, sig2]
check:
success
contribution.slot == root1Slot
contribution.beacon_block_root == root1
contribution.slot == bid1.slot
contribution.beacon_block_root == bid1.root
contribution.subcommittee_index == subcommittee1.uint64
contribution.aggregation_bits.countOnes == 2
contribution.aggregation_bits[1] == true
contribution.aggregation_bits[8] == false
contribution.aggregation_bits[10] == true
contribution.signature == expectedSig.toValidatorSig
contribution.signature == sig.toValidatorSig
check:
not pool.covers(contribution)
not pool.covers(contribution, bid1)
pool.addContribution(outContribution, expectedSig)
pool.addContribution(outContribution, bid1, sig)
check:
pool.isSeen(outContribution.message)
pool.covers(contribution)
pool.covers(contribution, bid1)
block:
# Checking a committee with a single participant:
var outContribution: SignedContributionAndProof
template contribution: untyped = outContribution.message.contribution
let success = pool.produceContribution(
root1Slot,
root1,
bid1.slot,
bid1,
subcommittee2,
contribution)
check:
success
contribution.slot == root1Slot
contribution.beacon_block_root == root1
contribution.slot == bid1.slot
contribution.beacon_block_root == bid1.root
contribution.subcommittee_index == subcommittee2.uint64
contribution.aggregation_bits.countOnes == 1
contribution.aggregation_bits[7] == true
contribution.signature == sig3.toValidatorSig
check:
not pool.covers(contribution)
pool.addContribution(outContribution, sig3)
not pool.covers(contribution, bid1)
pool.addContribution(outContribution, bid1, sig3)
check:
pool.isSeen(outContribution.message)
pool.covers(contribution)
pool.covers(contribution, bid1)
block:
# Checking another committee with a single participant
@ -158,35 +287,35 @@ suite "Sync committee pool":
var outContribution: SignedContributionAndProof
template contribution: untyped = outContribution.message.contribution
let success = pool.produceContribution(
root2Slot,
root2,
bid2.slot,
bid2,
subcommittee2,
contribution)
check:
success
contribution.slot == root2Slot
contribution.beacon_block_root == root2
contribution.slot == bid2.slot
contribution.beacon_block_root == bid2.root
contribution.subcommittee_index == subcommittee2.uint64
contribution.aggregation_bits.countOnes == 1
contribution.aggregation_bits[3] == true
contribution.signature == sig4.toValidatorSig
check:
not pool.covers(contribution)
pool.addContribution(outContribution, sig4)
not pool.covers(contribution, bid2)
pool.addContribution(outContribution, bid2, sig4)
check:
pool.isSeen(outContribution.message)
pool.covers(contribution)
pool.covers(contribution, bid2)
block:
# Checking a block root nobody voted for
var outContribution: SignedContributionAndProof
template contribution: untyped = outContribution.message.contribution
let success = pool.produceContribution(
root3Slot,
root3,
bid3.slot,
bid3,
subcommittee2,
contribution)
@ -197,43 +326,43 @@ suite "Sync committee pool":
#
block:
# Checking for a block that got no votes
let aggregate = pool.produceSyncAggregate(root3)
let aggregate = pool.produceSyncAggregate(bid3, bid3.slot)
check:
aggregate.sync_committee_bits.isZeros
aggregate.sync_committee_signature == ValidatorSig.infinity
block:
# Checking for a block that got votes from 1 committee
let aggregate = pool.produceSyncAggregate(root2)
let aggregate = pool.produceSyncAggregate(bid2, bid2.slot)
check:
aggregate.sync_committee_bits.countOnes == 1
aggregate.sync_committee_signature == sig4.toValidatorSig
block:
# Checking for a block that got votes from 2 committees
let aggregate = pool.produceSyncAggregate(root1)
let expectedSig = aggregate [sig1, sig2, sig3]
let aggregate = pool.produceSyncAggregate(bid1, bid1.slot)
let sig = aggregate [sig1, sig2, sig3]
check:
aggregate.sync_committee_bits.countOnes == 3
aggregate.sync_committee_signature == expectedSig.toValidatorSig
aggregate.sync_committee_signature == sig.toValidatorSig
# Pruning the data
#
pool.pruneData(Slot(200))
pool.pruneData(Slot(200), force = true)
block:
# After pruning, all votes are gone
var outContribution: SyncCommitteeContribution
let success = pool.produceContribution(
root1Slot,
root1,
bid1.slot,
bid1,
subcommittee1,
outContribution)
check:
not success
let aggregate = pool.produceSyncAggregate(root2)
let aggregate = pool.produceSyncAggregate(bid2, bid2.slot)
check:
aggregate.sync_committee_bits.isZeros
aggregate.sync_committee_signature == ValidatorSig.infinity

View File

@ -421,9 +421,9 @@ proc makeSyncAggregate(
getStateField(state, genesis_validators_root)
slot =
getStateField(state, slot)
latest_block_root =
withState(state): forkyState.latest_block_root
syncCommitteePool = newClone(SyncCommitteeMsgPool.init(keys.newRng()))
latest_block_id =
withState(state): forkyState.latest_block_id
syncCommitteePool = newClone(SyncCommitteeMsgPool.init(keys.newRng(), cfg))
type
Aggregator = object
@ -484,11 +484,11 @@ proc makeSyncAggregate(
let signature = get_sync_committee_message_signature(
fork, genesis_validators_root,
slot, latest_block_root,
slot, latest_block_id.root,
MockPrivKeys[validatorIdx])
syncCommitteePool[].addSyncCommitteeMessage(
slot,
latest_block_root,
latest_block_id,
uint64 validatorIdx,
signature,
subcommitteeIdx,
@ -497,7 +497,7 @@ proc makeSyncAggregate(
for aggregator in aggregators:
var contribution: SyncCommitteeContribution
if syncCommitteePool[].produceContribution(
slot, latest_block_root, aggregator.subcommitteeIdx, contribution):
slot, latest_block_id, aggregator.subcommitteeIdx, contribution):
let
contributionAndProof = ContributionAndProof(
aggregator_index: uint64 aggregator.validatorIdx,
@ -511,9 +511,10 @@ proc makeSyncAggregate(
message: contributionAndProof,
signature: contributionSig.toValidatorSig)
syncCommitteePool[].addContribution(
signedContributionAndProof, contribution.signature.load.get)
signedContributionAndProof,
latest_block_id, contribution.signature.load.get)
syncCommitteePool[].produceSyncAggregate(latest_block_root)
syncCommitteePool[].produceSyncAggregate(latest_block_id, slot)
iterator makeTestBlocks*(
state: ForkedHashedBeaconState,

vendor/nim-chronos vendored

@ -1 +1 @@
Subproject commit b9f482e8f01be1cfa823e7cceb8f619cac1d3393
Subproject commit 956ae5af557de8e5201db43a425d95c122857dca