commit a223d62b07 (parent cc0dbd5bc0)
https://github.com/status-im/nimbus-eth2

Cleanups (#3123)

Renames and cleanups split out from the validator monitoring branch, so as
to reduce the conflict area vs other PRs:

* add constants for expected message timing
* name validators after the messages they validate, mostly, to make
  grepping easier
* unify field naming of EpochInfo across forks to make cross-fork code
  easier
@@ -35,6 +35,18 @@ type
   GetBeaconTimeFn* = proc(): BeaconTime {.gcsafe, raises: [Defect].}

+const
+  # Offsets from the start of the slot to when the corresponding message should
+  # be sent
+  # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/validator.md#attesting
+  attestationSlotOffset* = seconds(SECONDS_PER_SLOT.int) div 3
+  # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/validator.md#broadcast-aggregate
+  aggregateSlotOffset* = seconds(SECONDS_PER_SLOT.int) * 2 div 3
+  # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/validator.md#prepare-sync-committee-message
+  syncCommitteeMessageSlotOffset* = seconds(SECONDS_PER_SLOT.int) div 3
+  # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/validator.md#broadcast-sync-committee-contribution
+  syncContributionSlotOffset* = seconds(SECONDS_PER_SLOT.int) * 2 div 3
+
 proc init*(T: type BeaconClock, genesis_time: uint64): T =
   # ~290 billion years into the future
   doAssert genesis_time <= high(int64).uint64
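These constants replace the ad-hoc `SECONDS_PER_SLOT div 3`-style arithmetic in the hunks below. A minimal standalone sketch of the arithmetic, assuming mainnet's `SECONDS_PER_SLOT = 12` (the real constants are chronos `Duration` values; the names in this sketch are illustrative only):

# Standalone sketch; SECONDS_PER_SLOT = 12 is the mainnet preset value,
# hard-coded here as an assumption.
const SECONDS_PER_SLOT = 12

const
  attestationOffset = SECONDS_PER_SLOT div 3      # send attestations 1/3 into the slot
  aggregateOffset   = SECONDS_PER_SLOT * 2 div 3  # send aggregates 2/3 into the slot

static:
  doAssert attestationOffset == 4  # seconds
  doAssert aggregateOffset == 8    # seconds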
@@ -96,7 +96,7 @@ func isSeen*(
     subcommitteeIndex: subcommitteeIndex.uint64)
   seenKey in pool.seenSyncMsgByAuthor

-func addSyncCommitteeMsg*(
+func addSyncCommitteeMessage*(
     pool: var SyncCommitteeMsgPool,
     slot: Slot,
     blockRoot: Eth2Digest,

@@ -187,7 +187,7 @@ func isSeen*(
     subcommitteeIndex: msg.contribution.subcommittee_index)
   seenKey in pool.seenContributionByAuthor

-proc addSyncContribution(pool: var SyncCommitteeMsgPool,
+proc addContribution(pool: var SyncCommitteeMsgPool,
                          aggregator_index: uint64,
                          contribution: SyncCommitteeContribution,
                          signature: CookedSig) =

@@ -217,10 +217,10 @@ proc addSyncContribution(pool: var SyncCommitteeMsgPool,
   except KeyError:
     raiseAssert "We have checked for the key upfront"

-proc addSyncContribution*(pool: var SyncCommitteeMsgPool,
+proc addContribution*(pool: var SyncCommitteeMsgPool,
                           scproof: SignedContributionAndProof,
                           signature: CookedSig) =
-  pool.addSyncContribution(
+  pool.addContribution(
     scproof.message.aggregator_index, scproof.message.contribution, signature)

   if not(isNil(pool.onContributionReceived)):
@@ -406,7 +406,7 @@ proc voluntaryExitValidator*(

   v

-proc syncCommitteeMsgValidator*(
+proc syncCommitteeMessageValidator*(
     self: ref Eth2Processor,
     syncCommitteeMsg: SyncCommitteeMessage,
     subcommitteeIdx: SyncSubcommitteeIndex,

@@ -432,7 +432,7 @@ proc syncCommitteeMsgValidator*(
       trace "Sync committee message validated"
       let (positions, cookedSig) = v.get()

-      self.syncCommitteeMsgPool[].addSyncCommitteeMsg(
+      self.syncCommitteeMsgPool[].addSyncCommitteeMessage(
         syncCommitteeMsg.slot,
         syncCommitteeMsg.beacon_block_root,
         syncCommitteeMsg.validator_index,

@@ -448,7 +448,7 @@ proc syncCommitteeMsgValidator*(
     beacon_sync_committee_messages_dropped.inc(1, [$v.error[0]])
     err(v.error())

-proc syncCommitteeContributionValidator*(
+proc contributionValidator*(
     self: ref Eth2Processor,
     contributionAndProof: SignedContributionAndProof,
     checkSignature: bool = true): Result[void, ValidationError] =

@@ -468,14 +468,13 @@ proc syncCommitteeContributionValidator*(
     debug "Contribution received", delay

   # Now proceed to validation
-  let v = validateSignedContributionAndProof(self.dag,
-                                             self.syncCommitteeMsgPool[],
-                                             contributionAndProof, wallTime,
+  let v = validateContribution(
+    self.dag, self.syncCommitteeMsgPool[], contributionAndProof, wallTime,
     checkSignature)

   return if v.isOk():
     trace "Contribution validated"
-    self.syncCommitteeMsgPool[].addSyncContribution(contributionAndProof, v.get)
+    self.syncCommitteeMsgPool[].addContribution(contributionAndProof, v.get)
     beacon_sync_committee_contributions_received.inc()

     ok()
@@ -786,7 +786,7 @@ proc validateSyncCommitteeMessage*(
   ok((positionsInSubcommittee, cookedSignature.get()))

 # https://github.com/ethereum/eth2.0-specs/blob/v1.1.5/specs/altair/p2p-interface.md#sync_committee_contribution_and_proof
-proc validateSignedContributionAndProof*(
+proc validateContribution*(
     dag: ChainDAGRef,
     syncCommitteeMsgPool: var SyncCommitteeMsgPool,
     msg: SignedContributionAndProof,
@@ -1059,13 +1059,13 @@ proc installMessageValidators(node: BeaconNode) =
         # This proc needs to be within closureScope; don't lift out of loop.
         proc(msg: SyncCommitteeMessage): ValidationResult =
           toValidationResult(
-            node.processor.syncCommitteeMsgValidator(msg, idx)))
+            node.processor.syncCommitteeMessageValidator(msg, idx)))

   node.network.addValidator(
     getSyncCommitteeContributionAndProofTopic(digest),
     proc(msg: SignedContributionAndProof): ValidationResult =
       toValidationResult(
-        node.processor.syncCommitteeContributionValidator(msg)))
+        node.processor.contributionValidator(msg)))

   installSyncCommitteeeValidators(node.dag.forkDigests.altair)
   installSyncCommitteeeValidators(node.dag.forkDigests.merge)
@@ -244,14 +244,14 @@ type

   EpochInfo* = object
     ## Information about the outcome of epoch processing
-    statuses*: seq[RewardStatus]
-    total_balances*: TotalBalances
+    validators*: seq[RewardStatus]
+    balances*: TotalBalances

 chronicles.formatIt BeaconBlock: it.shortLog

 func clear*(info: var EpochInfo) =
-  info.statuses.setLen(0)
-  info.total_balances = TotalBalances()
+  info.validators.setLen(0)
+  info.balances = TotalBalances()

 Json.useCustomSerialization(BeaconState.justification_bits):
   read:
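This rename is the commit's third bullet in action: once phase0's `EpochInfo` uses the same `validators`/`balances` field names as the altair one, fork-generic code can address either without switching on field names. A hedged sketch of the idea, with both forks' per-validator types stubbed out as `int` (all names here are illustrative):

# Sketch: unified field names let one generic proc serve both forks.
type
  Phase0EpochInfo = object
    validators: seq[int]  # seq[RewardStatus] in the real code
  AltairEpochInfo = object
    validators: seq[int]  # per-validator participation info in the real code

func validatorCount(info: Phase0EpochInfo | AltairEpochInfo): int =
  info.validators.len

doAssert validatorCount(Phase0EpochInfo(validators: @[1, 2])) == 2
doAssert validatorCount(AltairEpochInfo(validators: @[3])) == 1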
@@ -53,8 +53,8 @@ template previous_epoch_head_attesters*(v: TotalBalances): Gwei =
   max(EFFECTIVE_BALANCE_INCREMENT, v.previous_epoch_head_attesters_raw)

 func init*(info: var phase0.EpochInfo, state: phase0.BeaconState) =
-  info.total_balances = TotalBalances()
-  info.statuses.setLen(state.validators.len)
+  info.balances = TotalBalances()
+  info.validators.setLen(state.validators.len)

   for i in 0..<state.validators.len:
     let v = unsafeAddr state.validators[i]

@@ -66,13 +66,13 @@ func init*(info: var phase0.EpochInfo, state: phase0.BeaconState) =
       flags.incl RewardFlags.canWithdrawInCurrentEpoch

     if v[].is_active_validator(state.get_current_epoch()):
-      info.total_balances.current_epoch_raw += v[].effective_balance
+      info.balances.current_epoch_raw += v[].effective_balance

     if v[].is_active_validator(state.get_previous_epoch()):
       flags.incl RewardFlags.isActiveInPreviousEpoch
-      info.total_balances.previous_epoch_raw += v[].effective_balance
+      info.balances.previous_epoch_raw += v[].effective_balance

-    info.statuses[i] = RewardStatus(
+    info.validators[i] = RewardStatus(
       current_epoch_effective_balance: v[].effective_balance,
       flags: flags,
     )
@@ -110,7 +110,7 @@ func process_attestation(
   # Update the cache for all participants
   for validator_index in get_attesting_indices(
       state, a.data, a.aggregation_bits, cache):
-    template v(): untyped = info.statuses[validator_index]
+    template v(): untyped = info.validators[validator_index]

     v.flags = v.flags + flags

@@ -130,26 +130,26 @@ func process_attestations*(
   for a in state.current_epoch_attestations:
     process_attestation(info, state, a, cache)

-  for idx, v in info.statuses:
+  for idx, v in info.validators:
     if v.flags.contains RewardFlags.isSlashed:
       continue

     let validator_balance = state.validators[idx].effective_balance

     if v.flags.contains RewardFlags.isCurrentEpochAttester:
-      info.total_balances.current_epoch_attesters_raw += validator_balance
+      info.balances.current_epoch_attesters_raw += validator_balance

     if v.flags.contains RewardFlags.isCurrentEpochTargetAttester:
-      info.total_balances.current_epoch_target_attesters_raw += validator_balance
+      info.balances.current_epoch_target_attesters_raw += validator_balance

     if v.is_previous_epoch_attester.isSome():
-      info.total_balances.previous_epoch_attesters_raw += validator_balance
+      info.balances.previous_epoch_attesters_raw += validator_balance

     if v.flags.contains RewardFlags.isPreviousEpochTargetAttester:
-      info.total_balances.previous_epoch_target_attesters_raw += validator_balance
+      info.balances.previous_epoch_target_attesters_raw += validator_balance

     if v.flags.contains RewardFlags.isPreviousEpochHeadAttester:
-      info.total_balances.previous_epoch_head_attesters_raw += validator_balance
+      info.balances.previous_epoch_head_attesters_raw += validator_balance

 # https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#helpers
 # get_eligible_validator_indices

@@ -236,7 +236,7 @@ func is_unslashed_participating_index(

 # https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
 proc process_justification_and_finalization*(state: var phase0.BeaconState,
-    total_balances: TotalBalances, flags: UpdateFlags = {}) {.nbench.} =
+    balances: TotalBalances, flags: UpdateFlags = {}) {.nbench.} =
   # Initial FFG checkpoint values have a `0x00` stub for `root`.
   # Skip FFG updates in the first two epochs to avoid corner cases that might
   # result in modifying this stub.
@@ -262,8 +262,8 @@ proc process_justification_and_finalization*(state: var phase0.BeaconState,
   state.justification_bits = (state.justification_bits shl 1) and
     cast[uint8]((2^JUSTIFICATION_BITS_LENGTH) - 1)

-  let total_active_balance = total_balances.current_epoch
-  if total_balances.previous_epoch_target_attesters * 3 >=
+  let total_active_balance = balances.current_epoch
+  if balances.previous_epoch_target_attesters * 3 >=
       total_active_balance * 2:
     state.current_justified_checkpoint =
       Checkpoint(epoch: previous_epoch,

@@ -275,9 +275,9 @@ proc process_justification_and_finalization*(state: var phase0.BeaconState,
       checkpoint = shortLog(state.current_justified_checkpoint)
   elif verifyFinalization in flags:
     warn "Low attestation participation in previous epoch",
-      total_balances, epoch = get_current_epoch(state)
+      balances, epoch = get_current_epoch(state)

-  if total_balances.current_epoch_target_attesters * 3 >=
+  if balances.current_epoch_target_attesters * 3 >=
       total_active_balance * 2:
     state.current_justified_checkpoint =
       Checkpoint(epoch: current_epoch,
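The renamed expressions implement the spec's 2/3 supermajority test in pure integer arithmetic: a checkpoint is justified when attesting balance * 3 >= total balance * 2, avoiding any division. A self-contained sketch of just that check (`isSupermajority` is an illustrative name; `Gwei` is `uint64` as in the spec types):

# Standalone sketch of the supermajority check renamed above.
type Gwei = uint64

func isSupermajority(attesting, total: Gwei): bool =
  # attesting/total >= 2/3, rearranged to stay in integers
  attesting * 3 >= total * 2

doAssert isSupermajority(20_000_000_000'u64, 30_000_000_000'u64)      # exactly 2/3
doAssert not isSupermajority(19_999_999_999'u64, 30_000_000_000'u64)  # just below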
@@ -492,40 +492,40 @@ func get_attestation_component_delta(is_unslashed_attester: bool,
 # https://github.com/ethereum/consensus-specs/blob/v1.1.0/specs/phase0/beacon-chain.md#components-of-attestation-deltas
 func get_source_delta*(validator: RewardStatus,
                        base_reward: uint64,
-                       total_balances: TotalBalances,
+                       balances: TotalBalances,
                        finality_delay: uint64): RewardDelta =
   ## Return attester micro-rewards/penalties for source-vote for each validator.
   get_attestation_component_delta(
     validator.is_previous_epoch_attester.isSome() and
       not (validator.flags.contains RewardFlags.isSlashed),
-    total_balances.previous_epoch_attesters,
-    total_balances.current_epoch,
+    balances.previous_epoch_attesters,
+    balances.current_epoch,
     base_reward,
     finality_delay)

 func get_target_delta*(validator: RewardStatus,
                        base_reward: uint64,
-                       total_balances: TotalBalances,
+                       balances: TotalBalances,
                        finality_delay: uint64): RewardDelta =
   ## Return attester micro-rewards/penalties for target-vote for each validator.
   get_attestation_component_delta(
     validator.flags.contains(RewardFlags.isPreviousEpochTargetAttester) and
       not (validator.flags.contains(RewardFlags.isSlashed)),
-    total_balances.previous_epoch_target_attesters,
-    total_balances.current_epoch,
+    balances.previous_epoch_target_attesters,
+    balances.current_epoch,
     base_reward,
     finality_delay)

 func get_head_delta*(validator: RewardStatus,
                      base_reward: uint64,
-                     total_balances: TotalBalances,
+                     balances: TotalBalances,
                      finality_delay: uint64): RewardDelta =
   ## Return attester micro-rewards/penalties for head-vote for each validator.
   get_attestation_component_delta(
     validator.flags.contains(RewardFlags.isPreviousEpochHeadAttester) and
       ((not validator.flags.contains(RewardFlags.isSlashed))),
-    total_balances.previous_epoch_head_attesters,
-    total_balances.current_epoch,
+    balances.previous_epoch_head_attesters,
+    balances.current_epoch,
     base_reward,
     finality_delay)

@@ -573,13 +573,13 @@ func get_attestation_deltas(state: phase0.BeaconState, info: var phase0.EpochInf

   let
     finality_delay = get_finality_delay(state)
-    total_balance = info.total_balances.current_epoch
+    total_balance = info.balances.current_epoch
     total_balance_sqrt = integer_squareroot(total_balance)
   # Filter out ineligible validators. All sub-functions of the spec do this
   # except for `get_inclusion_delay_deltas`. It's safe to do so here because
   # any validator that is in the unslashed indices of the matching source
   # attestations is active, and therefore eligible.
-  for index, validator in info.statuses.mpairs():
+  for index, validator in info.validators.mpairs():
     if not is_eligible_validator(validator):
       continue

@@ -589,11 +589,11 @@ func get_attestation_deltas(state: phase0.BeaconState, info: var phase0.EpochInf

     let
       source_delta = get_source_delta(
-        validator, base_reward, info.total_balances, finality_delay)
+        validator, base_reward, info.balances, finality_delay)
       target_delta = get_target_delta(
-        validator, base_reward, info.total_balances, finality_delay)
+        validator, base_reward, info.balances, finality_delay)
       head_delta = get_head_delta(
-        validator, base_reward, info.total_balances, finality_delay)
+        validator, base_reward, info.balances, finality_delay)
       (inclusion_delay_delta, proposer_delta) =
         get_inclusion_delay_delta(validator, base_reward)
       inactivity_delta = get_inactivity_penalty_delta(

@@ -607,8 +607,8 @@ func get_attestation_deltas(state: phase0.BeaconState, info: var phase0.EpochInf

   if proposer_delta.isSome:
     let proposer_index = proposer_delta.get()[0]
-    if proposer_index < info.statuses.lenu64:
-      info.statuses[proposer_index].delta.add(
+    if proposer_index < info.validators.lenu64:
+      info.validators[proposer_index].delta.add(
         proposer_delta.get()[1])

 # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/altair/beacon-chain.md#get_base_reward
@@ -625,7 +625,7 @@ func get_base_reward_increment(
 iterator get_flag_index_deltas*(
     state: altair.BeaconState | merge.BeaconState, flag_index: int,
     base_reward_per_increment: Gwei,
-    info: altair.EpochInfo):
+    info: var altair.EpochInfo):
     (ValidatorIndex, RewardDelta) =
   ## Return the deltas for a given ``flag_index`` by scanning through the
   ## participation flags.

@@ -648,6 +648,15 @@ iterator get_flag_index_deltas*(
     yield
       if is_unslashed_participating_index(
           state, flag_index, previous_epoch, vidx):
+
+        let pflag = case flag_index
+          of TIMELY_SOURCE_FLAG_INDEX: ParticipationFlag.timelySourceAttester
+          of TIMELY_TARGET_FLAG_INDEX: ParticipationFlag.timelyTargetAttester
+          of TIMELY_HEAD_FLAG_INDEX: ParticipationFlag.timelyHeadAttester
+          else: raiseAssert "Unknown flag index " & $flag_index
+
+        info.validators[vidx].flags.incl pflag
+
         if not is_in_inactivity_leak(state):
           let reward_numerator =
             base_reward * weight * unslashed_participating_increments
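Beyond the rename fallout, this hunk changes `get_flag_index_deltas` to take `info: var altair.EpochInfo`, so the iterator can record each validator's participation flags in the caller's `EpochInfo` while yielding deltas (groundwork for the validator monitoring branch). Nim's inline iterators may take `var` parameters; a minimal sketch of that pattern (names illustrative):

# Sketch: an inline iterator with a `var` parameter updates caller-owned
# state as a side effect while yielding values.
iterator deltasWithSideEffect(seen: var seq[int], n: int): int =
  for i in 0 ..< n:
    seen.add i    # record a "flag" in the caller's state
    yield i * 10  # yield the "delta"

var
  seen: seq[int]
  total = 0
for d in deltasWithSideEffect(seen, 3):
  total += d
doAssert seen == @[0, 1, 2]
doAssert total == 30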
@@ -714,7 +723,7 @@ func process_rewards_and_penalties(
     state: var phase0.BeaconState, info: var phase0.EpochInfo) {.nbench.} =
   # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are
   # for work done in the previous epoch
-  doAssert info.statuses.len == state.validators.len
+  doAssert info.validators.len == state.validators.len

   if get_current_epoch(state) == GENESIS_EPOCH:
     return

@@ -726,7 +735,7 @@ func process_rewards_and_penalties(
   # recursive nature of cache clearing - instead, we clear the whole cache then
   # update the raw list directly
   state.balances.clearCache()
-  for idx, v in info.statuses:
+  for idx, v in info.validators:
     var balance = state.balances.asSeq()[idx]
     increase_balance(balance, v.delta.rewards)
     decrease_balance(balance, v.delta.penalties)

@@ -982,7 +991,7 @@ proc process_epoch*(
   info.process_attestations(state, cache)

   # https://github.com/ethereum/consensus-specs/blob/v1.0.1/specs/phase0/beacon-chain.md#justification-and-finalization
-  process_justification_and_finalization(state, info.total_balances, flags)
+  process_justification_and_finalization(state, info.balances, flags)

   # state.slot hasn't been incremented yet.
   if verifyFinalization in flags and currentEpoch >= 2:

@@ -1001,7 +1010,7 @@ proc process_epoch*(
   process_registry_updates(cfg, state, cache)

   # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#slashings
-  process_slashings(state, info.total_balances.current_epoch)
+  process_slashings(state, info.balances.current_epoch)

   # https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/phase0/beacon-chain.md#eth1-data-votes-updates
   process_eth1_data_reset(state)
@@ -49,7 +49,7 @@ proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
   debug "Sending attestation", attestation = shortLog(attestation),
         validator = shortLog(validator), validator_index = vindex,
         attestation_root = shortLog(attestationRoot),
-        delay = vc.getDelay(seconds(int64(SECONDS_PER_SLOT) div 3))
+        delay = vc.getDelay(attestationSlotOffset)

   let res =
     try:

@@ -68,7 +68,7 @@ proc serveAttestation(service: AttestationServiceRef, adata: AttestationData,
             err_name = exc.name, err_msg = exc.msg
       return false

-  let delay = vc.getDelay(seconds(int64(SECONDS_PER_SLOT) div 3))
+  let delay = vc.getDelay(attestationSlotOffset)
   if res:
     notice "Attestation published", attestation = shortLog(attestation),
            validator = shortLog(validator),

@@ -103,7 +103,7 @@ proc serveAggregateAndProof*(service: AttestationServiceRef,
         attestation = shortLog(signedProof.message.aggregate),
         validator = shortLog(validator), validator_index = vindex,
         aggregationSlot = aggregationSlot,
-        delay = vc.getDelay(seconds((int64(SECONDS_PER_SLOT) div 3) * 2))
+        delay = vc.getDelay(aggregateSlotOffset)

   let res =
     try:

@@ -185,7 +185,7 @@ proc produceAndPublishAttestations*(service: AttestationServiceRef,
           inc(errored)
       (succeed, errored, failed)

-  let delay = vc.getDelay(seconds(int64(SECONDS_PER_SLOT) div 3))
+  let delay = vc.getDelay(attestationSlotOffset)
   debug "Attestation statistics", total = len(pendingAttestations),
         succeed = statistics[0], failed_to_deliver = statistics[1],
         not_accepted = statistics[2], delay = delay, slot = slot,

@@ -273,7 +273,7 @@ proc produceAndPublishAggregates(service: AttestationServiceRef,
           inc(errored)
       (succeed, errored, failed)

-  let delay = vc.getDelay(seconds((int64(SECONDS_PER_SLOT) div 3) * 2))
+  let delay = vc.getDelay(aggregateSlotOffset)
   debug "Aggregated attestation statistics", total = len(pendingAggregates),
         succeed = statistics[0], failed_to_deliver = statistics[1],
         not_accepted = statistics[2], delay = delay, slot = slot,

@@ -291,7 +291,7 @@ proc publishAttestationsAndAggregates(service: AttestationServiceRef,
   # Waiting for blocks to be published before attesting.
   let startTime = Moment.now()
   try:
-    let timeout = seconds(int64(SECONDS_PER_SLOT) div 3) # 4.seconds in mainnet
+    let timeout = attestationSlotOffset # 4.seconds in mainnet
     await vc.waitForBlockPublished(slot).wait(timeout)
     let dur = Moment.now() - startTime
     debug "Block proposal awaited", slot = slot, duration = dur

@@ -300,7 +300,7 @@ proc publishAttestationsAndAggregates(service: AttestationServiceRef,
     debug "Block was not produced in time", slot = slot, duration = dur

   block:
-    let delay = vc.getDelay(seconds(int64(SECONDS_PER_SLOT) div 3))
+    let delay = vc.getDelay(attestationSlotOffset)
     debug "Producing attestations", delay = delay, slot = slot,
           committee_index = committee_index,
           duties_count = len(duties)

@@ -325,7 +325,7 @@ proc publishAttestationsAndAggregates(service: AttestationServiceRef,
     await sleepAsync(aggregateTime)

   block:
-    let delay = vc.getDelay(seconds((int64(SECONDS_PER_SLOT) div 3) * 2))
+    let delay = vc.getDelay(aggregateSlotOffset)
     debug "Producing aggregate and proofs", delay = delay
   await service.produceAndPublishAggregates(ad, duties)

@@ -350,7 +350,7 @@ proc mainLoop(service: AttestationServiceRef) {.async.} =
   try:
     while true:
       let sleepTime = vc.beaconClock.durationToNextSlot() +
-                        seconds(int64(SECONDS_PER_SLOT) div 3)
+                        attestationSlotOffset
       let sres = vc.getCurrentSlot()
       if sres.isSome():
         let currentSlot = sres.get()
@@ -210,7 +210,7 @@ proc sendSyncCommitteeMessage*(
   # validation will also register the message with the sync committee
   # message pool. Notably, although libp2p calls the data handler for
   # any subscription on the subnet topic, it does not perform validation.
-  let res = node.processor.syncCommitteeMsgValidator(msg, subcommitteeIdx,
+  let res = node.processor.syncCommitteeMessageValidator(msg, subcommitteeIdx,
                                                      checkSignature)
   return
     if res.isOk():

@@ -308,7 +308,7 @@ proc sendSyncCommitteeContribution*(
     node: BeaconNode,
     msg: SignedContributionAndProof,
    checkSignature: bool): Future[SendResult] {.async.} =
-  let res = node.processor.syncCommitteeContributionValidator(
+  let res = node.processor.contributionValidator(
     msg, checkSignature)

   return

@@ -345,8 +345,7 @@ proc createAndSendAttestation(node: BeaconNode,
                               validator.pubKey)

       let wallTime = node.beaconClock.now()
-      let deadline = attestationData.slot.toBeaconTime() +
-                     seconds(int(SECONDS_PER_SLOT div 3))
+      let deadline = attestationData.slot.toBeaconTime(attestationSlotOffset)

       let (delayStr, delaySecs) =
         if wallTime < deadline:

@@ -654,8 +653,7 @@ proc createAndSendSyncCommitteeMessage(node: BeaconNode,

       let
         wallTime = node.beaconClock.now()
-        deadline = msg.slot.toBeaconTime() +
-                   seconds(int(SECONDS_PER_SLOT div 3))
+        deadline = msg.slot.toBeaconTime(syncCommitteeMessageSlotOffset)

       let (delayStr, delaySecs) =
         if wallTime < deadline:
@@ -975,14 +973,9 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =

   head = await handleProposal(node, head, slot)

-  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#attesting
-  # Milliseconds to wait from the start of the slot before sending out
-  # attestations
-  const attestationOffset = SECONDS_PER_SLOT.int64 * 1000 div 3
-
   let
     # The latest point in time when we'll be sending out attestations
-    attestationCutoffTime = slot.toBeaconTime(millis(attestationOffset))
+    attestationCutoffTime = slot.toBeaconTime(attestationSlotOffset)
     attestationCutoff = node.beaconClock.fromNow(attestationCutoffTime)

   if attestationCutoff.inFuture:
@@ -1024,25 +1017,28 @@ proc handleValidatorDuties*(node: BeaconNode, lastSlot, slot: Slot) {.async.} =
     node.consensusManager[].updateHead(slot)
     head = node.dag.head

+  static: doAssert attestationSlotOffset == syncCommitteeMessageSlotOffset
+
   handleAttestations(node, head, slot)
   handleSyncCommitteeMessages(node, head, slot)

   updateValidatorMetrics(node) # the important stuff is done, update the vanity numbers

-  # https://github.com/ethereum/eth2.0-specs/blob/v1.0.1/specs/phase0/validator.md#broadcast-aggregate
+  # https://github.com/ethereum/eth2.0-specs/blob/v1.1.5/specs/phase0/validator.md#broadcast-aggregate
   # If the validator is selected to aggregate (is_aggregator), then they
   # broadcast their best aggregate as a SignedAggregateAndProof to the global
   # aggregate channel (beacon_aggregate_and_proof) two-thirds of the way
   # through the slot-that is, SECONDS_PER_SLOT * 2 / 3 seconds after the start
   # of slot.
   if slot > 2:
+    static: doAssert aggregateSlotOffset == syncContributionSlotOffset
     let
-      aggregateWaitTime = node.beaconClock.fromNow(
-        slot.toBeaconTime(seconds(int64(SECONDS_PER_SLOT * 2) div 3)))
-    if aggregateWaitTime.inFuture:
+      aggregateCutoffTime = slot.toBeaconTime(aggregateSlotOffset)
+      aggregateCutoff = node.beaconClock.fromNow(aggregateCutoffTime)
+    if aggregateCutoff.inFuture:
       debug "Waiting to send aggregate attestations",
-        aggregateWaitTime = shortLog(aggregateWaitTime.offset)
-      await sleepAsync(aggregateWaitTime.offset)
+        aggregateCutoff = shortLog(aggregateCutoff.offset)
+      await sleepAsync(aggregateCutoff.offset)

     let sendAggregatedAttestationsFut =
       sendAggregatedAttestations(node, head, slot)
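The two `static: doAssert` lines are compile-time guards: attestations and sync committee messages share one wait (as do aggregates and contributions), which is only sound while the paired offsets are equal. If a later spec or preset change made them diverge, compilation would fail rather than messages being silently mistimed. A trivial sketch of the mechanism:

# Sketch: `static:` evaluates the assertion at compile time, so a violation
# is a build error, not a runtime crash.
const offsetA = 12 div 3
const offsetB = 4
static: doAssert offsetA == offsetB  # compile-time failure if they diverge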
@@ -1083,8 +1079,7 @@ proc sendAttestation*(node: BeaconNode,

   let
     wallTime = node.processor.getCurrentBeaconTime()
-    deadline = attestation.data.slot.toBeaconTime() +
-               seconds(int(SECONDS_PER_SLOT div 3))
+    deadline = attestation.data.slot.toBeaconTime(attestationSlotOffset)
     (delayStr, delaySecs) =
       if wallTime < deadline:
         ("-" & $(deadline - wallTime), -toFloatSeconds(deadline - wallTime))
@@ -223,7 +223,7 @@ proc bench_process_justification_and_finalization(state: var phase0.BeaconState)
     info: phase0.EpochInfo
   info.init(state)
   info.process_attestations(state, cache)
-  process_justification_and_finalization(state, info.total_balances)
+  process_justification_and_finalization(state, info.balances)

 func bench_process_slashings(state: var phase0.BeaconState) =
   var

@@ -231,7 +231,7 @@ func bench_process_slashings(state: var phase0.BeaconState) =
     info: phase0.EpochInfo
   info.init(state)
   info.process_attestations(state, cache)
-  process_slashings(state, info.total_balances.current_epoch)
+  process_slashings(state, info.balances.current_epoch)

 template processBlockScenarioImpl(
     dir, preState: string, skipBLS: bool,
@@ -607,7 +607,7 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
       case info.kind
       of EpochInfoFork.Phase0:
         template info: untyped = info.phase0Data
-        for i, s in info.statuses.pairs():
+        for i, s in info.validators.pairs():
           let perf = addr perfs[i]
           if RewardFlags.isActiveInPreviousEpoch in s.flags:
             if s.is_previous_epoch_attester.isSome():

@@ -836,16 +836,16 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
         template info: untyped = info.phase0Data
         insertEpochInfo.exec(
           (getStateField(state[].data, slot).epoch.int64,
-           info.total_balances.current_epoch_raw.int64,
-           info.total_balances.previous_epoch_raw.int64,
-           info.total_balances.current_epoch_attesters_raw.int64,
-           info.total_balances.current_epoch_target_attesters_raw.int64,
-           info.total_balances.previous_epoch_attesters_raw.int64,
-           info.total_balances.previous_epoch_target_attesters_raw.int64,
-           info.total_balances.previous_epoch_head_attesters_raw.int64)
+           info.balances.current_epoch_raw.int64,
+           info.balances.previous_epoch_raw.int64,
+           info.balances.current_epoch_attesters_raw.int64,
+           info.balances.current_epoch_target_attesters_raw.int64,
+           info.balances.previous_epoch_attesters_raw.int64,
+           info.balances.previous_epoch_target_attesters_raw.int64,
+           info.balances.previous_epoch_head_attesters_raw.int64)
           ).expect("DB")

-        for index, status in info.statuses.pairs():
+        for index, status in info.validators.pairs():
           if not is_eligible_validator(status):
             continue
           let
@@ -178,7 +178,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

         let (positions, cookedSig) = res.get()

-        syncCommitteePool[].addSyncCommitteeMsg(
+        syncCommitteePool[].addSyncCommitteeMessage(
           msg.slot,
           msg.beacon_block_root,
           msg.validator_index,

@@ -221,7 +221,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
             message: contributionAndProof,
             signature: blsSign(validarorPrivKey, signingRoot.data).toValidatorSig)

-          res = dag.validateSignedContributionAndProof(
+          res = dag.validateContribution(
             syncCommitteePool[],
             signedContributionAndProof,
             contributionsTime,

@@ -229,8 +229,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

         doAssert res.isOk

-        syncCommitteePool[].addSyncContribution(
-          signedContributionAndProof, res.get)
+        syncCommitteePool[].addContribution(
+          signedContributionAndProof, res.get())

 proc getNewBlock[T](
     stateData: var StateData, slot: Slot, cache: var StateCache): T =
@@ -61,7 +61,7 @@ proc runTest(rewardsDir, identifier: string) =
     info.init(state[])
     info.process_attestations(state[], cache)
     let
-      total_balance = info.total_balances.current_epoch
+      total_balance = info.balances.current_epoch
       total_balance_sqrt = integer_squareroot(total_balance)

     var

@@ -71,7 +71,7 @@ proc runTest(rewardsDir, identifier: string) =
       inclusionDelayDeltas2 = Deltas.init(state[].validators.len)
      inactivityPenaltyDeltas2 = Deltas.init(state[].validators.len)

-    for index, validator in info.statuses.mpairs():
+    for index, validator in info.validators.mpairs():
       if not is_eligible_validator(validator):
         continue

@@ -80,11 +80,11 @@ proc runTest(rewardsDir, identifier: string) =
         state[], index.ValidatorIndex, total_balance_sqrt)

       sourceDeltas2.add(index, get_source_delta(
-        validator, base_reward, info.total_balances, finality_delay))
+        validator, base_reward, info.balances, finality_delay))
       targetDeltas2.add(index, get_target_delta(
-        validator, base_reward, info.total_balances, finality_delay))
+        validator, base_reward, info.balances, finality_delay))
       headDeltas2.add(index, get_head_delta(
-        validator, base_reward, info.total_balances, finality_delay))
+        validator, base_reward, info.balances, finality_delay))

       let
         (inclusion_delay_delta, proposer_delta) =
@@ -51,7 +51,7 @@ template runSuite(suiteDir, testName: string, transitionProc: untyped): untyped
 const JustificationFinalizationDir = RootDir/"justification_and_finalization"/"pyspec_tests"
 runSuite(JustificationFinalizationDir, "Justification & Finalization"):
   info.process_attestations(state, cache)
-  process_justification_and_finalization(state, info.total_balances)
+  process_justification_and_finalization(state, info.balances)

 # Rewards & Penalties
 # ---------------------------------------------------------------

@@ -71,7 +71,7 @@ runSuite(RegistryUpdatesDir, "Registry updates"):
 const SlashingsDir = RootDir/"slashings"/"pyspec_tests"
 runSuite(SlashingsDir, "Slashings"):
   info.process_attestations(state, cache)
-  process_slashings(state, info.total_balances.current_epoch)
+  process_slashings(state, info.balances.current_epoch)

 # Final updates
 # ---------------------------------------------------------------
@@ -39,4 +39,4 @@ proc transitionEpochUntilJustificationFinalization*(state: var ForkedHashedBeaco
   info.init(state.phase0Data.data)
   info.process_attestations(state.phase0Data.data, cache)
   process_justification_and_finalization(
-    state.phase0Data.data, info.total_balances)
+    state.phase0Data.data, info.balances)
@@ -10,7 +10,8 @@
 import chronicles, chronos
 import eth/keys
 import ../beacon_chain/spec/[datatypes/base, forks, presets]
-import ../beacon_chain/consensus_object_pools/[block_quarantine, blockchain_dag, exit_pool]
+import ../beacon_chain/consensus_object_pools/[
+  block_quarantine, blockchain_dag, exit_pool]
 import "."/[testutil, testdbutil]

 suite "Exit pool testing suite":
@@ -223,7 +223,7 @@ suite "Gossip validation - Extra": # Not based on preset config
           slot.toBeaconTime(), true)
       (positions, cookedSig) = res.get()

-    syncCommitteeMsgPool[].addSyncCommitteeMsg(
+    syncCommitteeMsgPool[].addSyncCommitteeMessage(
       msg.slot,
       msg.beacon_block_root,
       msg.validator_index,

@@ -237,7 +237,7 @@ suite "Gossip validation - Extra": # Not based on preset config
       check: syncCommitteeMsgPool[].produceContribution(
         slot, state[].root, subcommitteeIdx,
         contribution.message.contribution)
-      syncCommitteeMsgPool[].addSyncContribution(
+      syncCommitteeMsgPool[].addContribution(
         contribution[], contribution.message.contribution.signature.load.get)
       waitFor validator.sign(
         contribution, state[].data.fork, state[].data.genesis_validators_root)
@@ -76,13 +76,13 @@ suite "Sync committee pool":

     # Inserting sync committee messages
     #
-    pool.addSyncCommitteeMsg(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])
-    pool.addSyncCommitteeMsg(root1Slot, root1, 2, sig2, subcommittee1, [10'u64])
-    pool.addSyncCommitteeMsg(root2Slot, root1, 3, sig3, subcommittee2, [7'u64])
-    pool.addSyncCommitteeMsg(root2Slot, root2, 4, sig4, subcommittee2, [3'u64])
+    pool.addSyncCommitteeMessage(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])
+    pool.addSyncCommitteeMessage(root1Slot, root1, 2, sig2, subcommittee1, [10'u64])
+    pool.addSyncCommitteeMessage(root2Slot, root1, 3, sig3, subcommittee2, [7'u64])
+    pool.addSyncCommitteeMessage(root2Slot, root2, 4, sig4, subcommittee2, [3'u64])

     # Insert a duplicate message (this should be handled gracefully)
-    pool.addSyncCommitteeMsg(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])
+    pool.addSyncCommitteeMessage(root1Slot, root1, 1, sig1, subcommittee1, [1'u64])

     # Producing contributions
     #

@@ -120,7 +120,7 @@ suite "Sync committee pool":
         contribution.aggregation_bits[10] == true
         contribution.signature == expectedSig.toValidatorSig

-      pool.addSyncContribution(outContribution, expectedSig)
+      pool.addContribution(outContribution, expectedSig)
       check: pool.isSeen(outContribution.message)

     block:

@@ -142,7 +142,7 @@ suite "Sync committee pool":
         contribution.aggregation_bits[7] == true
         contribution.signature == sig3.toValidatorSig

-      pool.addSyncContribution(outContribution, sig3)
+      pool.addContribution(outContribution, sig3)
       check: pool.isSeen(outContribution.message)

     block:

@@ -165,7 +165,7 @@ suite "Sync committee pool":
         contribution.aggregation_bits[3] == true
         contribution.signature == sig4.toValidatorSig

-      pool.addSyncContribution(outContribution, sig4)
+      pool.addContribution(outContribution, sig4)
       check: pool.isSeen(outContribution.message)

     block: