tone down validator exit logging (#1939)
Validators exiting is normal, no need to scream about it

* avoid reallocating seq on big exit queue
* avoid fetching state cache when updating head (it's rarely needed)
* remove incorrectly implemented live validator counts (avoids memory allocs)
This commit is contained in:
parent
e7943b088b
commit
ca63d48b82
@@ -802,17 +802,11 @@ proc updateHead*(
     lastHead = dag.head
   dag.db.putHeadBlock(newHead.root)
 
-  # Start off by making sure we have the right state - as a special case, we'll
-  # check the last block that was cleared by clearance - it might be just the
-  # thing we're looking for
-
-  if dag.clearanceState.blck == newHead and
-      dag.clearanceState.data.data.slot == newHead.slot:
-    assign(dag.headState, dag.clearanceState)
-  else:
-    var cache = getStateCache(newHead, newHead.slot.epoch())
-    updateStateData(
-      dag, dag.headState, newHead.atSlot(newHead.slot), false, cache)
+  # Start off by making sure we have the right state - updateStateData will try
+  # to use existing in-memory states to make this smooth
+  var cache: StateCache
+  updateStateData(
+    dag, dag.headState, newHead.atSlot(newHead.slot), false, cache)
 
   dag.head = newHead
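The replacement simply declares var cache: StateCache and lets updateStateData fill it only if the transition actually needs shuffling data, instead of fetching a prebuilt cache on every head update. Below is a minimal, self-contained Nim sketch of that "start empty, fill on first use" pattern; DemoCache and shufflingFor are invented for illustration and are not the real StateCache API. In Nim, a plain var declaration like this zero-initializes the object, so the empty cache itself costs no allocation.

# Minimal sketch of "declare empty, fill on demand"; DemoCache/shufflingFor are
# made up for illustration and are not the real nimbus-eth2 types.
import options

type
  Epoch = uint64
  DemoCache = object
    shufflingEpoch: Option[Epoch]
    shuffling: seq[int]

proc shufflingFor(cache: var DemoCache, epoch: Epoch): seq[int] =
  ## Do the (potentially expensive) work only when a caller actually asks for it.
  if cache.shufflingEpoch != some(epoch):
    cache.shuffling = @[int(epoch), int(epoch) + 1]  # placeholder for real work
    cache.shufflingEpoch = some(epoch)
  cache.shuffling

var cache: DemoCache          # zero-initialized: no lookup, no allocation
echo shufflingFor(cache, 7)   # the work happens here, on first use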
@@ -145,18 +145,26 @@ func initiate_validator_exit*(state: var BeaconState,
   if validator.exit_epoch != FAR_FUTURE_EPOCH:
     return
 
-  # Compute exit queue epoch
-  var exit_epochs = mapIt(
-    filterIt(state.validators, it.exit_epoch != FAR_FUTURE_EPOCH),
-    it.exit_epoch)
-  exit_epochs.add compute_activation_exit_epoch(get_current_epoch(state))
-  var exit_queue_epoch = max(exit_epochs)
-  let exit_queue_churn = foldl(
-    state.validators,
-    a + (if b.exit_epoch == exit_queue_epoch: 1'u64 else: 0'u64),
-    0'u64)
+  trace "Validator exiting",
+    index = index,
+    num_validators = state.validators.len,
+    current_epoch = get_current_epoch(state),
+    validator_slashed = validator.slashed,
+    validator_withdrawable_epoch = validator.withdrawable_epoch,
+    validator_exit_epoch = validator.exit_epoch,
+    validator_effective_balance = validator.effective_balance
 
-  if exit_queue_churn >= get_validator_churn_limit(state, cache):
+  var exit_queue_epoch = compute_activation_exit_epoch(get_current_epoch(state))
+  # Compute max exit epoch
+  for v in state.validators:
+    if v.exit_epoch != FAR_FUTURE_EPOCH and v.exit_epoch > exit_queue_epoch:
+      exit_queue_epoch = v.exit_epoch
+
+  let
+    exit_queue_churn = countIt(
+      state.validators, it.exit_epoch == exit_queue_epoch)
+
+  if exit_queue_churn.uint64 >= get_validator_churn_limit(state, cache):
     exit_queue_epoch += 1
 
   # Set validator exit epoch and withdrawable epoch
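This hunk is the "avoid reallocating seq on big exit queue" item from the commit message: filterIt and mapIt each build a temporary seq over the whole validator registry just to find the maximum exit epoch, while the replacement keeps a running maximum in a single pass and then counts matches with countIt. A self-contained sketch of the two shapes, using a stripped-down validator record rather than the spec's Validator type:

# Standalone sketch contrasting the two exit-queue computations shown above,
# with a simplified validator record (not the spec types).
import sequtils

const FAR_FUTURE_EPOCH = high(uint64)

type Validator = object
  exit_epoch: uint64

proc exitQueueAllocating(validators: seq[Validator], minEpoch: uint64): (uint64, int) =
  # Old shape: filterIt + mapIt build two temporary seqs before taking the max
  # (the original also counted matches with a foldl; countIt is used here for brevity).
  var exit_epochs = mapIt(
    filterIt(validators, it.exit_epoch != FAR_FUTURE_EPOCH), it.exit_epoch)
  exit_epochs.add minEpoch
  let exit_queue_epoch = max(exit_epochs)
  (exit_queue_epoch, countIt(validators, it.exit_epoch == exit_queue_epoch))

proc exitQueueSinglePass(validators: seq[Validator], minEpoch: uint64): (uint64, int) =
  # New shape: one pass with a running maximum, then countIt - no temporary seqs.
  var exit_queue_epoch = minEpoch
  for v in validators:
    if v.exit_epoch != FAR_FUTURE_EPOCH and v.exit_epoch > exit_queue_epoch:
      exit_queue_epoch = v.exit_epoch
  (exit_queue_epoch, countIt(validators, it.exit_epoch == exit_queue_epoch))

let vals = @[
  Validator(exit_epoch: FAR_FUTURE_EPOCH),
  Validator(exit_epoch: 12'u64),
  Validator(exit_epoch: 12'u64),
  Validator(exit_epoch: 9'u64)]
doAssert exitQueueAllocating(vals, 10'u64) == exitQueueSinglePass(vals, 10'u64)
echo exitQueueSinglePass(vals, 10'u64)   # (12, 2)

Both shapes are linear in the number of validators; the difference is the per-call temporary allocations, which add up when many validators are queued to exit at once.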
@@ -172,7 +180,7 @@ proc slash_validator*(state: var BeaconState, slashed_index: ValidatorIndex,
   initiate_validator_exit(state, slashed_index, cache)
   let validator = addr state.validators[slashed_index]
 
-  notice "slash_validator: ejecting validator via slashing (validator_leaving)",
+  trace "slash_validator: ejecting validator via slashing (validator_leaving)",
     index = slashed_index,
     num_validators = state.validators.len,
     current_epoch = get_current_epoch(state),
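This hunk (and the registry-updates one below) only changes log severity: nimbus-eth2 logs through the chronicles library, where trace output is typically compiled out or filtered at the default settings, so routine ejections stop appearing in normal node output. A hedged sketch of the call shape; demoEject and its values are invented for illustration:

# Sketch of the chronicles-style structured logging used here, same call shape
# as the diff, just at a quieter severity.
import chronicles

proc demoEject(slashedIndex: int, numValidators: int) =
  # Old level: visible in default output, so every ejection was loud.
  notice "ejecting validator via slashing",
    index = slashedIndex, num_validators = numValidators
  # New level: only emitted when the build raises the log level,
  # e.g. compiled with -d:chronicles_log_level=TRACE.
  trace "ejecting validator via slashing",
    index = slashedIndex, num_validators = numValidators

demoEject(3, 100)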
@@ -389,14 +397,6 @@ proc process_registry_updates*(state: var BeaconState,
 
     if is_active_validator(validator, get_current_epoch(state)) and
         validator.effective_balance <= EJECTION_BALANCE:
-      notice "Registry updating: ejecting validator due to low balance (validator_leaving)",
-        index = index,
-        num_validators = state.validators.len,
-        current_epoch = get_current_epoch(state),
-        validator_slashed = validator.slashed,
-        validator_withdrawable_epoch = validator.withdrawable_epoch,
-        validator_exit_epoch = validator.exit_epoch,
-        validator_effective_balance = validator.effective_balance
       initiate_validator_exit(state, index.ValidatorIndex, cache)
 
   ## Queue validators eligible for activation and not dequeued for activation
@@ -34,10 +34,6 @@ import
   ./signatures, ./presets,
   ../../nbench/bench_lab
 
-# https://github.com/ethereum/eth2.0-metrics/blob/master/metrics.md#additional-metrics
-declareGauge beacon_current_live_validators, "Number of active validators that successfully included attestation on chain for current epoch" # On block
-declareGauge beacon_previous_live_validators, "Number of active validators that successfully included attestation on chain for previous epoch" # On block
-
 # https://github.com/ethereum/eth2.0-specs/blob/v1.0.0-rc.0/specs/phase0/beacon-chain.md#block-header
 func process_block_header*(
   state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
@@ -355,17 +351,8 @@ proc process_block*(
     state: var BeaconState, blck: SomeBeaconBlock, flags: UpdateFlags,
     stateCache: var StateCache): bool {.nbench.}=
   ## When there's a new block, we need to verify that the block is sane and
-  ## update the state accordingly
-
-  # TODO when there's a failure, we should reset the state!
-  # TODO probably better to do all verification first, then apply state changes
-
-  # Adds nontrivial additional computation, but only does so when metrics
-  # enabled.
-  beacon_current_live_validators.set(toHashSet(
-    mapIt(state.current_epoch_attestations, it.proposerIndex)).len.int64)
-  beacon_previous_live_validators.set(toHashSet(
-    mapIt(state.previous_epoch_attestations, it.proposerIndex)).len.int64)
+  ## update the state accordingly - the state is left in an unknown state when
+  ## block application fails (!)
 
   logScope:
     blck = shortLog(blck)
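This last hunk and the declareGauge removal above are the "incorrectly implemented live validator counts" from the commit message: the gauge descriptions talk about validators whose attestations were included on chain, but the removed code counted distinct proposer indices of the pending attestations, and it allocated a temporary seq plus a HashSet on every processed block to do so. A small sketch of that mismatch, with made-up records rather than the spec's PendingAttestation type:

# Made-up PendingAttestation records for illustration only.
import sets, sequtils

type PendingAttestation = object
  proposer_index: uint64   # validator that proposed the block including this attestation
  attester_count: int      # stand-in for the number of bits set in aggregation_bits

let atts = @[
  PendingAttestation(proposer_index: 1, attester_count: 64),
  PendingAttestation(proposer_index: 1, attester_count: 80),
  PendingAttestation(proposer_index: 2, attester_count: 50)]

# Shape of the removed metric: distinct block *proposers*, allocating a seq and
# a HashSet each time it runs.
echo toHashSet(mapIt(atts, it.proposer_index)).len   # 2

# The gauge description is about *attesting* validators, a very different count
# (a faithful version would also deduplicate attesters across attestations).
echo foldl(atts, a + b.attester_count, 0)            # 194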