Fork choice fixes 2 (#1356)
* fork choice cleanup
* enable v2 pruning
* prefer `get_current_epoch`
* fix finalization check to use correct epoch
* small cleanups
* add `count_active_validators`
* remove misleading logs
* fix justified checkpoint slot calculation in rpc
parent 5a4f6b7e5e
commit fb2f742972
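Most of the hunks below swap `state.slot.compute_epoch_at_slot` for `get_current_epoch(state)`. Per the phase0 spec, `get_current_epoch(state)` is defined as `compute_epoch_at_slot(state.slot)`, so this is a readability cleanup, not a behaviour change. A minimal stand-alone Nim sketch of the equivalence, using a hypothetical `MiniState` stand-in rather than the repo's `BeaconState`:

const SLOTS_PER_EPOCH = 32'u64  # mainnet preset value

type
  Slot = uint64
  Epoch = uint64
  MiniState = object
    slot: Slot

func compute_epoch_at_slot(slot: Slot): Epoch =
  # spec: the epoch containing a given slot
  slot div SLOTS_PER_EPOCH

func get_current_epoch(state: MiniState): Epoch =
  # spec: defined as compute_epoch_at_slot(state.slot)
  compute_epoch_at_slot(state.slot)

when isMainModule:
  let state = MiniState(slot: 70)
  doAssert state.get_current_epoch() == state.slot.compute_epoch_at_slot()
  echo state.get_current_epoch() # 2
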
@@ -185,9 +185,6 @@ proc addResolved(pool: var AttestationPool, blck: BlockRef, attestation: Attesta
   # reasonable to involve the head being voted for as well as the intended
   # slot of the attestation - double-check this with spec

-  # TODO: How fast is state rewind?
-  # Can this be a DOS vector.
-
   # TODO: filter valid attestation as much as possible before state rewind
   # TODO: the below check does not respect the inclusion delay
   # we should use isValidAttestationSlot instead

@@ -537,23 +534,23 @@ proc selectHead_v1(pool: AttestationPool): BlockRef =
 # Fork choice v2
 # ---------------------------------------------------------------

-func getAttesterBalances(state: StateData): seq[Gwei] {.noInit.}=
+func getAttesterBalances*(state: BeaconState): seq[Gwei] =
   ## Get the balances from a state
-  result.newSeq(state.data.data.validators.len) # zero-init
+  result.newSeq(state.validators.len) # zero-init

-  let epoch = state.data.data.slot.compute_epoch_at_slot()
+  let epoch = state.get_current_epoch()

   for i in 0 ..< result.len:
     # All non-active validators have a 0 balance
-    template validator: Validator = state.data.data.validators[i]
+    template validator: Validator = state.validators[i]
     if validator.is_active_validator(epoch):
       result[i] = validator.effective_balance

 proc selectHead_v2(pool: var AttestationPool): BlockRef =
-  let attesterBalances = pool.blockPool.justifiedState.getAttesterBalances()
+  let attesterBalances = pool.blockPool.justifiedState.data.data.getAttesterBalances()

   let newHead = pool.forkChoice_v2.find_head(
-    justified_epoch = pool.blockPool.justifiedState.data.data.slot.compute_epoch_at_slot(),
+    justified_epoch = pool.blockPool.justifiedState.data.data.get_current_epoch(),
     justified_root = pool.blockPool.head.justified.blck.root,
     finalized_epoch = pool.blockPool.headState.data.data.finalized_checkpoint.epoch,
     justified_state_balances = attesterBalances

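The zero-init pattern in `getAttesterBalances` gives every non-active validator a weight of zero in fork choice. A self-contained sketch of the same pattern over hypothetical simplified types (the real function takes a full `BeaconState`):

type
  Epoch = uint64
  Gwei = uint64
  Validator = object
    activation_epoch, exit_epoch: Epoch
    effective_balance: Gwei

func is_active_validator(v: Validator, epoch: Epoch): bool =
  v.activation_epoch <= epoch and epoch < v.exit_epoch

func getAttesterBalances(validators: openArray[Validator],
                         epoch: Epoch): seq[Gwei] =
  # zero-init: inactive validators keep a balance of 0
  result.newSeq(validators.len)
  for i in 0 ..< result.len:
    if validators[i].is_active_validator(epoch):
      result[i] = validators[i].effective_balance

when isMainModule:
  let vals = @[
    Validator(activation_epoch: 0, exit_epoch: high(Epoch),
              effective_balance: 32),
    Validator(activation_epoch: 9, exit_epoch: high(Epoch),
              effective_balance: 32)]
  # the second validator is not yet active at epoch 1, so it weighs 0
  doAssert getAttesterBalances(vals, 1) == @[32'u64, 0'u64]
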
@@ -565,8 +562,8 @@ proc selectHead_v2(pool: var AttestationPool): BlockRef =
   else:
     pool.blockPool.getRef(newHead.get())

-proc pruneBefore*(pool: var AttestationPool, finalizedhead: BlockSlot) =
-  if (let v = pool.forkChoice_v2.maybe_prune(finalizedHead.blck.root); v.isErr):
+proc pruneBefore*(pool: var AttestationPool, finalizedHead: BlockRef) =
+  if (let v = pool.forkChoice_v2.maybe_prune(finalizedHead.root); v.isErr):
     error "Pruning failed", err = v.error() # TODO should never happen

 # Dual-Headed Fork choice

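`pruneBefore` now takes the finalized `BlockRef` directly, and a later hunk guards the call so pruning only runs when finalization actually advances. A stand-alone sketch of that guard pattern, with hypothetical stand-ins for the pool types:

type
  BlockRef = ref object
    slot: uint64

var pruneCalls = 0

proc pruneBefore(finalizedHead: BlockRef) =
  # stand-in for pool.pruneBefore / forkChoice_v2.maybe_prune
  inc pruneCalls

proc updateHead(oldFinalized, newFinalized: BlockRef) =
  # prune only when the finalized block actually advanced
  if oldFinalized != newFinalized:
    pruneBefore(newFinalized)

when isMainModule:
  let a = BlockRef(slot: 0)
  let b = BlockRef(slot: 32)
  updateHead(a, a) # finalization unchanged: no pruning
  updateHead(a, b) # finalization advanced: prune once
  doAssert pruneCalls == 1
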
@@ -363,7 +363,7 @@ func verifyFinalization(node: BeaconNode, slot: Slot) =
   # during testing.
   if epoch >= 4 and slot mod SLOTS_PER_EPOCH > SETTLING_TIME_OFFSET:
     let finalizedEpoch =
-      node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot()
+      node.blockPool.finalizedHead.slot.compute_epoch_at_slot()
     # Finalization rule 234, that has the most lag slots among the cases, sets
     # state.finalized_checkpoint = old_previous_justified_checkpoint.epoch + 3
     # and then state.slot gets incremented, to increase the maximum offset, if

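The fix matters because `finalizedHead` is a `BlockSlot`: its `slot` field is the checkpoint slot, which can be later than the slot of the underlying block when slots near the epoch boundary are empty. A worked example with a hypothetical mini `BlockSlot` (field names simplified):

const SLOTS_PER_EPOCH = 32'u64

type
  Slot = uint64
  MiniBlockSlot = object
    blckSlot: Slot # slot of the block itself
    slot: Slot     # slot the checkpoint stands at (>= blckSlot)

func compute_epoch_at_slot(slot: Slot): uint64 =
  slot div SLOTS_PER_EPOCH

when isMainModule:
  # a checkpoint at slot 64 backed by a block from slot 63 is in
  # epoch 2; computing from the block's own slot reports epoch 1
  let finalizedHead = MiniBlockSlot(blckSlot: 63, slot: 64)
  doAssert finalizedHead.blckSlot.compute_epoch_at_slot == 1 # old, wrong
  doAssert finalizedHead.slot.compute_epoch_at_slot == 2     # fixed
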
@@ -389,11 +389,9 @@ proc onSlotStart(node: BeaconNode, lastSlot, scheduledSlot: Slot) {.gcsafe, asyn
     scheduledSlot = shortLog(scheduledSlot),
     beaconTime = shortLog(beaconTime),
     peers = node.network.peersCount,
-    headSlot = shortLog(node.blockPool.head.blck.slot),
+    head = shortLog(node.blockPool.head.blck),
-    headEpoch = shortLog(node.blockPool.head.blck.slot.compute_epoch_at_slot()),
     headRoot = shortLog(node.blockPool.head.blck.root),
-    finalizedSlot = shortLog(node.blockPool.finalizedHead.blck.slot),
     finalizedRoot = shortLog(node.blockPool.finalizedHead.blck.root),
+    finalized = shortLog(node.blockPool.finalizedHead.blck),
-    finalizedEpoch = shortLog(node.blockPool.finalizedHead.blck.slot.compute_epoch_at_slot())

   # Check before any re-scheduling of onSlotStart()

@@ -68,16 +68,18 @@ proc updateHead*(node: BeaconNode): BlockRef =

   # Store the new head in the block pool - this may cause epochs to be
   # justified and finalized
+  let oldFinalized = node.blockPool.finalizedHead.blck
+
   node.blockPool.updateHead(newHead)
   beacon_head_root.set newHead.root.toGaugeValue

-  # TODO - deactivated
-  # Cleanup the fork choice v2 if we have a finalized head
-  # node.attestationPool.pruneBefore(node.blockPool.finalizedHead)
+  if oldFinalized != node.blockPool.finalizedHead.blck:
+    node.attestationPool.pruneBefore(node.blockPool.finalizedHead.blck)

   newHead

-template findIt*(s: openarray, predicate: untyped): int64 =
+template findIt*(s: openarray, predicate: untyped): int =
   var res = -1
   for i, it {.inject.} in s:
     if predicate:

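For reference, a stand-alone rendering of the `findIt` template showing the `int64` to `int` return-type change in use. The diff truncates the body after `if predicate:`, so the `res = i; break` continuation below is an assumption:

template findIt(s: openArray, predicate: untyped): int =
  # index of the first element matching `predicate`, or -1 if none;
  # `it` is injected so the predicate can refer to the current element
  var res = -1
  for i, it {.inject.} in s:
    if predicate:
      res = i
      break
  res

when isMainModule:
  let xs = @[3, 5, 8]
  let firstEven = xs.findIt(it mod 2 == 0)
  doAssert firstEven == 2 # 8, at index 2
  let noneOver100 = xs.findIt(it > 100)
  doAssert noneOver100 == -1
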
@@ -60,9 +60,10 @@ func parent*(bs: BlockSlot): BlockSlot =
     slot: bs.slot - 1
   )

-func populateEpochCache(state: BeaconState, epoch: Epoch): EpochRef =
-  (EpochRef)(
-    epoch: state.slot.compute_epoch_at_slot,
+func populateEpochCache(state: BeaconState): EpochRef =
+  let epoch = state.get_current_epoch()
+  EpochRef(
+    epoch: epoch,
     shuffled_active_validator_indices:
       get_shuffled_active_validator_indices(state, epoch))

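The rewritten `populateEpochCache` derives the epoch from the state instead of accepting it as a parameter, removing one way for caller and state to disagree. A sketch of the shape of that refactor, again over hypothetical simplified types:

const SLOTS_PER_EPOCH = 32'u64

type
  Epoch = uint64
  MiniState = object
    slot: uint64
  EpochRef = ref object
    epoch: Epoch

func get_current_epoch(state: MiniState): Epoch =
  state.slot div SLOTS_PER_EPOCH

func populateEpochCache(state: MiniState): EpochRef =
  # epoch is derived from the state, not passed in, so the cached
  # value can never be inconsistent with the state it came from
  let epoch = state.get_current_epoch()
  EpochRef(epoch: epoch)

when isMainModule:
  doAssert populateEpochCache(MiniState(slot: 70)).epoch == 2
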
@@ -165,11 +166,11 @@ func atSlot*(blck: BlockRef, slot: Slot): BlockSlot =
 func getEpochInfo*(blck: BlockRef, state: BeaconState): EpochRef =
   # This is the only intended mechanism by which to get an EpochRef
   let
-    state_epoch = state.slot.compute_epoch_at_slot
+    state_epoch = state.get_current_epoch()
     matching_epochinfo = blck.epochsInfo.filterIt(it.epoch == state_epoch)

   if matching_epochinfo.len == 0:
-    let cache = populateEpochCache(state, state_epoch)
+    let cache = populateEpochCache(state)

     # Don't use BlockRef caching as far as the epoch where the active
     # validator indices can diverge.

@@ -187,8 +188,7 @@ func getEpochInfo*(blck: BlockRef, state: BeaconState): EpochRef =
 func getEpochCache*(blck: BlockRef, state: BeaconState): StateCache =
   let epochInfo = getEpochInfo(blck, state)
   result = StateCache()
-  result.shuffled_active_validator_indices[
-    state.slot.compute_epoch_at_slot] =
+  result.shuffled_active_validator_indices[state.get_current_epoch()] =
     epochInfo.shuffled_active_validator_indices

 func init(T: type BlockRef, root: Eth2Digest, slot: Slot): BlockRef =

@@ -10,7 +10,7 @@
 import
   chronicles, sequtils, tables,
   metrics, stew/results,
-  ../ssz/merkleization, ../extras,
+  ../extras,
   ../spec/[crypto, datatypes, digest, helpers, signatures, state_transition],
   block_pools_types, candidate_chains, quarantine

@@ -56,7 +56,7 @@ proc addResolvedBlock(
     blockRoot = signedBlock.root
   blockRef = BlockRef.init(blockRoot, signedBlock.message)
   blockRef.epochsInfo = filterIt(parent.epochsInfo,
-    it.epoch + 1 >= state.data.slot.compute_epoch_at_slot)
+    it.epoch + 1 >= state.data.get_current_epoch())
   link(parent, blockRef)

   dag.blocks[blockRoot] = blockRef

@@ -114,20 +114,6 @@ func process_attestation*(
     trace "Integrating vote in fork choice",
       validator_index = validator_index,
       new_vote = shortlog(vote)
-  else:
-    {.noSideEffect.}:
-      if vote.next_epoch != target_epoch or vote.next_root != block_root:
-        warn "Change of vote ignored for fork choice. Potential for slashing.",
-          validator_index = validator_index,
-          current_vote = shortlog(vote),
-          ignored_block_root = shortlog(block_root),
-          ignored_target_epoch = target_epoch
-      else:
-        trace "Ignoring double-vote for fork choice",
-          validator_index = validator_index,
-          current_vote = shortlog(vote),
-          ignored_block_root = shortlog(block_root),
-          ignored_target_epoch = target_epoch

 func contains*(self: ForkChoice, block_root: Eth2Digest): bool =
   ## Returns `true` if a block is known to the fork choice

@@ -506,8 +506,8 @@ proc createBeaconStateAux(preset: RuntimePreset,
     eth1Block.voteData.block_hash,
     eth1Block.timestamp.uint64,
     deposits, {})
-  let activeValidators = get_active_validator_indices(result[], GENESIS_EPOCH)
-  eth1Block.knownGoodDepositsCount = some len(activeValidators).uint64
+  let activeValidators = count_active_validator_indices(result[], GENESIS_EPOCH)
+  eth1Block.knownGoodDepositsCount = some activeValidators.uint64

 proc createBeaconState(m: MainchainMonitor, eth1Block: Eth1Block): BeaconStateRef =
   createBeaconStateAux(

@@ -590,13 +590,13 @@ proc check_attestation*(

   let committee_count_at_slot =
     get_committee_count_at_slot(get_shuffled_active_validator_indices(
-      state, stateSlot.compute_epoch_at_slot, stateCache).len.uint64).uint64
+      state, state.get_current_epoch(), stateCache).len.uint64).uint64
   if not (data.index < committee_count_at_slot):
     warn "Data index exceeds committee count",
       committee_count = committee_count_at_slot
     return

-  if not isValidAttestationTargetEpoch(state.slot.compute_epoch_at_slot, data):
+  if not isValidAttestationTargetEpoch(state.get_current_epoch(), data):
     # Logging in isValidAttestationTargetEpoch
     return

@@ -56,6 +56,12 @@ func is_active_validator*(validator: Validator, epoch: Epoch): bool =
   ## Check if ``validator`` is active
   validator.activation_epoch <= epoch and epoch < validator.exit_epoch

+# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_active_validator_indices
+func count_active_validator_indices*(state: BeaconState, epoch: Epoch): int =
+  for val in state.validators:
+    if is_active_validator(val, epoch):
+      result += 1
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#get_active_validator_indices
 func get_active_validator_indices*(state: BeaconState, epoch: Epoch):
     seq[ValidatorIndex] =

@@ -79,8 +85,8 @@ func get_committee_count_at_slot*(state: BeaconState, slot: Slot): uint64 =
   # CommitteeIndex return type here.
   let
     epoch = compute_epoch_at_slot(slot)
-    active_validator_indices = get_active_validator_indices(state, epoch)
-  result = get_committee_count_at_slot(len(active_validator_indices).uint64)
+    active_validator_count = count_active_validator_indices(state, epoch)
+  result = get_committee_count_at_slot(active_validator_count.uint64)

   # Otherwise, get_beacon_committee(...) cannot access some committees.
   doAssert (SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT).uint64 >= result

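The new `count_active_validator_indices` exists so call sites that only need the number of active validators no longer materialise a `seq[ValidatorIndex]` just to take its length. A self-contained sketch of the trade-off with hypothetical simplified types:

type
  Epoch = uint64
  Validator = object
    activation_epoch, exit_epoch: Epoch

func is_active_validator(v: Validator, epoch: Epoch): bool =
  v.activation_epoch <= epoch and epoch < v.exit_epoch

# O(n) time, O(1) space: no intermediate seq allocation
func countActive(validators: openArray[Validator], epoch: Epoch): int =
  for v in validators:
    if v.is_active_validator(epoch):
      inc result

# the replaced pattern allocated the indices only to take their length
func collectActive(validators: openArray[Validator],
                   epoch: Epoch): seq[int] =
  for i, v in validators:
    if v.is_active_validator(epoch):
      result.add i

when isMainModule:
  let vals = @[
    Validator(activation_epoch: 0, exit_epoch: high(Epoch)),
    Validator(activation_epoch: 5, exit_epoch: high(Epoch))]
  doAssert countActive(vals, 1) == collectActive(vals, 1).len
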
@@ -219,7 +219,7 @@ proc state_transition*(
   # bool return values...)
   doAssert not rollback.isNil, "use noRollback if it's ok to mess up state"
   doAssert stateCache.shuffled_active_validator_indices.hasKey(
-    state.data.slot.compute_epoch_at_slot)
+    state.data.get_current_epoch())

   if not process_slots(state, signedBlock.message.slot, flags):
     rollback(state)

@@ -265,9 +265,9 @@ proc state_transition*(
   # and fuzzing code should always be coming from blockpool which should
   # always be providing cache or equivalent
   var cache = StateCache()
-  cache.shuffled_active_validator_indices[state.data.slot.compute_epoch_at_slot] =
+  cache.shuffled_active_validator_indices[state.data.get_current_epoch()] =
     get_shuffled_active_validator_indices(
-      state.data, state.data.slot.compute_epoch_at_slot)
+      state.data, state.data.get_current_epoch())
   state_transition(preset, state, signedBlock, cache, flags, rollback)

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/validator.md#preparing-for-a-beaconblock

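Seeding the cache here and the `hasKey` doAssert in the earlier `state_transition` hunk must agree on the epoch key, which is why both sides now spell it `get_current_epoch`. A minimal sketch of that keying contract, using std `tables` and a hypothetical `MiniCache`:

import tables

type
  Epoch = uint64
  MiniCache = object
    shuffled_active_validator_indices: Table[Epoch, seq[int]]

when isMainModule:
  const SLOTS_PER_EPOCH = 32'u64
  let slot = 70'u64
  let epoch: Epoch = slot div SLOTS_PER_EPOCH # what get_current_epoch returns

  var cache: MiniCache
  # seed under the state's current epoch...
  cache.shuffled_active_validator_indices[epoch] = @[0, 1, 2]
  # ...so the lookup (mirroring the doAssert in state_transition) succeeds
  doAssert cache.shuffled_active_validator_indices.hasKey(epoch)
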
|
@ -60,7 +60,7 @@ func get_total_active_balance*(state: BeaconState, cache: var StateCache): Gwei
|
|||
# minimum to avoid divisions by zero.
|
||||
|
||||
let
|
||||
epoch = state.slot.compute_epoch_at_slot
|
||||
epoch = state.get_current_epoch()
|
||||
try:
|
||||
if epoch notin cache.shuffled_active_validator_indices:
|
||||
cache.shuffled_active_validator_indices[epoch] =
|
||||
|
|
|
@@ -26,7 +26,7 @@ func shortLog*(x: Checkpoint): string =

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#helper-functions-1
 func get_attesting_indices*(
-    state: BeaconState, attestations: openarray[PendingAttestation],
+    state: BeaconState, attestations: openArray[PendingAttestation],
     stateCache: var StateCache): HashSet[ValidatorIndex] =
   # This is part of get_unslashed_attesting_indices(...) in spec.
   # Exported because of external trace-level chronicles logging.

@@ -37,7 +37,7 @@ func get_attesting_indices*(

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#helper-functions-1
 func get_unslashed_attesting_indices*(
-    state: BeaconState, attestations: openarray[PendingAttestation],
+    state: BeaconState, attestations: openArray[PendingAttestation],
     stateCache: var StateCache): HashSet[ValidatorIndex] =
   result = get_attesting_indices(state, attestations, stateCache)
   for index in result:

@@ -160,7 +160,8 @@ proc stateIdToBlockSlot(node: BeaconNode, stateId: string): BlockSlot =
   of "finalized":
     node.blockPool.finalizedHead
   of "justified":
-    node.blockPool.justifiedState.blck.toBlockSlot()
+    node.blockPool.justifiedState.blck.atSlot(
+      node.blockPool.justifiedState.data.data.slot)
   else:
     if stateId.startsWith("0x"):
       let blckRoot = parseRoot(stateId)

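A hypothetical mini model of the rpc fix: `toBlockSlot` anchors the `BlockSlot` at the block's own slot, but the justified state may have been advanced past its block, so the answer has to be anchored at the state's slot via `atSlot`:

type
  Slot = uint64
  MiniBlockRef = ref object
    slot: Slot
  MiniBlockSlot = object
    blck: MiniBlockRef
    slot: Slot

func toBlockSlot(blck: MiniBlockRef): MiniBlockSlot =
  # anchors at the block's own slot, losing the state's slot
  MiniBlockSlot(blck: blck, slot: blck.slot)

func atSlot(blck: MiniBlockRef, slot: Slot): MiniBlockSlot =
  # anchors at the requested slot
  MiniBlockSlot(blck: blck, slot: slot)

when isMainModule:
  let justifiedBlck = MiniBlockRef(slot: 63)
  let justifiedStateSlot: Slot = 64 # the state was advanced past its block
  doAssert justifiedBlck.toBlockSlot().slot == 63              # old behaviour
  doAssert justifiedBlck.atSlot(justifiedStateSlot).slot == 64 # fixed
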
@@ -188,7 +188,7 @@ template processEpochScenarioImpl(

   when needCache:
     var cache = StateCache()
-    let epoch = state.data.slot.compute_epoch_at_slot
+    let epoch = state.data.get_current_epoch()
     cache.shuffled_active_validator_indices[epoch] =
       get_shuffled_active_validator_indices(state.data, epoch)

@@ -101,7 +101,7 @@ proc cmdBench(conf: DbConf) =

   for b in blocks:
     let
-      isEpoch = state[].data.slot.compute_epoch_at_slot !=
+      isEpoch = state[].data.get_current_epoch() !=
         b.message.slot.compute_epoch_at_slot
     withTimer(timers[if isEpoch: tApplyEpochBlock else: tApplyBlock]):
       if not state_transition(defaultRuntimePreset, state[], b, {}, noRollback):

@@ -153,7 +153,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,

   if (state[].data.slot).isEpoch:
     echo &"  slot: {shortLog(state[].data.slot)} ",
-      &"epoch: {shortLog(state[].data.slot.compute_epoch_at_slot)}"
+      &"epoch: {shortLog(state[].data.get_current_epoch())}"

   maybeWrite(true) # catch that last state as well..

@@ -370,7 +370,7 @@ suiteReport "Attestation pool processing" & preset():

   doAssert: blockPool[].finalizedHead.slot != 0

-  pool[].pruneBefore(blockPool[].finalizedHead)
+  pool[].pruneBefore(blockPool[].finalizedHead.blck)
   doAssert: b10.root notin pool.forkChoice_v2

   # Add back the old block to ensure we have a duplicate error