restrict best LC update collection to canonical blocks (#5613)

Simplify best `LightClientUpdate` collection by tracking only canonical
data instead of tracking the best update across all branches within the
sync committee period.

- https://github.com/ethereum/consensus-specs/pull/3553
Etan Kissling 2023-11-21 14:51:05 -08:00 committed by GitHub
parent 946ebe54cf
commit c33dd2c170
5 changed files with 153 additions and 213 deletions
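To make the new scheme concrete, below is a minimal, self-contained Nim sketch of the idea (not taken from the nimbus-eth2 sources; `Update`, `Cached`, `onNewBlock`, `onHeadChange` and the slot-keyed table are simplified stand-ins for `ForkedLightClientUpdate`, `CachedLightClientData`, `createLightClientUpdates` and `processHeadChangeForLightClient`): each block's cache entry carries a `ref` to the best update of its own sync committee period along its own ancestry, the ref is shared with the parent until a better candidate appears (copy on write), and on head change the canonical block's ref is simply persisted.

  import std/tables

  const slotsPerPeriod = 8192'u64   # mainnet SLOTS_PER_SYNC_COMMITTEE_PERIOD

  type
    Update = object
      attestedSlot, signatureSlot, participants: uint64
    Cached = object
      bestInPeriod: ref Update      # best update of the block's period on this branch

  func period(slot: uint64): uint64 = slot div slotsPerPeriod

  func isBetter(a, b: Update): bool =
    ## Stand-in for the spec's `is_better_update`.
    a.participants > b.participants

  proc onNewBlock(cache: var Table[uint64, Cached],
                  parentSlot, attestedSlot, signatureSlot, participants: uint64) =
    ## Derive the new block's `bestInPeriod` from its parent's cached value.
    var best =
      if attestedSlot.period != signatureSlot.period:
        (ref Update)()                      # period boundary: start fresh
      else:
        cache[parentSlot].bestInPeriod      # inherit the branch's current best
    let candidate = Update(attestedSlot: attestedSlot,
                           signatureSlot: signatureSlot,
                           participants: participants)
    if candidate.isBetter(best[]):
      best = (ref Update)()                 # copy on write; parent's ref stays intact
      best[] = candidate
    cache[signatureSlot] = Cached(bestInPeriod: best)  # keyed by block slot for simplicity

  proc onHeadChange(cache: Table[uint64, Cached], headSlot: uint64,
                    persist: proc (period: uint64, update: Update)) =
    ## Persist the canonical best update for the head's period; earlier periods
    ## would be handled by walking period-boundary ancestors the same way.
    if headSlot in cache:
      persist(headSlot.period, cache[headSlot].bestInPeriod[])

  when isMainModule:
    var cache: Table[uint64, Cached]
    cache[0] = Cached(bestInPeriod: (ref Update)())    # parent entry
    cache.onNewBlock(parentSlot = 0, attestedSlot = 1, signatureSlot = 2,
                     participants = 400)
    cache.onHeadChange(2, proc (p: uint64, u: Update) =
      echo "period ", p, ": best update has ", u.participants, " participants")

Because branches only share or replace a per-block `ref`, abandoned forks never leak their candidates into the database, which is what previously required the `pendingBest` table keyed by sync committee root.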


@@ -89,20 +89,17 @@ type
getStmt: SqliteStmt[int64, (int64, seq[byte])]
putStmt: SqliteStmt[(int64, seq[byte]), void]
delStmt: SqliteStmt[int64, void]
delFromStmt: SqliteStmt[int64, void]
keepFromStmt: SqliteStmt[int64, void]
BestLightClientUpdateStore = object
getStmt: SqliteStmt[int64, (int64, seq[byte])]
putStmt: SqliteStmt[(int64, int64, seq[byte]), void]
delStmt: SqliteStmt[int64, void]
delFromStmt: SqliteStmt[int64, void]
keepFromStmt: SqliteStmt[int64, void]
SealedSyncCommitteePeriodStore = object
containsStmt: SqliteStmt[int64, int64]
putStmt: SqliteStmt[int64, void]
delFromStmt: SqliteStmt[int64, void]
keepFromStmt: SqliteStmt[int64, void]
LightClientDataDB* = ref object
@@ -405,10 +402,6 @@ proc initLegacyBestUpdatesStore(
DELETE FROM `""" & name & """`
WHERE `period` = ?;
""", int64, void, managed = false).expect("SQL query OK")
delFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` >= ?;
""", int64, void, managed = false).expect("SQL query OK")
keepFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` < ?;
@@ -418,14 +411,12 @@ proc initLegacyBestUpdatesStore(
getStmt: getStmt,
putStmt: putStmt,
delStmt: delStmt,
delFromStmt: delFromStmt,
keepFromStmt: keepFromStmt)
func close(store: var LegacyBestLightClientUpdateStore) =
store.getStmt.disposeSafe()
store.putStmt.disposeSafe()
store.delStmt.disposeSafe()
store.delFromStmt.disposeSafe()
store.keepFromStmt.disposeSafe()
proc initBestUpdatesStore(
@@ -470,10 +461,6 @@ proc initBestUpdatesStore(
DELETE FROM `""" & name & """`
WHERE `period` = ?;
""", int64, void, managed = false).expect("SQL query OK")
delFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` >= ?;
""", int64, void, managed = false).expect("SQL query OK")
keepFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` < ?;
@@ -483,14 +470,12 @@ proc initBestUpdatesStore(
getStmt: getStmt,
putStmt: putStmt,
delStmt: delStmt,
delFromStmt: delFromStmt,
keepFromStmt: keepFromStmt)
func close(store: var BestLightClientUpdateStore) =
store.getStmt.disposeSafe()
store.putStmt.disposeSafe()
store.delStmt.disposeSafe()
store.delFromStmt.disposeSafe()
store.keepFromStmt.disposeSafe()
proc getBestUpdate*(
@@ -559,13 +544,6 @@ func putBestUpdate*(
let res = db.legacyBestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
proc putUpdateIfBetter*(
db: LightClientDataDB, period: SyncCommitteePeriod,
update: ForkedLightClientUpdate) =
let existing = db.getBestUpdate(period)
if is_better_update(update, existing):
db.putBestUpdate(period, update)
proc initSealedPeriodsStore(
backend: SqStoreRef,
name: string): KvResult[SealedSyncCommitteePeriodStore] =
@@ -589,10 +567,6 @@ proc initSealedPeriodsStore(
`period`
) VALUES (?);
""", int64, void, managed = false).expect("SQL query OK")
delFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` >= ?;
""", int64, void, managed = false).expect("SQL query OK")
keepFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` < ?;
@@ -601,13 +575,11 @@ proc initSealedPeriodsStore(
ok SealedSyncCommitteePeriodStore(
containsStmt: containsStmt,
putStmt: putStmt,
delFromStmt: delFromStmt,
keepFromStmt: keepFromStmt)
func close(store: var SealedSyncCommitteePeriodStore) =
store.containsStmt.disposeSafe()
store.putStmt.disposeSafe()
store.delFromStmt.disposeSafe()
store.keepFromStmt.disposeSafe()
func isPeriodSealed*(
@@ -629,21 +601,6 @@ func sealPeriod*(
let res = db.sealedPeriods.putStmt.exec(period.int64)
res.expect("SQL query OK")
func delNonFinalizedPeriodsFrom*(
db: LightClientDataDB, minPeriod: SyncCommitteePeriod) =
doAssert not db.backend.readOnly # All `stmt` are non-nil
doAssert minPeriod.isSupportedBySQLite
block:
let res = db.sealedPeriods.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.bestUpdates.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.legacyBestUpdates.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
# `syncCommittees`, `currentBranches` and `headers` only have finalized data
func keepPeriodsFrom*(
db: LightClientDataDB, minPeriod: SyncCommitteePeriod) =
doAssert not db.backend.readOnly # All `stmt` are non-nil


@@ -39,18 +39,14 @@ type
finalized_slot*: Slot
finality_branch*: altair.FinalityBranch
current_period_best_update*: ref ForkedLightClientUpdate
LightClientDataCache* = object
data*: Table[BlockId, CachedLightClientData]
## Cached data for creating future `LightClientUpdate` instances.
## Key is the block ID of which the post state was used to get the data.
## Data stored for the finalized head block and all non-finalized blocks.
pendingBest*:
Table[(SyncCommitteePeriod, Eth2Digest), ForkedLightClientUpdate]
## Same as `db.bestUpdates`, but for `SyncCommitteePeriod` with not yet
## finalized `next_sync_committee`. Key is `(attested_period,
## hash_tree_root(current_sync_committee | next_sync_committee)`.
latest*: ForkedLightClientFinalityUpdate
## Tracks light client data for the latest slot that was signed by
## at least `MIN_SYNC_COMMITTEE_PARTICIPANTS`. May be older than head.


@@ -799,21 +799,14 @@ proc currentSyncCommitteeForPeriod*(
else: err()
do: err()
func isNextSyncCommitteeFinalized*(
dag: ChainDAGRef, period: SyncCommitteePeriod): bool =
let finalizedSlot = dag.finalizedHead.slot
if finalizedSlot < period.start_slot:
false
elif finalizedSlot < dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
false # Fork epoch not necessarily tied to sync committee period boundary
proc getBlockIdAtSlot*(
dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] =
if slot >= state.data.slot:
Opt.some state.latest_block_id
elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT:
dag.getBlockId(state.data.get_block_root_at_slot(slot))
else:
true
func firstNonFinalizedPeriod*(dag: ChainDAGRef): SyncCommitteePeriod =
if dag.finalizedHead.slot >= dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
dag.finalizedHead.slot.sync_committee_period + 1
else:
dag.cfg.ALTAIR_FORK_EPOCH.sync_committee_period
Opt.none(BlockId)
proc updateBeaconMetrics(
state: ForkedHashedBeaconState, bid: BlockId, cache: var StateCache) =
@@ -1338,15 +1331,6 @@ proc getFinalizedEpochRef*(dag: ChainDAGRef): EpochRef =
dag.finalizedHead.blck, dag.finalizedHead.slot.epoch, false).expect(
"getEpochRef for finalized head should always succeed")
proc getBlockIdAtSlot(
dag: ChainDAGRef, state: ForkyHashedBeaconState, slot: Slot): Opt[BlockId] =
if slot >= state.data.slot:
Opt.some state.latest_block_id
elif state.data.slot <= slot + SLOTS_PER_HISTORICAL_ROOT:
dag.getBlockId(state.data.get_block_root_at_slot(slot))
else:
Opt.none(BlockId)
proc ancestorSlot*(
dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId,
lowSlot: Slot): Opt[Slot] =


@@ -153,7 +153,9 @@ proc initLightClientBootstrapForPeriod(
period: SyncCommitteePeriod): Opt[void] =
## Compute and cache `LightClientBootstrap` data for all finalized
## epoch boundary blocks within a given sync committee period.
if not dag.isNextSyncCommitteeFinalized(period):
if dag.finalizedHead.slot < period.start_slot:
return ok()
if dag.finalizedHead.slot < dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
return ok()
if dag.lcDataStore.db.isPeriodSealed(period):
return ok()
@@ -217,7 +219,10 @@ proc initLightClientUpdateForPeriod(
## Compute and cache the best `LightClientUpdate` within a given
## sync committee period up through the finalized head block.
## Non-finalized blocks are processed incrementally by other functions.
if not dag.isNextSyncCommitteeFinalized(period):
## Should not be called for periods for which incremental computation started.
if dag.finalizedHead.slot < period.start_slot:
return ok()
if dag.finalizedHead.slot < dag.cfg.ALTAIR_FORK_EPOCH.start_slot:
return ok()
if dag.lcDataStore.db.isPeriodSealed(period):
return ok()
@@ -277,7 +282,6 @@ proc initLightClientUpdateForPeriod(
tailSlot = max(dag.targetLightClientTailSlot, dag.tail.slot)
lowSlot = max(periodStartSlot, tailSlot)
highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
fullPeriodCovered = (dag.finalizedHead.slot > periodEndSlot)
highBsi = dag.getExistingBlockIdAtSlot(highSlot).valueOr:
dag.handleUnexpectedLightClientError(highSlot)
return err()
@@ -285,10 +289,7 @@ proc initLightClientUpdateForPeriod(
maxParticipantsRes = dag.maxParticipantsBlock(highBid, lowSlot)
maxParticipantsBid = maxParticipantsRes.bid.valueOr:
const update = default(ForkedLightClientUpdate)
if fullPeriodCovered and maxParticipantsRes.res.isOk: # No block in period
dag.lcDataStore.db.putBestUpdate(period, update)
else:
dag.lcDataStore.db.putUpdateIfBetter(period, update)
return maxParticipantsRes.res
# The block with highest participation may refer to a `finalized_checkpoint`
@@ -392,10 +393,7 @@ proc initLightClientUpdateForPeriod(
when lcDataFork > LightClientDataFork.None:
forkyUpdate.signature_slot = signatureBid.slot
if fullPeriodCovered and res.isOk:
dag.lcDataStore.db.putBestUpdate(period, update)
else:
dag.lcDataStore.db.putUpdateIfBetter(period, update)
res
proc initLightClientDataForPeriod(
@@ -422,7 +420,8 @@ proc getLightClientData(
except KeyError: raiseAssert "Unreachable"
proc cacheLightClientData(
dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId) =
dag: ChainDAGRef, state: ForkyHashedBeaconState, bid: BlockId,
current_period_best_update: ref ForkedLightClientUpdate) =
## Cache data for a given block and its post-state to speed up creating future
## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
## block and state.
@@ -434,7 +433,9 @@ proc cacheLightClientData(
finalized_slot:
state.data.finalized_checkpoint.epoch.start_slot,
finality_branch:
state.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get)
state.data.build_proof(altair.FINALIZED_ROOT_GINDEX).get,
current_period_best_update:
current_period_best_update)
if dag.lcDataStore.cache.data.hasKeyOrPut(bid, cachedData):
doAssert false, "Redundant `cacheLightClientData` call"
@@ -490,14 +491,6 @@ template lazy_header(name: untyped): untyped {.dirty.} =
`name _ ptr` = addr obj.forky(data_fork).name
`name _ ok`
template lazy_data(name: untyped): untyped {.dirty.} =
## `createLightClientUpdates` helper to lazily load cached light client state.
var `name` {.noinit.}: CachedLightClientData
`name`.finalized_slot = FAR_FUTURE_SLOT
template `load _ name`(bid: BlockId) =
if `name`.finalized_slot == FAR_FUTURE_SLOT:
`name` = dag.getLightClientData(bid)
template lazy_bid(name: untyped): untyped {.dirty.} =
## `createLightClientUpdates` helper to lazily load a known to exist block id.
var
@@ -519,26 +512,40 @@ proc createLightClientUpdates(
state: ForkyHashedBeaconState,
blck: ForkyTrustedSignedBeaconBlock,
parent_bid: BlockId,
data_fork: static LightClientDataFork) =
data_fork: static LightClientDataFork): ref ForkedLightClientUpdate =
## Create `LightClientUpdate` instances for a given block and its post-state,
## and keep track of best / latest ones. Data about the parent block's
## post-state must be cached (`cacheLightClientData`) before calling this.
# Verify sync committee has sufficient participants
template sync_aggregate(): auto = blck.asSigned().message.body.sync_aggregate
let num_active_participants = sync_aggregate.num_active_participants.uint64
if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
return
## Returns the best `LightClientUpdate` for the block's sync committee period.
# Verify attested block (parent) is recent enough and that state is available
template attested_bid(): auto = parent_bid
let attested_slot = attested_bid.slot
if attested_slot < dag.lcDataStore.cache.tailSlot:
return
return (ref ForkedLightClientUpdate)()
# `blck` and `parent_bid` must be in the same sync committee period
# to update the best per-period `LightClientUpdate`
let
attested_period = attested_slot.sync_committee_period
signature_slot = blck.message.slot
signature_period = signature_slot.sync_committee_period
var
attested_data = dag.getLightClientData(attested_bid)
best =
if attested_period != signature_period:
(ref ForkedLightClientUpdate)()
else:
attested_data.current_period_best_update
# Verify sync committee has sufficient participants
template sync_aggregate(): auto = blck.asSigned().message.body.sync_aggregate
let num_active_participants = sync_aggregate.num_active_participants.uint64
if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
return best
# Lazy variables to hold historic data
lazy_header(attested_header)
lazy_data(attested_data)
lazy_bid(finalized_bid)
lazy_header(finalized_header)
@@ -547,9 +554,7 @@ proc createLightClientUpdates(
var
newFinality = false
newOptimistic = false
let
signature_slot = blck.message.slot
is_later = withForkyFinalityUpdate(latest):
let is_later = withForkyFinalityUpdate(latest):
when lcDataFork > LightClientDataFork.None:
if attested_slot != forkyFinalityUpdate.attested_header.beacon.slot:
attested_slot > forkyFinalityUpdate.attested_header.beacon.slot
@@ -559,7 +564,6 @@ proc createLightClientUpdates(
true
if is_later and latest.assign_attested_header_with_migration(attested_bid):
template forkyLatest: untyped = latest.forky(data_fork)
load_attested_data(attested_bid)
var finalized_slot = attested_data.finalized_slot
if finalized_slot == forkyLatest.finalized_header.beacon.slot:
forkyLatest.finality_branch = attested_data.finality_branch
@@ -593,21 +597,7 @@ proc createLightClientUpdates(
newOptimistic = true
# Track best light client data for current period
let
attested_period = attested_slot.sync_committee_period
signature_period = signature_slot.sync_committee_period
if attested_period == signature_period:
template next_sync_committee(): auto = state.data.next_sync_committee
let isCommitteeFinalized = dag.isNextSyncCommitteeFinalized(attested_period)
var best =
if isCommitteeFinalized:
dag.lcDataStore.db.getBestUpdate(attested_period)
else:
let key = (attested_period, state.syncCommitteeRoot)
dag.lcDataStore.cache.pendingBest.getOrDefault(key)
load_attested_data(attested_bid)
let
finalized_slot = attested_data.finalized_slot
has_finality =
@@ -619,10 +609,15 @@ proc createLightClientUpdates(
has_sync_committee: true,
has_finality: has_finality,
num_active_participants: num_active_participants)
is_better = is_better_data(meta, best.toMeta)
if is_better and best.assign_attested_header_with_migration(attested_bid):
template forkyBest: untyped = best.forky(data_fork)
forkyBest.next_sync_committee = next_sync_committee
is_better = is_better_data(
meta, attested_data.current_period_best_update[].toMeta())
if is_better:
best = newClone attested_data.current_period_best_update[]
if not best[].assign_attested_header_with_migration(attested_bid):
best = attested_data.current_period_best_update
else:
template forkyBest: untyped = best[].forky(data_fork)
forkyBest.next_sync_committee = state.data.next_sync_committee
forkyBest.next_sync_committee_branch =
attested_data.next_sync_committee_branch
if finalized_slot == forkyBest.finalized_header.beacon.slot:
@@ -638,21 +633,14 @@ proc createLightClientUpdates(
forkyBest.finality_branch.reset()
forkyBest.sync_aggregate = sync_aggregate
forkyBest.signature_slot = signature_slot
if isCommitteeFinalized:
dag.lcDataStore.db.putBestUpdate(attested_period, best)
debug "Best LC update improved",
period = attested_period, update = forkyBest
else:
let key = (attested_period, state.syncCommitteeRoot)
dag.lcDataStore.cache.pendingBest[key] = best
debug "Best LC update improved",
period = key, update = forkyBest
if newFinality and dag.lcDataStore.onLightClientFinalityUpdate != nil:
dag.lcDataStore.onLightClientFinalityUpdate(latest)
if newOptimistic and dag.lcDataStore.onLightClientOptimisticUpdate != nil:
dag.lcDataStore.onLightClientOptimisticUpdate(latest.toOptimistic)
best
proc createLightClientUpdates(
dag: ChainDAGRef,
@@ -660,60 +648,74 @@
blck: ForkyTrustedSignedBeaconBlock,
parent_bid: BlockId) =
# Attested block (parent) determines `LightClientUpdate` fork
withLcDataFork(dag.cfg.lcDataForkAtEpoch(parent_bid.slot.epoch)):
let best = withLcDataFork(dag.cfg.lcDataForkAtEpoch(parent_bid.slot.epoch)):
when lcDataFork > LightClientDataFork.None:
dag.createLightClientUpdates(state, blck, parent_bid, lcDataFork)
else:
(ref ForkedLightClientUpdate)()
dag.cacheLightClientData(state, blck.toBlockId(), best)
proc initLightClientDataCache*(dag: ChainDAGRef) =
## Initialize cached light client data
if not dag.shouldImportLcData:
return
# Prune non-finalized data
dag.lcDataStore.db.delNonFinalizedPeriodsFrom(dag.firstNonFinalizedPeriod)
# Initialize tail slot
let targetTailSlot = max(dag.targetLightClientTailSlot, dag.tail.slot)
dag.lcDataStore.cache.tailSlot = max(dag.head.slot, targetTailSlot)
# In `OnlyNew` mode, only head state needs to be cached
if dag.head.slot < dag.lcDataStore.cache.tailSlot:
return
if dag.lcDataStore.importMode == LightClientDataImportMode.OnlyNew:
withState(dag.headState):
when consensusFork >= ConsensusFork.Altair:
dag.cacheLightClientData(forkyState, dag.head.bid)
else: raiseAssert "Unreachable" # `tailSlot` cannot be before Altair
return
# Import light client data for finalized period through finalized head
let
finalizedSlot = max(dag.finalizedHead.blck.slot, targetTailSlot)
finalizedPeriod = finalizedSlot.sync_committee_period
var res =
if finalizedSlot < dag.lcDataStore.cache.tailSlot:
if dag.lcDataStore.importMode == LightClientDataImportMode.OnlyNew:
Opt[void].ok()
elif finalizedSlot >= dag.lcDataStore.cache.tailSlot:
Opt[void].ok()
else:
dag.lcDataStore.cache.tailSlot = finalizedSlot
dag.initLightClientDataForPeriod(finalizedPeriod)
else:
Opt[void].ok()
let lightClientStartTick = Moment.now()
logScope: lightClientDataMaxPeriods = dag.lcDataStore.maxPeriods
logScope:
lightClientDataMaxPeriods = dag.lcDataStore.maxPeriods
importMode = dag.lcDataStore.importMode
debug "Initializing cached LC data", res
proc isSyncAggregateCanonical(
dag: ChainDAGRef, state: ForkyHashedBeaconState,
sync_aggregate: TrustedSyncAggregate, signature_slot: Slot): bool =
if signature_slot > state.data.slot:
return false
let bid = dag.getBlockIdAtSlot(state, signature_slot).valueOr:
return false
if bid.slot != signature_slot:
return false
let bdata = dag.getForkedBlock(bid).valueOr:
return false
withBlck(bdata):
when consensusFork >= ConsensusFork.Altair:
forkyBlck.message.body.sync_aggregate == sync_aggregate
else:
false
# Build list of blocks to process.
# As it is slow to load states in descending order,
# build a reverse todo list to then process them in ascending order
let tailSlot = dag.lcDataStore.cache.tailSlot
var
blocks = newSeqOfCap[BlockId](dag.head.slot - finalizedSlot + 1)
blocks = newSeqOfCap[BlockId](dag.head.slot - tailSlot + 1)
bid = dag.head.bid
while bid.slot > finalizedSlot:
while bid.slot > tailSlot:
blocks.add bid
bid = dag.existingParent(bid).valueOr:
dag.handleUnexpectedLightClientError(bid.slot)
res.err()
break
if bid.slot == finalizedSlot:
if bid.slot == tailSlot:
blocks.add bid
# Process blocks (reuses `dag.headState`, but restores it to the current head)
@@ -721,7 +723,7 @@ proc initLightClientDataCache*(dag: ChainDAGRef) =
for i in countdown(blocks.high, blocks.low):
bid = blocks[i]
if not dag.updateExistingState(
dag.headState, bid.atSlot, save = false, cache):
dag.headState, bid.atSlot(), save = false, cache):
dag.handleUnexpectedLightClientError(bid.slot)
res.err()
continue
@@ -731,13 +733,30 @@ proc initLightClientDataCache*(dag: ChainDAGRef) =
continue
withStateAndBlck(dag.headState, bdata):
when consensusFork >= ConsensusFork.Altair:
# Create `LightClientUpdate` instances
if i < blocks.high:
if i == blocks.high:
let
period = bid.slot.sync_committee_period
best = newClone dag.lcDataStore.db.getBestUpdate(period)
withForkyUpdate(best[]):
when lcDataFork > LightClientDataFork.None:
let
attestedSlot = forkyUpdate.attested_header.beacon.slot
signatureSlot = forkyUpdate.signature_slot
if attestedSlot.sync_committee_period != period or
signatureSlot.sync_committee_period != period:
error "Invalid LC data cached", best = best[], period
best[].reset()
elif not dag.isSyncAggregateCanonical(
forkyState,
forkyUpdate.sync_aggregate.asTrusted(), # From DB, is trusted
forkyUpdate.signature_slot):
best[].reset() # Cached data is too recent or from other branch
else:
discard # Cached data is ancestor of `bid`
dag.cacheLightClientData(forkyState, bid, best)
else:
dag.createLightClientUpdates(
forkyState, forkyBlck, parentBid = blocks[i + 1])
# Cache light client data (non-finalized blocks may refer to this)
dag.cacheLightClientData(forkyState, bid)
else: raiseAssert "Unreachable"
let lightClientEndTick = Moment.now()
@@ -778,7 +797,6 @@ proc processNewBlockForLightClient*(
when consensusFork >= ConsensusFork.Altair:
template forkyState: untyped = state.forky(consensusFork)
dag.createLightClientUpdates(forkyState, signedBlock, parentBid)
dag.cacheLightClientData(forkyState, signedBlock.toBlockId())
else:
raiseAssert "Unreachable" # `tailSlot` cannot be before Altair
@@ -789,31 +807,21 @@ proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
return
if dag.head.slot < dag.lcDataStore.cache.tailSlot:
return
let
headPeriod = dag.head.slot.sync_committee_period
lowSlot = max(dag.lcDataStore.cache.tailSlot, dag.finalizedHead.slot)
lowPeriod = lowSlot.sync_committee_period
# Update `bestUpdates` from `pendingBest` to ensure light client data
# only refers to sync committees as selected by fork choice
let headPeriod = dag.head.slot.sync_committee_period
if not dag.isNextSyncCommitteeFinalized(headPeriod):
let
tailPeriod = dag.lcDataStore.cache.tailSlot.sync_committee_period
lowPeriod = max(dag.firstNonFinalizedPeriod, tailPeriod)
if headPeriod > lowPeriod:
let tmpState = assignClone(dag.headState)
for period in lowPeriod ..< headPeriod:
let
syncCommitteeRoot =
dag.syncCommitteeRootForPeriod(tmpState[], period).valueOr:
dag.handleUnexpectedLightClientError(period.start_slot)
continue
key = (period, syncCommitteeRoot)
var blck = dag.head
for period in countdown(headPeriod, lowPeriod):
blck = blck.get_ancestor((period + 1).start_slot - 1)
if blck == nil:
return
if blck.slot < lowSlot:
return
dag.lcDataStore.db.putBestUpdate(
period, dag.lcDataStore.cache.pendingBest.getOrDefault(key))
withState(dag.headState): # Common case separate to avoid `tmpState` copy
when consensusFork >= ConsensusFork.Altair:
let key = (headPeriod, forkyState.syncCommitteeRoot)
dag.lcDataStore.db.putBestUpdate(
headPeriod, dag.lcDataStore.cache.pendingBest.getOrDefault(key))
else: raiseAssert "Unreachable" # `tailSlot` cannot be before Altair
blck.slot.sync_committee_period,
dag.getLightClientData(blck.bid).current_period_best_update[])
proc processFinalizationForLightClient*(
dag: ChainDAGRef, oldFinalizedHead: BlockSlot) =
@@ -900,16 +908,6 @@ proc processFinalizationForLightClient*(
let targetTailPeriod = dag.targetLightClientTailSlot.sync_committee_period
dag.lcDataStore.db.keepPeriodsFrom(targetTailPeriod)
# Prune best `LightClientUpdate` referring to non-finalized sync committees
# that are no longer relevant, i.e., orphaned or too old
let firstNonFinalizedPeriod = dag.firstNonFinalizedPeriod
var keysToDelete: seq[(SyncCommitteePeriod, Eth2Digest)]
for (period, committeeRoot) in dag.lcDataStore.cache.pendingBest.keys:
if period < firstNonFinalizedPeriod:
keysToDelete.add (period, committeeRoot)
for key in keysToDelete:
dag.lcDataStore.cache.pendingBest.del key
proc getLightClientBootstrap(
dag: ChainDAGRef,
header: ForkyLightClientHeader): ForkedLightClientBootstrap =
@@ -1003,7 +1001,8 @@ proc getLightClientUpdateForPeriod*(
if not dag.lcDataStore.serve:
return default(ForkedLightClientUpdate)
if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand:
if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and
period < dag.finalizedHead.blck.slot.sync_committee_period:
if dag.initLightClientUpdateForPeriod(period).isErr:
return default(ForkedLightClientUpdate)
let


@@ -743,3 +743,7 @@ template asTrusted*(
SigVerifiedSignedBeaconBlock |
MsgTrustedSignedBeaconBlock): TrustedSignedBeaconBlock =
isomorphicCast[TrustedSignedBeaconBlock](x)
template asTrusted*(
x: SyncAggregate): TrustedSyncAggregate =
isomorphicCast[TrustedSyncAggregate](x)