add option to collect light client data (#3474)

Light clients require full nodes to serve additional data so that they
can stay in sync with the network. This patch adds a new launch option
`--import-light-client-data` to configure which data to make available.
For now, the data is only kept in memory; it is not yet persisted.
Note that the data is only collected locally; a separate patch is needed
to actually make it available over the network. `--serve-light-client-data`
will be used for serving that data, but is not yet functional outside tests.
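For illustration (the exact command line is an assumption, not part of this
patch): an operator who only wants data for newly imported blocks would start
the beacon node with `--import-light-client-data=only-new`, optionally adding
the not yet functional `--serve-light-client-data` flag.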
Etan Kissling 2022-03-11 21:28:10 +01:00 committed by GitHub
parent 21b71bd29c
commit ae408c279a
10 changed files with 1273 additions and 10 deletions


@ -252,6 +252,13 @@ OK: 3/3 Fail: 0/3 Skip: 0/3
+ [SCRYPT] Network Keystore encryption OK
```
OK: 9/9 Fail: 0/9 Skip: 0/9
## Light client [Preset: mainnet]
```diff
+ Init from checkpoint OK
+ Light client sync OK
+ Pre-Altair OK
```
OK: 3/3 Fail: 0/3 Skip: 0/3
## ListKeys requests [Preset: mainnet]
```diff
+ Correct token provided [Preset: mainnet] OK
@ -513,4 +520,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
OK: 1/1 Fail: 0/1 Skip: 0/1
---TOTAL---
OK: 282/287 Fail: 0/287 Skip: 5/287
OK: 285/290 Fail: 0/290 Skip: 5/290


@ -28,6 +28,9 @@ import
./validators/slashing_protection_common,
./filepath
from consensus_object_pools/block_pools_types_light_client
import ImportLightClientData
export
uri, nat, enr,
defaultEth2TcpPort, enabledLogLevel, ValidIpAddress,
@ -419,6 +422,19 @@ type
desc: "A file specifying the authorization token required for accessing the keymanager API"
name: "keymanager-token-file" }: Option[InputFile]
serveLightClientData* {.
hidden
desc: "BETA: Serve data for enabling light clients to stay in sync with the network"
defaultValue: false
name: "serve-light-client-data"}: bool
importLightClientData* {.
hidden
desc: "BETA: Which classes of light client data to import. " &
"Must be one of: none, only-new, full (slow startup), on-demand (may miss validator duties)"
defaultValue: ImportLightClientData.None
name: "import-light-client-data"}: ImportLightClientData
inProcessValidators* {.
desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process.


@ -11,7 +11,7 @@ import
chronicles,
stew/[assign2, results],
../spec/[forks, signatures, signatures_batch, state_transition],
"."/[block_dag, blockchain_dag]
"."/[block_dag, blockchain_dag, blockchain_dag_light_client]
export results, signatures_batch, block_dag, blockchain_dag
@ -84,6 +84,9 @@ proc addResolvedHeadBlock(
putBlockDur = putBlockTick - startTick,
epochRefDur = epochRefTick - putBlockTick
# Update light client data
dag.processNewBlockForLightClient(state, trustedBlock, parent)
# Notify others of the new block before processing the quarantine, such that
# notifications for parents happens before those of the children
if onBlockAdded != nil:


@ -17,11 +17,11 @@ import
../spec/datatypes/[phase0, altair, bellatrix],
".."/beacon_chain_db,
../validators/validator_monitor,
./block_dag
./block_dag, block_pools_types_light_client
export
options, sets, tables, hashes, helpers, beacon_chain_db, block_dag,
validator_monitor
block_pools_types_light_client, validator_monitor
# ChainDAG and types related to forming a DAG of blocks, keeping track of their
# relationships and allowing various forms of lookups
@ -178,6 +178,12 @@ type
cfg*: RuntimeConfig
serveLightClientData*: bool
## Whether to make local light client data available or not
importLightClientData*: ImportLightClientData
## Which classes of light client data to import
epochRefs*: array[32, EpochRef]
## Cached information about a particular epoch ending with the given
## block - we limit the number of held EpochRefs to put a cap on
@ -189,6 +195,14 @@ type
## value with other components which don't have access to the
## full ChainDAG.
# -----------------------------------
# Data to enable light clients to stay in sync with the network
lightClientCache*: LightClientCache
# -----------------------------------
# Callbacks
onBlockAdded*: OnBlockCallback
## On block added callback
onHeadChanged*: OnHeadCallback
@ -197,6 +211,8 @@ type
## On beacon chain reorganization
onFinHappened*: OnFinalizedCallback
## On finalization callback
onOptimisticLightClientUpdate*: OnOptimisticLightClientUpdateCallback
## On `OptimisticLightClientUpdate` updated callback
headSyncCommittees*: SyncCommitteeCache
## A cache of the sync committees, as they appear in the head state -


@ -0,0 +1,94 @@
# beacon_chain
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
# This implements the pre-release proposal of the libp2p based light client sync
# protocol. See https://github.com/ethereum/consensus-specs/pull/2802
import
# Status libraries
stew/bitops2,
# Beacon chain internals
../spec/datatypes/altair,
./block_dag
type
OnOptimisticLightClientUpdateCallback* =
proc(data: OptimisticLightClientUpdate) {.gcsafe, raises: [Defect].}
ImportLightClientData* {.pure.} = enum
## Controls which classes of light client data are imported.
None = "none"
## Import no light client data.
OnlyNew = "only-new"
## Import only new light client data.
Full = "full"
## Import light client data for entire weak subjectivity period.
OnDemand = "on-demand"
## Do not precompute historic data. Slow, and may miss validator duties.
CachedLightClientData* = object
## Cached data from historical non-finalized states to improve speed when
## creating future `LightClientUpdate` and `LightClientBootstrap` instances.
current_sync_committee_branch*:
array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
next_sync_committee_branch*:
array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
finalized_bid*: BlockId
finality_branch*:
array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
CachedLightClientBootstrap* = object
## Cached data from historical finalized epoch boundary blocks to improve
## speed when creating future `LightClientBootstrap` instances.
current_sync_committee_branch*:
array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
LightClientCache* = object
data*: Table[BlockId, CachedLightClientData]
## Cached data for creating future `LightClientUpdate` instances.
## Key is the block ID of which the post state was used to get the data.
## Data is stored for the most recent 4 finalized checkpoints, as well as
## for all non-finalized blocks.
bootstrap*: Table[Slot, CachedLightClientBootstrap]
## Cached data for creating future `LightClientBootstrap` instances.
## Key is the block slot of which the post state was used to get the data.
## Data is stored for finalized epoch boundary blocks.
latestCheckpoints*: array[4, Checkpoint]
## Keeps track of the latest four `finalized_checkpoint` references
## leading to `finalizedHead`. Used to prune `data`.
## Non-finalized states may only refer to these checkpoints.
lastCheckpointIndex*: int
## Last index that was modified in `latestCheckpoints`.
bestUpdates*: Table[SyncCommitteePeriod, altair.LightClientUpdate]
## Stores the `LightClientUpdate` with the most `sync_committee_bits` per
## `SyncCommitteePeriod`. Updates with a finality proof have precedence.
pendingBestUpdates*:
Table[(SyncCommitteePeriod, Eth2Digest), altair.LightClientUpdate]
## Same as `bestUpdates`, but for `SyncCommitteePeriod` with
## `next_sync_committee` that are not finalized. Key is `(period,
## hash_tree_root(current_sync_committee | next_sync_committee))`.
latestUpdate*: altair.LightClientUpdate
## Tracks the `LightClientUpdate` for the latest slot. This may be older
## than head for empty slots or if not signed by sync committee.
optimisticUpdate*: OptimisticLightClientUpdate
## Tracks the `OptimisticLightClientUpdate` for the latest slot. This may
## be older than head for empty slots or if not signed by sync committee.
importTailSlot*: Slot
## The earliest slot for which light client data is collected.
## Only relevant for `ImportLightClientData.OnlyNew`.
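As an aside (not part of the diff): the fixed branch lengths above follow
directly from the Altair generalized indices. A minimal sanity check, assuming
the spec values `CURRENT_SYNC_COMMITTEE_INDEX = 54`,
`NEXT_SYNC_COMMITTEE_INDEX = 55` and `FINALIZED_ROOT_INDEX = 105`:

```nim
import stew/bitops2  # provides log2trunc

# Illustrative only: Merkle branch depths used by the arrays above.
doAssert log2trunc(54'u64) == 5   # current_sync_committee_branch
doAssert log2trunc(55'u64) == 5   # next_sync_committee_branch
doAssert log2trunc(105'u64) == 6  # finality_branch
```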


@ -477,11 +477,23 @@ proc updateBeaconMetrics(state: StateData, cache: var StateCache) =
beacon_active_validators.set(active_validators)
beacon_current_active_validators.set(active_validators)
import blockchain_dag_light_client
export
blockchain_dag_light_client.getBestLightClientUpdateForPeriod,
blockchain_dag_light_client.getLatestLightClientUpdate,
blockchain_dag_light_client.getOptimisticLightClientUpdate,
blockchain_dag_light_client.getLightClientBootstrap
proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
onReorgCb: OnReorgCallback = nil,
onFinCb: OnFinalizedCallback = nil): ChainDAGRef =
onFinCb: OnFinalizedCallback = nil,
onOptimisticLCUpdateCb: OnOptimisticLightClientUpdateCallback = nil,
serveLightClientData = false,
importLightClientData = ImportLightClientData.None
): ChainDAGRef =
# TODO we require that the db contains both a head and a tail block -
# asserting here doesn't seem like the right way to go about it however..
@ -653,11 +665,14 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
# allow skipping some validation.
updateFlags: {verifyFinalization} * updateFlags,
cfg: cfg,
serveLightClientData: serveLightClientData,
importLightClientData: importLightClientData,
onBlockAdded: onBlockCb,
onHeadChanged: onHeadCb,
onReorgHappened: onReorgCb,
onFinHappened: onFinCb
onFinHappened: onFinCb,
onOptimisticLightClientUpdate: onOptimisticLCUpdateCb
)
block: # Initialize dag states
@ -779,6 +794,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
stateDur = stateTick - summariesTick,
indexDur = Moment.now() - stateTick
dag.initLightClientCache()
dag
template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
@ -1286,6 +1303,9 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
var cur = head.atSlot()
while not cur.blck.isAncestorOf(dag.finalizedHead.blck):
# Update light client data
dag.deleteLightClientData(cur.blck.bid)
dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`
if cur.isProposed():
@ -1485,6 +1505,9 @@ proc updateHead*(
doAssert (not finalizedHead.blck.isNil),
"Block graph should always lead to a finalized block"
# Update light client data
dag.processHeadChangeForLightClient()
let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
if not(isAncestor):
notice "Updated head block with chain reorg",
@ -1571,6 +1594,9 @@ proc updateHead*(
# therefore no longer be considered as part of the chain we're following
dag.pruneBlocksDAG()
# Update light client data
dag.processFinalizationForLightClient()
# Send notification about new finalization point via callback.
if not(isNil(dag.onFinHappened)):
let stateRoot =


@ -0,0 +1,901 @@
# beacon_chain
# Copyright (c) 2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.push raises: [Defect].}
# This implements the pre-release proposal of the libp2p based light client sync
# protocol. See https://github.com/ethereum/consensus-specs/pull/2802
import
# Standard library
std/[algorithm, sequtils],
# Status libraries
stew/[bitops2, objects],
chronos,
# Beacon chain internals
../spec/datatypes/[phase0, altair, bellatrix],
"."/[block_pools_types, blockchain_dag]
logScope: topics = "chaindag"
type
HashedBeaconStateWithSyncCommittee =
bellatrix.HashedBeaconState |
altair.HashedBeaconState
TrustedSignedBeaconBlockWithSyncAggregate =
bellatrix.TrustedSignedBeaconBlock |
altair.TrustedSignedBeaconBlock
template nextEpochBoundarySlot(slot: Slot): Slot =
## Compute the first possible epoch boundary state slot of a `Checkpoint`
## referring to a block at given slot.
(slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot
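A quick aside on the rounding (illustrative only, assuming the mainnet
`SLOTS_PER_EPOCH = 32`):

```nim
# Illustrative only: rounds a slot up to the next epoch boundary slot.
doAssert nextEpochBoundarySlot(Slot(37)) == Slot(64)  # mid-epoch rounds up
doAssert nextEpochBoundarySlot(Slot(64)) == Slot(64)  # a boundary maps to itself
```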
func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =
## Compute the earliest slot for which light client data is retained.
let
minSupportedSlot = max(
dag.cfg.ALTAIR_FORK_EPOCH.start_slot,
dag.lightClientCache.importTailSlot)
currentSlot = getStateField(dag.headState.data, slot)
if currentSlot < minSupportedSlot:
return minSupportedSlot
let
MIN_EPOCHS_FOR_BLOCK_REQUESTS =
dag.cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY +
dag.cfg.CHURN_LIMIT_QUOTIENT div 2
MIN_SLOTS_FOR_BLOCK_REQUESTS =
MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH
minSlot = max(minSupportedSlot, dag.tail.slot)
if currentSlot - minSlot < MIN_SLOTS_FOR_BLOCK_REQUESTS:
return minSlot
let earliestSlot = currentSlot - MIN_SLOTS_FOR_BLOCK_REQUESTS
max(earliestSlot.sync_committee_period.start_slot, minSlot)
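For intuition (illustrative arithmetic only, using the mainnet preset values
`MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256`, `CHURN_LIMIT_QUOTIENT = 65536`
and `SLOTS_PER_EPOCH = 32`), the retention window computed above spans:

```nim
# Illustrative only: size of the light client data retention window (mainnet).
const
  MIN_EPOCHS_FOR_BLOCK_REQUESTS = 256'u64 + 65536'u64 div 2  # 33024 epochs
  MIN_SLOTS_FOR_BLOCK_REQUESTS = MIN_EPOCHS_FOR_BLOCK_REQUESTS * 32
doAssert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024
doAssert MIN_SLOTS_FOR_BLOCK_REQUESTS == 1_056_768  # about 147 days of slots
```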
proc currentSyncCommitteeForPeriod(
dag: ChainDAGRef,
tmpState: var StateData,
period: SyncCommitteePeriod): SyncCommittee =
## Fetch a `SyncCommittee` for a given sync committee period.
## For non-finalized periods, follow the chain as selected by fork choice.
let earliestSlot = dag.computeEarliestLightClientSlot
doAssert period >= earliestSlot.sync_committee_period
let
periodStartSlot = period.start_slot
syncCommitteeSlot = max(periodStartSlot, earliestSlot)
dag.withUpdatedState(tmpState, dag.getBlockAtSlot(syncCommitteeSlot)) do:
withState(stateData.data):
when stateFork >= BeaconStateFork.Altair:
state.data.current_sync_committee
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
template syncCommitteeRoot(
state: HashedBeaconStateWithSyncCommittee): Eth2Digest =
## Compute a root to uniquely identify `current_sync_committee` and
## `next_sync_committee`.
withEth2Hash:
h.update state.data.current_sync_committee.hash_tree_root().data
h.update state.data.next_sync_committee.hash_tree_root().data
proc syncCommitteeRootForPeriod(
dag: ChainDAGRef,
tmpState: var StateData,
period: SyncCommitteePeriod): Eth2Digest =
## Compute a root to uniquely identify `current_sync_committee` and
## `next_sync_committee` for a given sync committee period.
## For non-finalized periods, follow the chain as selected by fork choice.
let earliestSlot = dag.computeEarliestLightClientSlot
doAssert period >= earliestSlot.sync_committee_period
let
periodStartSlot = period.start_slot
syncCommitteeSlot = max(periodStartSlot, earliestSlot)
dag.withUpdatedState(tmpState, dag.getBlockAtSlot(syncCommitteeSlot)) do:
withState(stateData.data):
when stateFork >= BeaconStateFork.Altair:
state.syncCommitteeRoot
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
proc getLightClientData(
dag: ChainDAGRef,
bid: BlockId): CachedLightClientData =
## Fetch cached light client data about a given block.
## Data must be cached (`cacheLightClientData`) before calling this function.
try: dag.lightClientCache.data[bid]
except KeyError: raiseAssert "Unreachable"
template getLightClientData(
dag: ChainDAGRef,
blck: BlockRef): CachedLightClientData =
getLightClientData(dag, blck.bid)
proc cacheLightClientData*(
dag: ChainDAGRef,
state: HashedBeaconStateWithSyncCommittee,
blck: TrustedSignedBeaconBlockWithSyncAggregate,
isNew = true) =
## Cache data for a given block and its post-state to speed up creating future
## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
## block and state.
var current_sync_committee_branch {.noinit.}:
array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
state.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX, current_sync_committee_branch)
var next_sync_committee_branch {.noinit.}:
array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
state.data.build_proof(
altair.NEXT_SYNC_COMMITTEE_INDEX, next_sync_committee_branch)
var finality_branch {.noinit.}:
array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
state.data.build_proof(
altair.FINALIZED_ROOT_INDEX, finality_branch)
template finalized_checkpoint(): auto = state.data.finalized_checkpoint
let
bid =
BlockId(root: blck.root, slot: blck.message.slot)
finalized_bid =
dag.getBlockIdAtSlot(finalized_checkpoint.epoch.start_slot).bid
if dag.lightClientCache.data.hasKeyOrPut(
bid,
CachedLightClientData(
current_sync_committee_branch:
current_sync_committee_branch,
next_sync_committee_branch:
next_sync_committee_branch,
finalized_bid:
finalized_bid,
finality_branch:
finality_branch)):
doAssert false, "Redundant `cacheLightClientData` call"
proc deleteLightClientData*(dag: ChainDAGRef, bid: BlockId) =
## Delete cached light client data for a given block. This needs to be called
## when a block becomes unreachable due to finalization of a different fork.
if dag.importLightClientData == ImportLightClientData.None:
return
dag.lightClientCache.data.del bid
template lazy_header(name: untyped): untyped {.dirty.} =
## `createLightClientUpdates` helper to lazily load a known block header.
var `name ptr`: ptr[BeaconBlockHeader]
template `assign name`(target: var BeaconBlockHeader,
blockParam: BlockId | BlockRef): untyped =
if `name ptr` != nil:
target = `name ptr`[]
else:
let bid =
when blockParam is BlockId:
blockParam
else:
blockParam.bid
target = toBeaconBlockHeader(dag.getForkedBlock(bid).get)
`name ptr` = addr target
template lazy_data(name: untyped): untyped {.dirty.} =
## `createLightClientUpdates` helper to lazily load cached light client state.
var `name` {.noinit.}: CachedLightClientData
`name`.finalized_bid.slot = FAR_FUTURE_SLOT
template `load name`(bid: BlockId | BlockRef): untyped =
if `name`.finalized_bid.slot == FAR_FUTURE_SLOT:
`name` = dag.getLightClientData(bid)
proc createLightClientUpdates(
dag: ChainDAGRef,
state: HashedBeaconStateWithSyncCommittee,
blck: TrustedSignedBeaconBlockWithSyncAggregate,
parent: BlockRef) =
## Create `LightClientUpdate` and `OptimisticLightClientUpdate` instances for
## a given block and its post-state, and keep track of best / latest ones.
## Data about the parent block's post-state and its `finalized_checkpoint`'s
## block's post-state needs to be cached (`cacheLightClientData`) before
## calling this function.
# Parent needs to be known to continue
if parent == nil:
return
# Verify sync committee has sufficient participants
template sync_aggregate(): auto = blck.message.body.sync_aggregate
template sync_committee_bits(): auto = sync_aggregate.sync_committee_bits
let num_active_participants = countOnes(sync_committee_bits).uint64
if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
return
# Verify attested block (parent) is recent enough and that state is available
let
earliest_slot = dag.computeEarliestLightClientSlot
attested_slot = parent.slot
if attested_slot < earliest_slot:
return
# Verify signature does not skip a sync committee period
let
signature_slot = blck.message.slot
signature_period = signature_slot.sync_committee_period
attested_period = attested_slot.sync_committee_period
if signature_period > attested_period + 1:
return
# Update to new `OptimisticLightClientUpdate` if it attests to a later slot
lazy_header(attested_header)
template optimistic_update(): auto = dag.lightClientCache.optimisticUpdate
if attested_slot > optimistic_update.attested_header.slot:
optimistic_update.attested_header
.assign_attested_header(parent)
optimistic_update.sync_aggregate =
isomorphicCast[SyncAggregate](sync_aggregate)
optimistic_update.fork_version =
state.data.fork.current_version
optimistic_update.is_signed_by_next_sync_committee =
signature_period == attested_period + 1
if dag.onOptimisticLightClientUpdate != nil:
dag.onOptimisticLightClientUpdate(optimistic_update)
# Update to new latest `LightClientUpdate` if it attests to a later slot
lazy_data(attested_data)
lazy_data(finalized_data)
lazy_header(finalized_header)
template latest_update(): auto = dag.lightClientCache.latestUpdate
if attested_slot > latest_update.attested_header.slot:
latest_update.attested_header
.assign_attested_header(parent)
latest_update.sync_aggregate =
isomorphicCast[SyncAggregate](sync_aggregate)
latest_update.fork_version =
state.data.fork.current_version
load_attested_data(parent)
let finalized_slot = attested_data.finalized_bid.slot
if finalized_slot + UPDATE_TIMEOUT < attested_slot or
finalized_slot < earliest_slot:
latest_update.finalized_header = BeaconBlockHeader()
latest_update.finality_branch.fill(Eth2Digest())
if signature_period == attested_period + 1:
latest_update.next_sync_committee = SyncCommittee()
latest_update.next_sync_committee_branch.fill(Eth2Digest())
else:
latest_update.next_sync_committee =
state.data.next_sync_committee
latest_update.next_sync_committee_branch =
attested_data.next_sync_committee_branch
else:
latest_update.finalized_header
.assign_finalized_header(attested_data.finalized_bid)
latest_update.finality_branch =
attested_data.finality_branch
if signature_period == finalized_slot.sync_committee_period + 1:
latest_update.next_sync_committee = SyncCommittee()
latest_update.next_sync_committee_branch.fill(Eth2Digest())
else:
load_finalized_data(attested_data.finalized_bid)
latest_update.next_sync_committee =
state.data.next_sync_committee
latest_update.next_sync_committee_branch =
finalized_data.next_sync_committee_branch
# Update best `LightClientUpdate` for current period if it improved
if signature_period == attested_period:
let isNextSyncCommitteeFinalized =
signature_period.start_slot <= dag.finalizedHead.slot
var best_update =
if isNextSyncCommitteeFinalized:
dag.lightClientCache.bestUpdates.getOrDefault(signature_period)
else:
let key = (signature_period, state.syncCommitteeRoot)
dag.lightClientCache.pendingBestUpdates.getOrDefault(key)
type Verdict = enum
unknown
new_update_is_worse
new_update_is_better
var verdict = unknown
# If no best update has been recorded, new update is better
template best_sync_aggregate(): auto = best_update.sync_aggregate
let best_num_active_participants =
countOnes(best_sync_aggregate.sync_committee_bits).uint64
if best_num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
verdict = new_update_is_better
else:
# If finality changes, the finalized update is better
template finalized_period_at_signature(): auto =
state.data.finalized_checkpoint.epoch.sync_committee_period
template best_attested_slot(): auto =
best_update.attested_header.slot
if best_update.finality_branch.isZeroMemory:
if (attested_slot > dag.finalizedHead.slot or
attested_slot > best_attested_slot) and
signature_period == finalized_period_at_signature:
load_attested_data(parent)
let finalized_slot = attested_data.finalized_bid.slot
if signature_period == finalized_slot.sync_committee_period and
finalized_slot >= earliest_slot:
verdict = new_update_is_better
elif attested_slot > dag.finalizedHead.slot or
attested_slot < best_attested_slot:
if signature_period == finalized_period_at_signature:
load_attested_data(parent)
let finalized_slot = attested_data.finalized_bid.slot
if signature_period != finalized_slot.sync_committee_period or
finalized_slot < earliest_slot:
verdict = new_update_is_worse
else:
verdict = new_update_is_worse
if verdict == unknown:
# If participation changes, higher participation is better
if num_active_participants < best_num_active_participants:
verdict = new_update_is_worse
elif num_active_participants > best_num_active_participants:
verdict = new_update_is_better
else:
# Older updates are better
if attested_slot >= best_attested_slot:
verdict = new_update_is_worse
else:
verdict = new_update_is_better
if verdict == new_update_is_better:
best_update.attested_header
.assign_attested_header(parent)
best_update.sync_aggregate =
isomorphicCast[SyncAggregate](sync_aggregate)
best_update.fork_version =
state.data.fork.current_version
load_attested_data(parent)
let finalized_slot = attested_data.finalized_bid.slot
if signature_period != finalized_slot.sync_committee_period or
finalized_slot < earliest_slot:
best_update.finalized_header = BeaconBlockHeader()
best_update.finality_branch.fill(Eth2Digest())
best_update.next_sync_committee =
state.data.next_sync_committee
best_update.next_sync_committee_branch =
attested_data.next_sync_committee_branch
else:
best_update.finalized_header
.assign_finalized_header(attested_data.finalized_bid)
best_update.finality_branch =
attested_data.finality_branch
load_finalized_data(attested_data.finalized_bid)
best_update.next_sync_committee =
state.data.next_sync_committee
best_update.next_sync_committee_branch =
finalized_data.next_sync_committee_branch
if isNextSyncCommitteeFinalized:
dag.lightClientCache.bestUpdates[signature_period] = best_update
debug "Best update for period improved",
period = signature_period, update = best_update
else:
let key = (signature_period, state.syncCommitteeRoot)
dag.lightClientCache.pendingBestUpdates[key] = best_update
debug "Best update for period improved",
period = key, update = best_update
proc processNewBlockForLightClient*(
dag: ChainDAGRef,
state: StateData,
signedBlock: ForkyTrustedSignedBeaconBlock,
parent: BlockRef) =
## Update light client data with information from a new block.
if dag.importLightClientData == ImportLightClientData.None:
return
if signedBlock.message.slot < dag.computeEarliestLightClientSlot:
return
when signedBlock is bellatrix.TrustedSignedBeaconBlock:
dag.cacheLightClientData(state.data.bellatrixData, signedBlock)
dag.createLightClientUpdates(state.data.bellatrixData, signedBlock, parent)
elif signedBlock is altair.TrustedSignedBeaconBlock:
dag.cacheLightClientData(state.data.altairData, signedBlock)
dag.createLightClientUpdates(state.data.altairData, signedBlock, parent)
elif signedBlock is phase0.TrustedSignedBeaconBlock:
discard
else:
{.error: "Unreachable".}
proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
## Update light client data to account for a new head block.
## Note that `dag.finalizedHead` is not yet updated when this is called.
if dag.importLightClientData == ImportLightClientData.None:
return
if dag.head.slot < dag.computeEarliestLightClientSlot:
return
let headPeriod = dag.head.slot.sync_committee_period
if headPeriod.start_slot > dag.finalizedHead.slot:
let finalizedPeriod = dag.finalizedHead.slot.sync_committee_period
if headPeriod > finalizedPeriod + 1:
var tmpState = assignClone(dag.headState)
for period in finalizedPeriod + 1 ..< headPeriod:
let key = (period, dag.syncCommitteeRootForPeriod(tmpState[], period))
dag.lightClientCache.bestUpdates[period] =
dag.lightClientCache.pendingBestUpdates.getOrDefault(key)
withState(dag.headState.data):
when stateFork >= BeaconStateFork.Altair:
let key = (headPeriod, state.syncCommitteeRoot)
dag.lightClientCache.bestUpdates[headPeriod] =
dag.lightClientCache.pendingBestUpdates.getOrDefault(key)
else: raiseAssert "Unreachable"
proc processFinalizationForLightClient*(dag: ChainDAGRef) =
## Prune cached data that is no longer useful for creating future
## `LightClientUpdate` and `LightClientBootstrap` instances.
## This needs to be called whenever `finalized_checkpoint` changes.
if dag.importLightClientData == ImportLightClientData.None:
return
let
altairStartSlot = dag.cfg.ALTAIR_FORK_EPOCH.start_slot
finalizedSlot = dag.finalizedHead.slot
if finalizedSlot < altairStartSlot:
return
let earliestSlot = dag.computeEarliestLightClientSlot
# Keep track of latest four finalized checkpoints
let
lastIndex = dag.lightClientCache.lastCheckpointIndex
lastCheckpoint = addr dag.lightClientCache.latestCheckpoints[lastIndex]
if dag.finalizedHead.slot.epoch != lastCheckpoint.epoch or
dag.finalizedHead.blck.root != lastCheckpoint.root:
let
nextIndex = (lastIndex + 1) mod dag.lightClientCache.latestCheckpoints.len
nextCheckpoint = addr dag.lightClientCache.latestCheckpoints[nextIndex]
nextCheckpoint[].epoch = dag.finalizedHead.slot.epoch
nextCheckpoint[].root = dag.finalizedHead.blck.root
dag.lightClientCache.lastCheckpointIndex = nextIndex
# Cache `LightClientBootstrap` for newly finalized epoch boundary blocks.
# An epoch boundary block is the block proposed at the initial slot of an
# epoch, or, if that slot was empty, the most recent block proposed before it
let lowSlot = max(lastCheckpoint.epoch.start_slot, earliestSlot)
var boundarySlot = dag.finalizedHead.slot
while boundarySlot >= lowSlot:
let blck = dag.getBlockAtSlot(boundarySlot).blck
if blck.slot >= lowSlot:
dag.lightClientCache.bootstrap[blck.slot] =
CachedLightClientBootstrap(
current_sync_committee_branch:
dag.getLightClientData(blck.bid).current_sync_committee_branch)
boundarySlot = blck.slot.nextEpochBoundarySlot
if boundarySlot < SLOTS_PER_EPOCH:
break
boundarySlot -= SLOTS_PER_EPOCH
# Prune light client data that is no longer relevant,
# i.e., can no longer be referred to by future updates, or is too old
var bidsToDelete: seq[BlockId]
for bid, data in dag.lightClientCache.data:
if bid.slot >= earliestSlot:
if bid.slot >= finalizedSlot:
continue
if dag.lightClientCache.latestCheckpoints.anyIt(bid.root == it.root):
continue
bidsToDelete.add bid
for bid in bidsToDelete:
dag.lightClientCache.data.del bid
# Prune bootstrap data that is no longer relevant
var slotsToDelete: seq[Slot]
for slot in dag.lightClientCache.bootstrap.keys:
if slot < earliestSlot:
slotsToDelete.add slot
for slot in slotsToDelete:
dag.lightClientCache.bootstrap.del slot
# Prune best `LightClientUpdate` that are no longer relevant
let earliestPeriod = earliestSlot.sync_committee_period
var periodsToDelete: seq[SyncCommitteePeriod]
for period in dag.lightClientCache.bestUpdates.keys:
if period < earliestPeriod:
periodsToDelete.add period
for period in periodsToDelete:
dag.lightClientCache.bestUpdates.del period
# Prune best `LightClientUpdate` referring to non-finalized sync committees
# that are no longer relevant, i.e., orphaned or too old
let finalizedPeriod = finalizedSlot.sync_committee_period
var keysToDelete: seq[(SyncCommitteePeriod, Eth2Digest)]
for (period, committeeRoot) in dag.lightClientCache.pendingBestUpdates.keys:
if period <= finalizedPeriod:
keysToDelete.add (period, committeeRoot)
for key in keysToDelete:
dag.lightClientCache.pendingBestUpdates.del key
proc initBestLightClientUpdateForPeriod(
dag: ChainDAGRef, period: SyncCommitteePeriod) =
## Compute and cache the `LightClientUpdate` with the most sync committee
## signatures (i.e., participation) for a given sync committee period.
let periodStartSlot = period.start_slot
if periodStartSlot > dag.finalizedHead.slot:
return
let
earliestSlot = dag.computeEarliestLightClientSlot
periodEndSlot = periodStartSlot + SLOTS_PER_SYNC_COMMITTEE_PERIOD - 1
if periodEndSlot < earliestSlot:
return
if dag.lightClientCache.bestUpdates.hasKey(period):
return
let startTick = Moment.now()
debug "Computing best update for period", period
defer:
let endTick = Moment.now()
debug "Best update for period computed",
period, update = dag.lightClientCache.bestUpdates.getOrDefault(period),
computeDur = endTick - startTick
proc maxParticipantsBlock(highBlck: BlockRef, lowSlot: Slot): BlockRef =
## Determine the earliest block with most sync committee signatures among
## ancestors of `highBlck` with at least `lowSlot` as parent block slot.
## Return `nil` if no block with `MIN_SYNC_COMMITTEE_PARTICIPANTS` is found.
var
maxParticipants = 0
maxBlockRef: BlockRef
blockRef = highBlck
while blockRef.parent != nil and blockRef.parent.slot >= lowSlot:
let
bdata = dag.getForkedBlock(blockRef.bid).get
numParticipants =
withBlck(bdata):
when stateFork >= BeaconStateFork.Altair:
countOnes(blck.message.body.sync_aggregate.sync_committee_bits)
else: raiseAssert "Unreachable"
if numParticipants >= maxParticipants:
maxParticipants = numParticipants
maxBlockRef = blockRef
blockRef = blockRef.parent
if maxParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
maxBlockRef = nil
maxBlockRef
# Determine the block in the period with highest sync committee participation
let
lowSlot = max(periodStartSlot, earliestSlot)
highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
highBlck = dag.getBlockAtSlot(highSlot).blck
bestNonFinalizedRef = maxParticipantsBlock(highBlck, lowSlot)
if bestNonFinalizedRef == nil:
dag.lightClientCache.bestUpdates[period] = default(altair.LightClientUpdate)
return
# The block with highest participation may refer to a `finalized_checkpoint`
# in a different sync committee period. If that is the case, search for a
# later block with a `finalized_checkpoint` within the given sync committee
# period, despite it having a lower sync committee participation
var
tmpState = assignClone(dag.headState)
bestFinalizedRef = bestNonFinalizedRef
finalizedBlck {.noinit.}: BlockRef
while bestFinalizedRef != nil:
let
finalizedEpoch = block:
dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
withState(stateData.data):
when stateFork >= BeaconStateFork.Altair:
state.data.finalized_checkpoint.epoch
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
finalizedEpochStartSlot = finalizedEpoch.start_slot
if finalizedEpochStartSlot >= lowSlot:
finalizedBlck = dag.getBlockAtSlot(finalizedEpochStartSlot).blck
if finalizedBlck.slot >= lowSlot:
break
bestFinalizedRef = maxParticipantsBlock(highBlck, bestFinalizedRef.slot + 1)
# If a finalized block has been found within the sync committee period,
# create a `LightClientUpdate` for that one. Otherwise, create a non-finalized
# `LightClientUpdate`
var update {.noinit.}: LightClientUpdate
if bestFinalizedRef != nil:
# Fill data from attested block
dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata):
when stateFork >= BeaconStateFork.Altair:
update.attested_header =
blck.toBeaconBlockHeader
state.data.build_proof(
altair.FINALIZED_ROOT_INDEX, update.finality_branch)
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
# Fill data from signature block
let bdata = dag.getForkedBlock(bestFinalizedRef.bid).get
withBlck(bdata):
when stateFork >= BeaconStateFork.Altair:
update.sync_aggregate =
isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
else: raiseAssert "Unreachable"
update.fork_version =
dag.cfg.forkAtEpoch(bestFinalizedRef.slot.epoch).current_version
# Fill data from finalized block
dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata):
when stateFork >= BeaconStateFork.Altair:
update.next_sync_committee =
state.data.next_sync_committee
state.data.build_proof(
altair.NEXT_SYNC_COMMITTEE_INDEX, update.next_sync_committee_branch)
update.finalized_header =
blck.toBeaconBlockHeader
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
else:
# Fill data from attested block
dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do:
let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata):
when stateFork >= BeaconStateFork.Altair:
update.attested_header =
blck.toBeaconBlockHeader
update.next_sync_committee =
state.data.next_sync_committee
state.data.build_proof(
altair.NEXT_SYNC_COMMITTEE_INDEX, update.next_sync_committee_branch)
update.finalized_header = BeaconBlockHeader()
update.finality_branch.fill(Eth2Digest())
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
# Fill data from signature block
let bdata = dag.getForkedBlock(bestNonFinalizedRef.bid).get
withBlck(bdata):
when stateFork >= BeaconStateFork.Altair:
update.sync_aggregate =
isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
else: raiseAssert "Unreachable"
update.fork_version =
dag.cfg.forkAtEpoch(bestNonFinalizedRef.slot.epoch).current_version
dag.lightClientCache.bestUpdates[period] = update
proc initLightClientBootstrapForPeriod(
dag: ChainDAGRef,
period: SyncCommitteePeriod) =
## Compute and cache `LightClientBootstrap` data for all epoch boundary blocks
## within a given sync committee period.
let periodStartSlot = period.start_slot
if periodStartSlot > dag.finalizedHead.slot:
return
let
earliestSlot = dag.computeEarliestLightClientSlot
periodEndSlot = periodStartSlot + SLOTS_PER_SYNC_COMMITTEE_PERIOD - 1
if periodEndSlot < earliestSlot:
return
let startTick = Moment.now()
debug "Caching bootstrap data for period", period
defer:
let endTick = Moment.now()
debug "Bootstrap data for period cached", period,
cacheDur = endTick - startTick
let
lowSlot = max(periodStartSlot, earliestSlot)
highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
lowBoundarySlot = lowSlot.nextEpochBoundarySlot
highBoundarySlot = highSlot.nextEpochBoundarySlot
var
tmpState = assignClone(dag.headState)
tmpCache: StateCache
nextBoundarySlot = lowBoundarySlot
while nextBoundarySlot <= highBoundarySlot:
let
blck = dag.getBlockAtSlot(nextBoundarySlot).blck
boundarySlot = blck.slot.nextEpochBoundarySlot
if boundarySlot == nextBoundarySlot and
blck.slot >= lowSlot and blck.slot <= highSlot and
not dag.lightClientCache.bootstrap.hasKey(blck.slot):
var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
doAssert dag.updateStateData(
tmpState[], blck.atSlot, save = false, tmpCache)
withStateVars(tmpState[]):
withState(stateData.data):
when stateFork >= BeaconStateFork.Altair:
state.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX,
cachedBootstrap.current_sync_committee_branch)
else: raiseAssert "Unreachable"
dag.lightClientCache.bootstrap[blck.slot] = cachedBootstrap
nextBoundarySlot += SLOTS_PER_EPOCH
proc initLightClientCache*(dag: ChainDAGRef) =
## Initialize cached light client data
if dag.importLightClientData == ImportLightClientData.None:
return
if dag.importLightClientData == ImportLightClientData.OnlyNew:
dag.lightClientCache.importTailSlot = dag.head.slot + 1
return
let earliestSlot = dag.computeEarliestLightClientSlot
if dag.head.slot < earliestSlot:
return
let
finalizedSlot = dag.finalizedHead.slot
finalizedPeriod = finalizedSlot.sync_committee_period
dag.initBestLightClientUpdateForPeriod(finalizedPeriod)
let lightClientStartTick = Moment.now()
debug "Initializing cached light client data"
# Build a list of blocks to process.
# As it is slow to load states in descending order,
# first build a todo list, then process them in ascending order
let lowSlot = max(finalizedSlot, dag.computeEarliestLightClientSlot)
var
blocksBetween = newSeqOfCap[BlockRef](dag.head.slot - lowSlot + 1)
blockRef = dag.head
while blockRef.slot > lowSlot:
blocksBetween.add blockRef
blockRef = blockRef.parent
blocksBetween.add blockRef
# Process blocks (reuses `dag.headState`, but restores it to the current head)
var
tmpState = assignClone(dag.headState)
tmpCache, cache: StateCache
oldCheckpoint: Checkpoint
cpIndex = 0
for i in countdown(blocksBetween.high, blocksBetween.low):
blockRef = blocksBetween[i]
doAssert dag.updateStateData(
dag.headState, blockRef.atSlot(blockRef.slot), save = false, cache)
withStateVars(dag.headState):
let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata):
when stateFork >= BeaconStateFork.Altair:
# Cache data for `LightClientUpdate` of descendant blocks
dag.cacheLightClientData(state, blck, isNew = false)
# Cache data for the block's `finalized_checkpoint`.
# The `finalized_checkpoint` may refer to:
# 1. `finalizedHead.blck -> finalized_checkpoint`
# This may happen when there were skipped slots.
# 2. `finalizedHead -> finalized_checkpoint`
# 3. One epoch boundary that got justified then finalized
# between `finalizedHead -> finalized_checkpoint`
# and `finalizedHead`
# 4. `finalizedHead`
let checkpoint = state.data.finalized_checkpoint
if checkpoint != oldCheckpoint:
oldCheckpoint = checkpoint
doAssert cpIndex < dag.lightClientCache.latestCheckpoints.len
dag.lightClientCache.latestCheckpoints[cpIndex] = checkpoint
dag.lightClientCache.lastCheckpointIndex = cpIndex
inc cpIndex
# Save new checkpoint block using `tmpState` (avoid replay after it)
# Note that light client finality proofs refer to checkpoint blocks
# at their original slot, without advancing to an epoch boundary.
# This is because light clients are unable to advance slots.
if checkpoint.root != dag.finalizedHead.blck.root:
let cpRef =
dag.getBlockAtSlot(checkpoint.epoch.start_slot).blck
if cpRef != nil and cpRef.slot >= earliestSlot:
assert cpRef.bid.root == checkpoint.root
doAssert dag.updateStateData(
tmpState[], cpRef.atSlot, save = false, tmpCache)
withStateVars(tmpState[]):
let bdata = dag.getForkedBlock(blck.bid).get
withStateAndBlck(stateData.data, bdata):
when stateFork >= BeaconStateFork.Altair:
dag.cacheLightClientData(state, blck, isNew = false)
else: raiseAssert "Unreachable"
# Create `LightClientUpdate` for non-finalized blocks.
if blockRef.slot > finalizedSlot:
dag.createLightClientUpdates(state, blck, blockRef.parent)
else: raiseAssert "Unreachable"
let lightClientEndTick = Moment.now()
debug "Initialized cached light client data",
initDur = lightClientEndTick - lightClientStartTick
# Import historic data
if dag.importLightClientData == ImportLightClientData.Full:
let
earliestSlot = dag.computeEarliestLightClientSlot
earliestPeriod = earliestSlot.sync_committee_period
for period in earliestPeriod ..< finalizedPeriod:
dag.initBestLightClientUpdateForPeriod(period)
dag.initLightClientBootstrapForPeriod(period)
dag.initLightClientBootstrapForPeriod(finalizedPeriod)
proc getBestLightClientUpdateForPeriod*(
dag: ChainDAGRef,
period: SyncCommitteePeriod): Option[altair.LightClientUpdate] =
if not dag.serveLightClientData:
return none(altair.LightClientUpdate)
if dag.importLightClientData == ImportLightClientData.OnDemand:
dag.initBestLightClientUpdateForPeriod(period)
result = some(dag.lightClientCache.bestUpdates.getOrDefault(period))
let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
result = none(altair.LightClientUpdate)
proc getLatestLightClientUpdate*(
dag: ChainDAGRef): Option[altair.LightClientUpdate] =
if not dag.serveLightClientData:
return none(altair.LightClientUpdate)
result = some(dag.lightClientCache.latestUpdate)
let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
result = none(altair.LightClientUpdate)
proc getOptimisticLightClientUpdate*(
dag: ChainDAGRef): Option[OptimisticLightClientUpdate] =
if not dag.serveLightClientData:
return none(OptimisticLightClientUpdate)
result = some(dag.lightClientCache.optimisticUpdate)
let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
result = none(OptimisticLightClientUpdate)
proc getLightClientBootstrap*(
dag: ChainDAGRef,
blockRoot: Eth2Digest): Option[altair.LightClientBootstrap] =
if not dag.serveLightClientData:
return none(altair.LightClientBootstrap)
let bdata = dag.getForkedBlock(blockRoot).valueOr:
debug "Bootstrap unavailable: Block not found", blockRoot
return none(altair.LightClientBootstrap)
withBlck(bdata):
let
slot = blck.message.slot
period = slot.sync_committee_period
when stateFork >= BeaconStateFork.Altair:
let earliestSlot = dag.computeEarliestLightClientSlot
if slot < earliestSlot:
debug "Bootstrap unavailable: Block too old", slot
return none(altair.LightClientBootstrap)
if slot > dag.finalizedHead.blck.slot:
debug "Bootstrap unavailable: Not finalized", blockRoot
return none(altair.LightClientBootstrap)
var cachedBootstrap =
dag.lightClientCache.bootstrap.getOrDefault(slot)
if cachedBootstrap.current_sync_committee_branch.isZeroMemory:
if dag.importLightClientData == ImportLightClientData.OnDemand:
var tmpState = assignClone(dag.headState)
dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot)) do:
withState(stateData.data):
when stateFork >= BeaconStateFork.Altair:
state.data.build_proof(
altair.CURRENT_SYNC_COMMITTEE_INDEX,
cachedBootstrap.current_sync_committee_branch)
else: raiseAssert "Unreachable"
do: raiseAssert "Unreachable"
dag.lightClientCache.bootstrap[slot] = cachedBootstrap
else:
debug "Bootstrap unavailable: Data not cached", slot
return none(altair.LightClientBootstrap)
var tmpState = assignClone(dag.headState)
var bootstrap {.noinit.}: altair.LightClientBootstrap
bootstrap.header =
blck.toBeaconBlockHeader
bootstrap.current_sync_committee =
dag.currentSyncCommitteeForPeriod(tmpState[], period)
bootstrap.current_sync_committee_branch =
cachedBootstrap.current_sync_committee_branch
return some(bootstrap)
else:
debug "Bootstrap unavailable: Block before Altair", slot
return none(altair.LightClientBootstrap)
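The accessors above are what a future serving layer is expected to call; a
hypothetical sketch of that flow (`dag`, `period` and `blockRoot` are assumed
to exist in the caller, and the actual libp2p wiring is left to the follow-up
patch mentioned in the commit message):

```nim
# Hypothetical usage sketch, not part of this change.
let bootstrap = dag.getLightClientBootstrap(blockRoot)
if bootstrap.isSome:
  discard  # would be sent in response to a bootstrap request

let update = dag.getBestLightClientUpdateForPeriod(period)
if update.isSome:
  discard  # would be sent in response to an update-by-range request
```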


@ -1,3 +1,4 @@
# beacon_chain
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@ -215,6 +216,8 @@ proc init*(T: type BeaconNode,
eventBus.emit("finalization", data)
proc onSyncContribution(data: SignedContributionAndProof) =
eventBus.emit("sync-contribution-and-proof", data)
proc onOptimisticLightClientUpdate(data: OptimisticLightClientUpdate) =
discard
if config.finalizedCheckpointState.isSome:
let checkpointStatePath = config.finalizedCheckpointState.get.string
@ -379,11 +382,17 @@ proc init*(T: type BeaconNode,
info "Loading block DAG from database", path = config.databaseDir
let
chainDagFlags = if config.verifyFinalization: {verifyFinalization}
else: {}
chainDagFlags =
if config.verifyFinalization: {verifyFinalization}
else: {}
onOptimisticLightClientUpdateCb =
if config.serveLightClientData: onOptimisticLightClientUpdate
else: nil
dag = ChainDAGRef.init(
cfg, db, validatorMonitor, chainDagFlags, onBlockAdded, onHeadChanged,
onChainReorg)
onChainReorg, onOptimisticLCUpdateCb = onOptimisticLightClientUpdateCb,
serveLightClientData = config.serveLightClientData,
importLightClientData = config.importLightClientData)
quarantine = newClone(Quarantine.init())
databaseGenesisValidatorsRoot =
getStateField(dag.headState.data, genesis_validators_root)


@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Copyright (c) 2018-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -31,6 +31,7 @@ import # Unit test
./test_helpers,
./test_honest_validator,
./test_interop,
./test_light_client,
./test_message_signatures,
./test_peer_pool,
./test_spec,

tests/test_light_client.nim (new file, 190 lines)

@ -0,0 +1,190 @@
# beacon_chain
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
{.used.}
# This implements the pre-release proposal of the libp2p based light client sync
# protocol. See https://github.com/ethereum/consensus-specs/pull/2802
import
# Status libraries
chronicles, eth/keys, stew/objects, taskpools,
# Beacon chain internals
../beacon_chain/consensus_object_pools/
[block_clearance, block_quarantine, blockchain_dag],
../beacon_chain/spec/[forks, helpers, light_client_sync, state_transition],
# Test utilities
./testutil, ./testdbutil
suite "Light client" & preset():
let
cfg = block:
var res = defaultRuntimeConfig
res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1
res
altairStartSlot = cfg.ALTAIR_FORK_EPOCH.start_slot
proc advanceToSlot(
dag: ChainDAGRef,
targetSlot: Slot,
verifier: var BatchVerifier,
quarantine: var Quarantine,
attested = true,
syncCommitteeRatio = 0.75) =
var cache: StateCache
const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
while true:
var slot = getStateField(dag.headState.data, slot)
doAssert targetSlot >= slot
if targetSlot == slot: break
# When there is a large jump, skip to the end of the current period,
# create blocks for a few epochs to finalize it, then proceed
let
nextPeriod = slot.sync_committee_period + 1
periodEpoch = nextPeriod.start_epoch
periodSlot = periodEpoch.start_slot
checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
var info: ForkedEpochInfo
doAssert process_slots(cfg, dag.headState.data, checkpointSlot,
cache, info, flags = {}).isOk()
slot = checkpointSlot
# Create blocks for final few epochs
let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
for blck in makeTestBlocks(dag.headState.data, cache, blocks.int,
attested, syncCommitteeRatio, cfg):
let added =
case blck.kind
of BeaconBlockFork.Phase0:
const nilCallback = OnPhase0BlockAdded(nil)
dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
of BeaconBlockFork.Altair:
const nilCallback = OnAltairBlockAdded(nil)
dag.addHeadBlock(verifier, blck.altairData, nilCallback)
of BeaconBlockFork.Bellatrix:
const nilCallback = OnBellatrixBlockAdded(nil)
dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
check: added.isOk()
dag.updateHead(added[], quarantine)
setup:
const num_validators = SLOTS_PER_EPOCH
let
validatorMonitor = newClone(ValidatorMonitor.init())
dag = ChainDAGRef.init(
cfg, makeTestDB(num_validators), validatorMonitor, {},
serveLightClientData = true,
importLightClientData = ImportLightClientData.OnlyNew)
quarantine = newClone(Quarantine.init())
taskpool = TaskPool.new()
var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
test "Pre-Altair":
# Genesis
check:
dag.headState.data.kind == BeaconStateFork.Phase0
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone
# Advance to last slot before Altair
dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
check:
dag.headState.data.kind == BeaconStateFork.Phase0
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
check:
dag.headState.data.kind == BeaconStateFork.Altair
dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
dag.getLatestLightClientUpdate.isNone
test "Light client sync":
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
# Track trusted checkpoint for light client
let
genesis_validators_root = dag.genesisValidatorsRoot
trusted_block_root = dag.headState.blck.root
# Advance to target slot
const
headPeriod = 2.SyncCommitteePeriod
periodEpoch = headPeriod.start_epoch
headSlot = (periodEpoch + 2).start_slot + 5
dag.advanceToSlot(headSlot, verifier, quarantine[])
let currentSlot = getStateField(dag.headState.data, slot)
# Initialize light client store
let bootstrap = dag.getLightClientBootstrap(trusted_block_root)
check bootstrap.isSome
var storeRes = initialize_light_client_store(
trusted_block_root, bootstrap.get)
check storeRes.isSome
template store(): auto = storeRes.get
# Sync to latest sync committee period
var numIterations = 0
while store.finalized_header.slot.sync_committee_period + 1 < headPeriod:
let
period =
if store.next_sync_committee.isZeroMemory:
store.finalized_header.slot.sync_committee_period
else:
store.finalized_header.slot.sync_committee_period + 1
bestUpdate = dag.getBestLightClientUpdateForPeriod(period)
res = process_light_client_update(
store, bestUpdate.get, currentSlot, cfg, genesis_validators_root)
check:
bestUpdate.isSome
bestUpdate.get.finalized_header.slot.sync_committee_period == period
res
store.finalized_header == bestUpdate.get.finalized_header
inc numIterations
if numIterations > 20: doAssert false # Avoid endless loop on test failure
# Sync to latest update
let
latestUpdate = dag.getLatestLightClientUpdate
res = process_light_client_update(
store, latestUpdate.get, currentSlot, cfg, genesis_validators_root)
check:
latestUpdate.isSome
latestUpdate.get.attested_header.slot == dag.headState.blck.parent.slot
res
store.finalized_header == latestUpdate.get.finalized_header
store.optimistic_header == latestUpdate.get.attested_header
test "Init from checkpoint":
# Fetch genesis state
let genesisState = assignClone dag.headState.data
# Advance to target slot for checkpoint
let finalizedSlot =
((altairStartSlot.sync_committee_period + 1).start_epoch + 2).start_slot
dag.advanceToSlot(finalizedSlot, verifier, quarantine[])
# Initialize new DAG from checkpoint
let cpDb = BeaconChainDB.new("", inMemory = true)
ChainDAGRef.preInit(
cpDB, genesisState[],
dag.headState.data, dag.getForkedBlock(dag.head.bid).get)
let cpDag = ChainDAGRef.init(
cfg, cpDb, validatorMonitor, {},
serveLightClientData = true,
importLightClientData = ImportLightClientData.Full)
# Advance by a couple epochs
for i in 1'u64 .. 10:
let headSlot = (finalizedSlot.epoch + i).start_slot
cpDag.advanceToSlot(headSlot, verifier, quarantine[])
check true