add option to produce light client data
Light clients require full nodes to serve additional data so that they can stay in sync with the network. This patch adds a new launch option `--serve-light-client-data` to enable collection of light client data. `--import-light-client-data` configures the classes of data to import; it can be set to `none`, `only-new`, `full`, or `on-demand`. Note that data is only collected locally; a separate patch is needed to actually make it available over the network. Likewise, data is only kept in memory; it is not persisted at this time.
This commit is contained in:
parent a38b6f4c45
commit 97b1ed9b38
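To illustrate how the two options are intended to combine (this sketch is editorial, not part of the patch, and the helper names are hypothetical): `--import-light-client-data` decides what gets collected, while `--serve-light-client-data` decides whether collected data is handed out.

```nim
type ImportLightClientData {.pure.} = enum
  None = "none", OnlyNew = "only-new", Full = "full", OnDemand = "on-demand"

# Hypothetical helpers illustrating the intended semantics.
func collectsData(mode: ImportLightClientData): bool =
  mode != ImportLightClientData.None

func canServe(serve: bool, mode: ImportLightClientData): bool =
  # Serving only makes sense when some data is being collected locally.
  serve and mode.collectsData

doAssert not canServe(serve = true, mode = ImportLightClientData.None)
doAssert canServe(serve = true, mode = ImportLightClientData.OnlyNew)
```

A node operator would typically enable both, e.g. `--serve-light-client-data --import-light-client-data=only-new`.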
AllTests-mainnet.md

````diff
@@ -252,6 +252,12 @@ OK: 3/3 Fail: 0/3 Skip: 0/3
 + [SCRYPT] Network Keystore encryption                        OK
 ```
 OK: 9/9 Fail: 0/9 Skip: 0/9
+## Light client [Preset: mainnet]
+```diff
++ Light client sync                                            OK
++ Pre-Altair                                                   OK
+```
+OK: 2/2 Fail: 0/2 Skip: 0/2
 ## ListKeys requests [Preset: mainnet]
 ```diff
 + Correct token provided [Preset: mainnet]                     OK
@@ -512,4 +518,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
-OK: 282/286 Fail: 0/286 Skip: 4/286
+OK: 284/288 Fail: 0/288 Skip: 4/288
````
beacon_chain/conf.nim

```diff
@@ -24,6 +24,9 @@ import
   ./validators/slashing_protection_common,
   ./filepath
 
+from consensus_object_pools/block_pools_types_light_client
+  import ImportLightClientData
+
 export
   uri, nat, enr,
   defaultEth2TcpPort, enabledLogLevel, ValidIpAddress,
@@ -391,6 +394,19 @@ type
       desc: "A file specifying the authorizition token required for accessing the keymanager API"
      name: "keymanager-token-file" }: Option[InputFile]
 
+    serveLightClientData* {.
+      hidden
+      desc: "BETA: Serve data for enabling light clients to stay in sync with the network"
+      defaultValue: false
+      name: "serve-light-client-data"}: bool
+
+    importLightClientData* {.
+      hidden
+      desc: "BETA: Which classes of light client data to import. " &
+        "Must be one of: none, only-new, full (slow startup), on-demand (may miss validator duties)"
+      defaultValue: ImportLightClientData.None
+      name: "import-light-client-data"}: ImportLightClientData
+
     inProcessValidators* {.
       desc: "Disable the push model (the beacon node tells a signing process with the private keys of the validators what to sign and when) and load the validators in the beacon node itself"
       defaultValue: true # the use of the nimbus_signing_process binary by default will be delayed until async I/O over stdin/stdout is developed for the child process.
```
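The `ImportLightClientData` enum values double as their CLI spellings (see the enum definition in `block_pools_types_light_client.nim` below). With string-valued enums, a recent Nim stdlib can round-trip these forms directly; a small self-contained check (confutils' actual parsing path may differ):

```nim
import std/strutils

type ImportLightClientData {.pure.} = enum
  None = "none", OnlyNew = "only-new", Full = "full", OnDemand = "on-demand"

# `$` yields the CLI spelling, and parseEnum accepts it back.
doAssert $ImportLightClientData.OnDemand == "on-demand"
doAssert parseEnum[ImportLightClientData]("only-new") ==
  ImportLightClientData.OnlyNew
```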
beacon_chain/consensus_object_pools/block_clearance.nim

```diff
@@ -11,7 +11,7 @@ import
   chronicles,
   stew/[assign2, results],
   ../spec/[forks, signatures, signatures_batch, state_transition],
-  "."/[block_dag, blockchain_dag]
+  "."/[block_dag, blockchain_dag, blockchain_dag_light_client]
 
 export results, signatures_batch, block_dag, blockchain_dag
 
@@ -84,6 +84,9 @@ proc addResolvedHeadBlock(
     putBlockDur = putBlockTick - startTick,
     epochRefDur = epochRefTick - putBlockTick
 
+  # Update light client data
+  dag.processNewBlockForLightClient(state, trustedBlock, parent)
+
   # Notify others of the new block before processing the quarantine, such that
   # notifications for parents happens before those of the children
   if onBlockAdded != nil:
```
beacon_chain/consensus_object_pools/block_dag.nim

```diff
@@ -57,6 +57,12 @@ type
     ## Slot time for this BlockSlot which may differ from blck.slot when time
     ## has advanced without blocks
 
+func hash*(bid: BlockId): Hash =
+  var h: Hash = 0
+  h = h !& hash(bid.root)
+  h = h !& hash(bid.slot)
+  !$h
+
 template root*(blck: BlockRef): Eth2Digest = blck.bid.root
 template slot*(blck: BlockRef): Slot = blck.bid.slot
 
```
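The new `hash` makes `BlockId` usable as a `Table` key, which the light client cache below relies on (`cachedData: Table[BlockId, CachedLightClientData]`). A self-contained sketch with stand-in types:

```nim
import std/[hashes, tables]

type
  Eth2Digest = object          # stand-in for the real 32-byte digest
    data: array[32, byte]
  Slot = uint64                # the real Slot is a distinct type
  BlockId = object
    root: Eth2Digest
    slot: Slot

func hash(x: Eth2Digest): Hash = hash(x.data)

func hash(bid: BlockId): Hash =
  # Same mixing pattern as the added func: accumulate with `!&`, finish with `!$`.
  var h: Hash = 0
  h = h !& hash(bid.root)
  h = h !& hash(bid.slot)
  !$h

var cached: Table[BlockId, string]
cached[BlockId(slot: 42)] = "some cached data"
doAssert BlockId(slot: 42) in cached
```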
beacon_chain/consensus_object_pools/block_pools_types.nim

```diff
@@ -17,11 +17,11 @@ import
   ../spec/datatypes/[phase0, altair, bellatrix],
   ".."/beacon_chain_db,
   ../validators/validator_monitor,
-  ./block_dag
+  ./block_dag, block_pools_types_light_client
 
 export
   options, sets, tables, hashes, helpers, beacon_chain_db, block_dag,
-  validator_monitor
+  block_pools_types_light_client, validator_monitor
 
 # ChainDAG and types related to forming a DAG of blocks, keeping track of their
 # relationships and allowing various forms of lookups
@@ -148,6 +148,12 @@ type
 
     cfg*: RuntimeConfig
 
+    serveLightClientData*: bool
+      ## Whether to make local light client data available.
+
+    importLightClientData*: ImportLightClientData
+      ## Which classes of light client data to import.
+
     epochRefs*: array[32, EpochRef]
       ## Cached information about a particular epoch ending with the given
       ## block - we limit the number of held EpochRefs to put a cap on
@@ -159,6 +165,14 @@ type
       ## value with other components which don't have access to the
       ## full ChainDAG.
 
+    # -----------------------------------
+    # Data to enable light clients to stay in sync with the network
+
+    lightClientDb*: LightClientDatabase
+
+    # -----------------------------------
+    # Callbacks
+
     onBlockAdded*: OnBlockCallback
       ## On block added callback
     onHeadChanged*: OnHeadCallback
@@ -167,6 +181,8 @@ type
       ## On beacon chain reorganization
     onFinHappened*: OnFinalizedCallback
       ## On finalization callback
+    onOptimisticLightClientUpdate*: OnOptimisticLightClientUpdateCallback
+      ## On `OptimisticLightClientUpdate` updated callback
 
     headSyncCommittees*: SyncCommitteeCache
       ## A cache of the sync committees, as they appear in the head state -
```
beacon_chain/consensus_object_pools/block_pools_types_light_client.nim (new file)

```diff
@@ -0,0 +1,85 @@
+# beacon_chain
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  # Status libraries
+  stew/bitops2,
+  # Beacon chain internals
+  ../spec/datatypes/altair,
+  ./block_dag
+
+type
+  OnOptimisticLightClientUpdateCallback* =
+    proc(data: OptimisticLightClientUpdate) {.gcsafe, raises: [Defect].}
+
+  ImportLightClientData* {.pure.} = enum
+    ## Controls which classes of light client data are imported.
+    None = "none"
+      ## Import no light client data.
+    OnlyNew = "only-new"
+      ## Import only new light client data (new non-finalized blocks).
+    Full = "full"
+      ## Import light client data for entire weak subjectivity period.
+    OnDemand = "on-demand"
+      ## No precompute of historic data. Is slow and may miss validator duties.
+
+  CachedLightClientData* = object
+    ## Cached data from historical non-finalized states to improve speed when
+    ## creating future `LightClientUpdate` and `LightClientBootstrap` instances.
+    current_sync_committee_branch*:
+      array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
+
+    next_sync_committee_branch*:
+      array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
+
+    finalized_bid*: BlockId
+    finality_branch*:
+      array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
+
+  CachedLightClientBootstrap* = object
+    ## Cached data from historical finalized epoch boundary blocks to improve
+    ## speed when creating future `LightClientBootstrap` instances.
+    current_sync_committee_branch*:
+      array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
+
+  LightClientDatabase* = object
+    cachedData*: Table[BlockId, CachedLightClientData]
+      ## Cached data for creating future `LightClientUpdate` instances.
+      ## Key is the block ID of which the post state was used to get the data.
+      ## Data is stored for the most recent 4 finalized checkpoints, as well as
+      ## for all non-finalized blocks.
+
+    cachedBootstrap*: Table[Slot, CachedLightClientBootstrap]
+      ## Cached data for creating future `LightClientBootstrap` instances.
+      ## Key is the block slot of which the post state was used to get the data.
+      ## Data is stored for finalized epoch boundary blocks.
+
+    latestCheckpoints*: array[4, Checkpoint]
+      ## Keeps track of the latest four `finalized_checkpoint` references
+      ## leading to `finalizedHead`. Used to prune `cachedData`.
+      ## Non-finalized states may only refer to these checkpoints.
+
+    lastCheckpointIndex*: int
+      ## Last index that was modified in `latestCheckpoints`.
+
+    bestUpdates*: Table[SyncCommitteePeriod, altair.LightClientUpdate]
+      ## Stores the `LightClientUpdate` with the most `sync_committee_bits` per
+      ## `SyncCommitteePeriod`. Updates with a finality proof have precedence.
+
+    pendingBestUpdates*:
+      Table[(SyncCommitteePeriod, Eth2Digest), altair.LightClientUpdate]
+      ## Same as `bestUpdates`, but for `SyncCommitteePeriod` with
+      ## `next_sync_committee` that are not finalized. Key is `(period,
+      ## hash_tree_root(current_sync_committee | next_sync_committee)`.
+
+    latestUpdate*: altair.LightClientUpdate
+      ## Tracks the `LightClientUpdate` for the latest slot. This may be older
+      ## than head for empty slots or if not signed by sync committee.
+
+    optimisticUpdate*: OptimisticLightClientUpdate
+      ## Tracks the `OptimisticLightClientUpdate` for the latest slot. This may
+      ## be older than head for empty slots or if not signed by sync committee.
```
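The four-entry `latestCheckpoints` array acts as a small ring buffer: non-finalized states can only reference one of the last few finalized checkpoints, so keeping four of them bounds what `cachedData` must retain. A stand-alone sketch of the rotation (stand-in `Checkpoint` type):

```nim
type Checkpoint = object
  epoch: uint64

var
  latestCheckpoints: array[4, Checkpoint]
  lastCheckpointIndex = 0

proc trackCheckpoint(cp: Checkpoint) =
  # Only rotate when the checkpoint actually changed,
  # overwriting the oldest of the four retained entries.
  if latestCheckpoints[lastCheckpointIndex] != cp:
    let nextIndex = (lastCheckpointIndex + 1) mod latestCheckpoints.len
    latestCheckpoints[nextIndex] = cp
    lastCheckpointIndex = nextIndex

for e in 1'u64 .. 6'u64:
  trackCheckpoint(Checkpoint(epoch: e))
doAssert lastCheckpointIndex == 2  # 6 updates wrapped around the 4 slots
```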
beacon_chain/consensus_object_pools/blockchain_dag.nim

```diff
@@ -454,11 +454,23 @@ proc updateBeaconMetrics(state: StateData, cache: var StateCache) =
   beacon_active_validators.set(active_validators)
   beacon_current_active_validators.set(active_validators)
 
+import blockchain_dag_light_client
+
+export
+  blockchain_dag_light_client.getBestLightClientUpdateForPeriod,
+  blockchain_dag_light_client.getLatestLightClientUpdate,
+  blockchain_dag_light_client.getOptimisticLightClientUpdate,
+  blockchain_dag_light_client.getLightClientBootstrap
+
 proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
            validatorMonitor: ref ValidatorMonitor, updateFlags: UpdateFlags,
            onBlockCb: OnBlockCallback = nil, onHeadCb: OnHeadCallback = nil,
            onReorgCb: OnReorgCallback = nil,
-           onFinCb: OnFinalizedCallback = nil): ChainDAGRef =
+           onFinCb: OnFinalizedCallback = nil,
+           onOptimisticLCUpdateCb: OnOptimisticLightClientUpdateCallback = nil,
+           serveLightClientData = false,
+           importLightClientData = ImportLightClientData.None
+          ): ChainDAGRef =
   # TODO we require that the db contains both a head and a tail block -
   # asserting here doesn't seem like the right way to go about it however..
 
@@ -601,11 +613,14 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     # allow skipping some validation.
     updateFlags: {verifyFinalization} * updateFlags,
     cfg: cfg,
+    serveLightClientData: serveLightClientData,
+    importLightClientData: importLightClientData,
+
     onBlockAdded: onBlockCb,
     onHeadChanged: onHeadCb,
     onReorgHappened: onReorgCb,
-    onFinHappened: onFinCb
+    onFinHappened: onFinCb,
+    onOptimisticLightClientUpdate: onOptimisticLCUpdateCb
   )
 
   block: # Initialize dag states
@@ -720,6 +735,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     stateDur = stateTick - summariesTick,
     indexDur = Moment.now() - stateTick
 
+  dag.initLightClientDb()
+
   dag
 
 template genesisValidatorsRoot*(dag: ChainDAGRef): Eth2Digest =
@@ -1228,6 +1245,9 @@ proc pruneBlocksDAG(dag: ChainDAGRef) =
 
   var cur = head.atSlot()
   while not cur.blck.isAncestorOf(dag.finalizedHead.blck):
+    # Update light client data
+    dag.deleteLightClientData(cur.blck.bid)
+
     dag.delState(cur) # TODO: should we move that disk I/O to `onSlotEnd`
 
     if cur.isProposed():
@@ -1427,6 +1447,9 @@ proc updateHead*(
   doAssert (not finalizedHead.blck.isNil),
     "Block graph should always lead to a finalized block"
 
+  # Update light client data
+  dag.processHeadChangeForLightClient()
+
   let (isAncestor, ancestorDepth) = lastHead.getDepth(newHead)
   if not(isAncestor):
     notice "Updated head block with chain reorg",
@@ -1513,6 +1536,9 @@ proc updateHead*(
     # therefore no longer be considered as part of the chain we're following
     dag.pruneBlocksDAG()
 
+    # Update light client data
+    dag.processFinalizationForLightClient()
+
   # Send notification about new finalization point via callback.
   if not(isNil(dag.onFinHappened)):
     let stateRoot =
```
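`ChainDAGRef.init` now threads an optional `OnOptimisticLightClientUpdateCallback` through to the DAG, mirroring the existing head/reorg/finalization callbacks. A stand-alone sketch of that nil-guarded callback pattern (simplified stand-in types):

```nim
type
  OptimisticLightClientUpdate = object
    attestedSlot: uint64
  OnOptimisticLightClientUpdateCallback =
    proc(data: OptimisticLightClientUpdate)

  Dag = object
    onOptimisticLightClientUpdate: OnOptimisticLightClientUpdateCallback

proc fireUpdate(dag: Dag, update: OptimisticLightClientUpdate) =
  # Same guard as the DAG code: the callback is optional.
  if dag.onOptimisticLightClientUpdate != nil:
    dag.onOptimisticLightClientUpdate(update)

proc main() =
  var seen: seq[uint64]
  let dag = Dag(onOptimisticLightClientUpdate:
    proc(data: OptimisticLightClientUpdate) = seen.add data.attestedSlot)
  dag.fireUpdate(OptimisticLightClientUpdate(attestedSlot: 42))
  doAssert seen == @[42'u64]

main()
```

The new `blockchain_dag_light_client` module itself follows below.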
beacon_chain/consensus_object_pools/blockchain_dag_light_client.nim (new file)

```diff
@@ -0,0 +1,919 @@
+# beacon_chain
+# Copyright (c) 2022 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
+import
+  # Standard library
+  std/[algorithm, sequtils],
+  # Status libraries
+  stew/[bitops2, objects],
+  chronos,
+  # Beacon chain internals
+  ../spec/datatypes/[phase0, altair, bellatrix],
+  "."/[block_pools_types, blockchain_dag]
+
+logScope: topics = "chaindag"
+
+type
+  HashedBeaconStateWithSyncCommittee =
+    bellatrix.HashedBeaconState |
+    altair.HashedBeaconState
+
+  TrustedSignedBeaconBlockWithSyncAggregate =
+    bellatrix.TrustedSignedBeaconBlock |
+    altair.TrustedSignedBeaconBlock
+
+func fromBlock(
+    T: type BeaconBlockHeader,
+    blck: ForkyTrustedSignedBeaconBlock): T =
+  ## Reduce a given full block to just its `BeaconBlockHeader`.
+  BeaconBlockHeader(
+    slot: blck.message.slot,
+    proposer_index: blck.message.proposer_index,
+    parent_root: blck.message.parent_root,
+    state_root: blck.message.state_root,
+    body_root: blck.message.body.hash_tree_root())
+
+func fromBlock(
+    T: type BeaconBlockHeader,
+    blck: ForkedTrustedSignedBeaconBlock): T =
+  ## Reduce a given full block to just its `BeaconBlockHeader`.
+  withBlck(blck):
+    BeaconBlockHeader.fromBlock(blck)
+
+template nextEpochBoundarySlot(slot: Slot): Slot =
+  ## Compute the first possible epoch boundary state slot of a `Checkpoint`
+  ## referring to a block at given slot.
+  (slot + (SLOTS_PER_EPOCH - 1)).epoch.start_slot
+
+func computeEarliestLightClientSlot*(dag: ChainDAGRef): Slot =
+  ## Compute the earliest slot for which light client data is retained.
+  let
+    altairStartSlot = dag.cfg.ALTAIR_FORK_EPOCH.start_slot
+    currentSlot = getStateField(dag.headState.data, slot)
+  if currentSlot < altairStartSlot:
+    return altairStartSlot
+
+  let
+    MIN_EPOCHS_FOR_BLOCK_REQUESTS =
+      dag.cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY +
+      dag.cfg.CHURN_LIMIT_QUOTIENT div 2
+    MIN_SLOTS_FOR_BLOCK_REQUESTS =
+      MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH
+    minSlot = max(altairStartSlot, dag.tail.slot)
+  if currentSlot - minSlot < MIN_SLOTS_FOR_BLOCK_REQUESTS:
+    return minSlot
+
+  let earliestSlot = currentSlot - MIN_SLOTS_FOR_BLOCK_REQUESTS
+  max(earliestSlot.sync_committee_period.start_slot, minSlot)
```
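To put `MIN_SLOTS_FOR_BLOCK_REQUESTS` in perspective, here is the arithmetic under mainnet preset values (MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256 epochs, CHURN_LIMIT_QUOTIENT = 65536): the retention window works out to roughly five months of history.

```nim
const
  MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256'u64   # epochs (mainnet)
  CHURN_LIMIT_QUOTIENT = 65536'u64                # mainnet
  SLOTS_PER_EPOCH = 32'u64
  SECONDS_PER_SLOT = 12'u64

  MIN_EPOCHS_FOR_BLOCK_REQUESTS =
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT div 2

doAssert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024'u64
# 33024 epochs * 32 slots * 12 s = 12_681_216 s, about 147 days.
doAssert MIN_EPOCHS_FOR_BLOCK_REQUESTS * SLOTS_PER_EPOCH * SECONDS_PER_SLOT ==
  12_681_216'u64
```

The module continues with sync committee helpers and the caching layer.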
```diff
+proc currentSyncCommitteeForPeriod(
+    dag: ChainDAGRef,
+    tmpState: ref StateData,
+    period: SyncCommitteePeriod): SyncCommittee =
+  ## Fetch a `SyncCommittee` for a given sync committee period.
+  ## For non-finalized periods, follow the chain as selected by fork choice.
+  let earliestSlot = dag.computeEarliestLightClientSlot
+  doAssert period >= earliestSlot.sync_committee_period
+  let
+    periodStartSlot = period.start_slot
+    syncCommitteeSlot = max(periodStartSlot, earliestSlot)
+  dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(syncCommitteeSlot)) do:
+    withState(stateData.data):
+      when stateFork >= BeaconStateFork.Altair:
+        state.data.current_sync_committee
+      else: raiseAssert "Unreachable"
+  do: raiseAssert "Unreachable"
+
+template syncCommitteeRoot(
+    state: HashedBeaconStateWithSyncCommittee): Eth2Digest =
+  ## Compute a root to uniquely identify `current_sync_committee` and
+  ## `next_sync_committee`.
+  withEth2Hash:
+    h.update state.data.current_sync_committee.hash_tree_root().data
+    h.update state.data.next_sync_committee.hash_tree_root().data
+
+proc syncCommitteeRootForPeriod(
+    dag: ChainDAGRef,
+    tmpState: ref StateData,
+    period: SyncCommitteePeriod): Eth2Digest =
+  ## Compute a root to uniquely identify `current_sync_committee` and
+  ## `next_sync_committee` for a given sync committee period.
+  ## For non-finalized periods, follow the chain as selected by fork choice.
+  let earliestSlot = dag.computeEarliestLightClientSlot
+  doAssert period >= earliestSlot.sync_committee_period
+  let
+    periodStartSlot = period.start_slot
+    syncCommitteeSlot = max(periodStartSlot, earliestSlot)
+  dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(syncCommitteeSlot)) do:
+    withState(stateData.data):
+      when stateFork >= BeaconStateFork.Altair:
+        state.syncCommitteeRoot
+      else: raiseAssert "Unreachable"
+  do: raiseAssert "Unreachable"
+
+proc getLightClientData(
+    dag: ChainDAGRef,
+    bid: BlockId): CachedLightClientData =
+  ## Fetch cached light client data about a given block.
+  ## Data must be cached (`cacheLightClientData`) before calling this function.
+  try: dag.lightClientDb.cachedData[bid]
+  except KeyError: raiseAssert "Unreachable"
+
+template getLightClientData(
+    dag: ChainDAGRef,
+    blck: BlockRef): CachedLightClientData =
+  getLightClientData(dag, blck.bid)
+
+proc cacheLightClientData*(
+    dag: ChainDAGRef,
+    state: HashedBeaconStateWithSyncCommittee,
+    blck: TrustedSignedBeaconBlockWithSyncAggregate,
+    isNew = true) =
+  ## Cache data for a given block and its post-state to speed up creating future
+  ## `LightClientUpdate` and `LightClientBootstrap` instances that refer to this
+  ## block and state.
+  let startTick = Moment.now()
+
+  var current_sync_committee_branch {.noinit.}:
+    array[log2trunc(altair.CURRENT_SYNC_COMMITTEE_INDEX), Eth2Digest]
+  state.data.build_proof(
+    altair.CURRENT_SYNC_COMMITTEE_INDEX, current_sync_committee_branch)
+
+  var next_sync_committee_branch {.noinit.}:
+    array[log2trunc(altair.NEXT_SYNC_COMMITTEE_INDEX), Eth2Digest]
+  state.data.build_proof(
+    altair.NEXT_SYNC_COMMITTEE_INDEX, next_sync_committee_branch)
+
+  var finality_branch {.noinit.}:
+    array[log2trunc(altair.FINALIZED_ROOT_INDEX), Eth2Digest]
+  state.data.build_proof(
+    altair.FINALIZED_ROOT_INDEX, finality_branch)
+
+  template finalized_checkpoint(): auto = state.data.finalized_checkpoint
+  let
+    bid =
+      BlockId(root: blck.root, slot: blck.message.slot)
+    finalized_bid =
+      dag.getBlockIdAtSlot(finalized_checkpoint.epoch.start_slot).bid
+  if dag.lightClientDb.cachedData.hasKeyOrPut(
+      bid,
+      CachedLightClientData(
+        current_sync_committee_branch:
+          current_sync_committee_branch,
+        next_sync_committee_branch:
+          next_sync_committee_branch,
+        finalized_bid:
+          finalized_bid,
+        finality_branch:
+          finality_branch)):
+    doAssert false, "Redundant `cacheLightClientData` call"
+
+  let endTick = Moment.now()
+  if isNew and endTick - startTick > chronos.milliseconds(30):
+    debug "Caching light client data took longer than usual",
+      root = blck.root, slot = blck.message.slot,
+      createDur = endTick - startTick
+
+proc deleteLightClientData*(dag: ChainDAGRef, bid: BlockId) =
+  ## Delete cached light client data for a given block. This needs to be called
+  ## when a block becomes unreachable due to finalization of a different fork.
+  if dag.importLightClientData == ImportLightClientData.None:
+    return
+
+  dag.lightClientDb.cachedData.del bid
+
+template lazy_header(name: untyped): untyped {.dirty.} =
+  ## `createLightClientUpdates` helper to lazily load a known block header.
+  var `name ptr`: ptr[BeaconBlockHeader]
+  template `assign name`(target: var BeaconBlockHeader,
+                         bid: BlockId | BlockRef): untyped =
+    if `name ptr` != nil:
+      target = `name ptr`[]
+    else:
+      target = BeaconBlockHeader.fromBlock(
+        when bid is BlockID:
+          dag.getForkedBlock(bid).get
+        else:
+          dag.getForkedBlock(bid))
+      `name ptr` = addr target
+
+template lazy_data(name: untyped): untyped {.dirty.} =
+  ## `createLightClientUpdates` helper to lazily load cached light client state.
+  var `name` {.noinit.}: CachedLightClientData
+  `name`.finalized_bid.slot = FAR_FUTURE_SLOT
+  template `load name`(bid: BlockId | BlockRef): untyped =
+    if `name`.finalized_bid.slot == FAR_FUTURE_SLOT:
+      `name` = dag.getLightClientData(bid)
+
+proc createLightClientUpdates(
+    dag: ChainDAGRef,
+    state: HashedBeaconStateWithSyncCommittee,
+    blck: TrustedSignedBeaconBlockWithSyncAggregate,
+    parent: BlockRef) =
+  ## Create `LightClientUpdate` and `OptimisticLightClientUpdate` instances for
+  ## a given block and its post-state, and keep track of best / latest ones.
+  ## Data about the parent block's post-state and its `finalized_checkpoint`'s
+  ## block's post-state needs to be cached (`cacheLightClientData`) before
+  ## calling this function.
+  let startTick = Moment.now()
+
+  # Parent needs to be known to continue
+  if parent == nil:
+    return
+
+  # Verify sync committee has sufficient participants
+  template sync_aggregate(): auto = blck.message.body.sync_aggregate
+  template sync_committee_bits(): auto = sync_aggregate.sync_committee_bits
+  let num_active_participants = countOnes(sync_committee_bits).uint64
+  if num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
+    return
+
+  # Verify attested block (parent) is recent enough and that state is available
+  let
+    earliest_slot = dag.computeEarliestLightClientSlot
+    attested_slot = parent.slot
+  if attested_slot < earliest_slot:
+    return
+
+  # Verify signature does not skip a sync committee period
+  let
+    signature_slot = blck.message.slot
+    signature_period = signature_slot.sync_committee_period
+    attested_period = attested_slot.sync_committee_period
+  if signature_period > attested_period + 1:
+    return
+
+  # Update to new `OptimisticLightClientUpdate` if it attests to a later slot
+  lazy_header(attested_header)
+  template optimistic_update(): auto = dag.lightClientDb.optimisticUpdate
+  if attested_slot > optimistic_update.attested_header.slot:
+    optimistic_update.attested_header
+      .assign_attested_header(parent)
+    optimistic_update.sync_aggregate =
+      isomorphicCast[SyncAggregate](sync_aggregate)
+    optimistic_update.fork_version =
+      state.data.fork.current_version
+    optimistic_update.is_signed_by_next_sync_committee =
+      signature_period == attested_period + 1
+    if dag.onOptimisticLightClientUpdate != nil:
+      dag.onOptimisticLightClientUpdate(optimistic_update)
+
+  # Update to new latest `LightClientUpdate` if it attests to a later slot
+  lazy_data(attested_data)
+  lazy_data(finalized_data)
+  lazy_header(finalized_header)
+  template latest_update(): auto = dag.lightClientDb.latestUpdate
+  if attested_slot > latest_update.attested_header.slot:
+    latest_update.attested_header
+      .assign_attested_header(parent)
+    latest_update.sync_aggregate =
+      isomorphicCast[SyncAggregate](sync_aggregate)
+    latest_update.fork_version =
+      state.data.fork.current_version
+
+    load_attested_data(parent)
+    let finalized_slot = attested_data.finalized_bid.slot
+    if finalized_slot + UPDATE_TIMEOUT < attested_slot or
+        finalized_slot < earliest_slot:
+      latest_update.finalized_header = BeaconBlockHeader()
+      latest_update.finality_branch.fill(Eth2Digest())
+      if signature_period == attested_period + 1:
+        latest_update.next_sync_committee = SyncCommittee()
+        latest_update.next_sync_committee_branch.fill(Eth2Digest())
+      else:
+        latest_update.next_sync_committee =
+          state.data.next_sync_committee
+        latest_update.next_sync_committee_branch =
+          attested_data.next_sync_committee_branch
+    else:
+      latest_update.finalized_header
+        .assign_finalized_header(attested_data.finalized_bid)
+      latest_update.finality_branch =
+        attested_data.finality_branch
+      if signature_period == finalized_slot.sync_committee_period + 1:
+        latest_update.next_sync_committee = SyncCommittee()
+        latest_update.next_sync_committee_branch.fill(Eth2Digest())
+      else:
+        load_finalized_data(attested_data.finalized_bid)
+        latest_update.next_sync_committee =
+          state.data.next_sync_committee
+        latest_update.next_sync_committee_branch =
+          finalized_data.next_sync_committee_branch
+
+  # Update best `LightClientUpdate` for current period if it improved
+  if signature_period == attested_period:
+    let isNextSyncCommitteeFinalized =
+      signature_period.start_slot <= dag.finalizedHead.slot
+    var best_update =
+      if isNextSyncCommitteeFinalized:
+        dag.lightClientDb.bestUpdates.getOrDefault(signature_period)
+      else:
+        let key = (signature_period, state.syncCommitteeRoot)
+        dag.lightClientDb.pendingBestUpdates.getOrDefault(key)
+
+    type Verdict = enum
+      unknown
+      new_update_is_worse
+      new_update_is_better
+    var verdict = unknown
+
+    # If no best update has been recorded, new update is better
+    template best_sync_aggregate(): auto = best_update.sync_aggregate
+    let best_num_active_participants =
+      countOnes(best_sync_aggregate.sync_committee_bits).uint64
+    if best_num_active_participants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
+      verdict = new_update_is_better
+    else:
+      # If finality changes, the finalized update is better
+      template finalized_period_at_signature(): auto =
+        state.data.finalized_checkpoint.epoch.sync_committee_period
+      template best_attested_slot(): auto =
+        best_update.attested_header.slot
+      if best_update.finality_branch.isZeroMemory:
+        if (attested_slot > dag.finalizedHead.slot or
+            attested_slot > best_attested_slot) and
+            signature_period == finalized_period_at_signature:
+          load_attested_data(parent)
+          let finalized_slot = attested_data.finalized_bid.slot
+          if signature_period == finalized_slot.sync_committee_period and
+              finalized_slot >= earliest_slot:
+            verdict = new_update_is_better
+      elif attested_slot > dag.finalizedHead.slot or
+          attested_slot < best_attested_slot:
+        if signature_period == finalized_period_at_signature:
+          load_attested_data(parent)
+          let finalized_slot = attested_data.finalized_bid.slot
+          if signature_period != finalized_slot.sync_committee_period or
+              finalized_slot < earliest_slot:
+            verdict = new_update_is_worse
+        else:
+          verdict = new_update_is_worse
+      if verdict == unknown:
+        # If participation changes, higher participation is better
+        if num_active_participants < best_num_active_participants:
+          verdict = new_update_is_worse
+        elif num_active_participants > best_num_active_participants:
+          verdict = new_update_is_better
+        else:
+          # Older updates are better
+          if attested_slot >= best_attested_slot:
+            verdict = new_update_is_worse
+          else:
+            verdict = new_update_is_better
+
+    if verdict == new_update_is_better:
+      best_update.attested_header
+        .assign_attested_header(parent)
+      best_update.sync_aggregate =
+        isomorphicCast[SyncAggregate](sync_aggregate)
+      best_update.fork_version =
+        state.data.fork.current_version
+
+      load_attested_data(parent)
+      let finalized_slot = attested_data.finalized_bid.slot
+      if signature_period != finalized_slot.sync_committee_period or
+          finalized_slot < earliest_slot:
+        best_update.finalized_header = BeaconBlockHeader()
+        best_update.finality_branch.fill(Eth2Digest())
+        best_update.next_sync_committee =
+          state.data.next_sync_committee
+        best_update.next_sync_committee_branch =
+          attested_data.next_sync_committee_branch
+      else:
+        best_update.finalized_header
+          .assign_finalized_header(attested_data.finalized_bid)
+        best_update.finality_branch =
+          attested_data.finality_branch
+        load_finalized_data(attested_data.finalized_bid)
+        best_update.next_sync_committee =
+          state.data.next_sync_committee
+        best_update.next_sync_committee_branch =
+          finalized_data.next_sync_committee_branch
+
+      if isNextSyncCommitteeFinalized:
+        dag.lightClientDb.bestUpdates[signature_period] = best_update
+        debug "Best `LightClientUpdate` improved",
+          period = signature_period, update = best_update
+      else:
+        let key = (signature_period, state.syncCommitteeRoot)
+        dag.lightClientDb.pendingBestUpdates[key] = best_update
+        debug "Best `LightClientUpdate` improved",
+          period = key, update = best_update
+
+  let endTick = Moment.now()
+  if endTick - startTick > chronos.milliseconds(100):
+    debug "`LightClientUpdate` creation took longer than usual",
+      root = dag.head.root, slot = dag.head.slot,
+      createDur = endTick - startTick
```
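The verdict logic above compresses to a simple ranking. A toy reformulation (deliberately simplified; it ignores the earliest-slot and period checks of the real code): updates carrying a valid finality proof win, then higher sync committee participation, then the older attested slot as tiebreaker.

```nim
type Candidate = object
  hasFinality: bool     # finality_branch is non-zero and usable
  participants: int     # countOnes(sync_committee_bits)
  attestedSlot: uint64

func isBetterThan(a, best: Candidate): bool =
  if a.hasFinality != best.hasFinality:
    return a.hasFinality            # finalized updates have precedence
  if a.participants != best.participants:
    return a.participants > best.participants
  a.attestedSlot < best.attestedSlot  # older updates win ties

doAssert Candidate(hasFinality: true, participants: 10).isBetterThan(
  Candidate(hasFinality: false, participants: 500))
```

Next come the hooks the rest of the DAG calls into.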
```diff
+proc processNewBlockForLightClient*(
+    dag: ChainDAGRef,
+    state: StateData,
+    signedBlock: ForkyTrustedSignedBeaconBlock,
+    parent: BlockRef) =
+  ## Update light client data with information from a new block.
+  if dag.importLightClientData == ImportLightClientData.None:
+    return
+  if signedBlock.message.slot < dag.computeEarliestLightClientSlot:
+    return
+
+  when signedBlock is bellatrix.TrustedSignedBeaconBlock:
+    dag.cacheLightClientData(state.data.bellatrixData, signedBlock)
+    dag.createLightClientUpdates(state.data.bellatrixData, signedBlock, parent)
+  elif signedBlock is altair.TrustedSignedBeaconBlock:
+    dag.cacheLightClientData(state.data.altairData, signedBlock)
+    dag.createLightClientUpdates(state.data.altairData, signedBlock, parent)
+  elif signedBlock is phase0.TrustedSignedBeaconBlock:
+    discard
+  else:
+    {.error: "Unreachable".}
+
+proc processHeadChangeForLightClient*(dag: ChainDAGRef) =
+  ## Update light client data to account for a new head block.
+  ## Note that `dag.finalizedHead` is not yet updated when this is called.
+  if dag.importLightClientData == ImportLightClientData.None:
+    return
+  if dag.head.slot < dag.computeEarliestLightClientSlot:
+    return
+
+  let headPeriod = dag.head.slot.sync_committee_period
+  if headPeriod.start_slot > dag.finalizedHead.slot:
+    let finalizedPeriod = dag.finalizedHead.slot.sync_committee_period
+    if headPeriod > finalizedPeriod + 1:
+      var tmpState = assignClone(dag.headState)
+      for period in finalizedPeriod + 1 ..< headPeriod:
+        let key = (period, dag.syncCommitteeRootForPeriod(tmpState, period))
+        dag.lightClientDb.bestUpdates[period] =
+          dag.lightClientDb.pendingBestUpdates.getOrDefault(key)
+    withState(dag.headState.data):
+      when stateFork >= BeaconStateFork.Altair:
+        let key = (headPeriod, state.syncCommitteeRoot)
+        dag.lightClientDb.bestUpdates[headPeriod] =
+          dag.lightClientDb.pendingBestUpdates.getOrDefault(key)
+      else: raiseAssert "Unreachable"
+
+proc processFinalizationForLightClient*(dag: ChainDAGRef) =
+  ## Prune cached data that is no longer useful for creating future
+  ## `LightClientUpdate` and `LightClientBootstrap` instances.
+  ## This needs to be called whenever `finalized_checkpoint` changes.
+  if dag.importLightClientData == ImportLightClientData.None:
+    return
+  let
+    altairStartSlot = dag.cfg.ALTAIR_FORK_EPOCH.start_slot
+    finalizedSlot = dag.finalizedHead.slot
+  if finalizedSlot < altairStartSlot:
+    return
+  let earliestSlot = dag.computeEarliestLightClientSlot
+
+  # Keep track of latest four finalized checkpoints
+  let
+    lastIndex = dag.lightClientDb.lastCheckpointIndex
+    lastCheckpoint = addr dag.lightClientDb.latestCheckpoints[lastIndex]
+  if dag.finalizedHead.slot.epoch != lastCheckpoint.epoch or
+      dag.finalizedHead.blck.root != lastCheckpoint.root:
+    let
+      nextIndex = (lastIndex + 1) mod dag.lightClientDb.latestCheckpoints.len
+      nextCheckpoint = addr dag.lightClientDb.latestCheckpoints[nextIndex]
+    nextCheckpoint[].epoch = dag.finalizedHead.slot.epoch
+    nextCheckpoint[].root = dag.finalizedHead.blck.root
+    dag.lightClientDb.lastCheckpointIndex = nextIndex
+
+    # Cache `LightClientBootstrap` for newly finalized epoch boundary blocks.
+    # Epoch boundary blocks are the block for the initial slot of an epoch,
+    # or the most recent block if no block was proposed at that slot
+    let lowSlot = max(lastCheckpoint.epoch.start_slot, earliestSlot)
+    var boundarySlot = dag.finalizedHead.slot
+    while boundarySlot >= lowSlot:
+      let blck = dag.getBlockAtSlot(boundarySlot).blck
+      if blck.slot >= lowSlot:
+        dag.lightClientDb.cachedBootstrap[blck.slot] =
+          CachedLightClientBootstrap(
+            current_sync_committee_branch:
+              dag.getLightClientData(blck.bid).current_sync_committee_branch)
+      boundarySlot = blck.slot.nextEpochBoundarySlot
+      if boundarySlot < SLOTS_PER_EPOCH:
+        break
+      boundarySlot -= SLOTS_PER_EPOCH
+
+  # Prune light client data that is no longer relevant,
+  # i.e., can no longer be referred to by future updates, or is too old
+  var bidsToDelete: seq[BlockId]
+  for bid, data in dag.lightClientDb.cachedData:
+    if bid.slot >= earliestSlot:
+      if bid.slot >= finalizedSlot:
+        continue
+      if dag.lightClientDb.latestCheckpoints.anyIt(bid.root == it.root):
+        continue
+    bidsToDelete.add bid
+  for bid in bidsToDelete:
+    dag.lightClientDb.cachedData.del bid
+
+  # Prune bootstrap data that is no longer relevant
+  var slotsToDelete: seq[Slot]
+  for slot in dag.lightClientDb.cachedBootstrap.keys:
+    if slot < earliestSlot:
+      slotsToDelete.add slot
+  for slot in slotsToDelete:
+    dag.lightClientDb.cachedBootstrap.del slot
+
+  # Prune best `LightClientUpdate` that are no longer relevant
+  let earliestPeriod = earliestSlot.sync_committee_period
+  var periodsToDelete: seq[SyncCommitteePeriod]
+  for period in dag.lightClientDb.bestUpdates.keys:
+    if period < earliestPeriod:
+      periodsToDelete.add period
+  for period in periodsToDelete:
+    dag.lightClientDb.bestUpdates.del period
+
+  # Prune best `LightClientUpdate` referring to non-finalized sync committees
+  # that are no longer relevant, i.e., orphaned or too old
+  let finalizedPeriod = finalizedSlot.sync_committee_period
+  var keysToDelete: seq[(SyncCommitteePeriod, Eth2Digest)]
+  for (period, syncCommitteeRoot) in dag.lightClientDb.pendingBestUpdates.keys:
+    if period <= finalizedPeriod:
+      keysToDelete.add (period, syncCommitteeRoot)
+  for key in keysToDelete:
+    dag.lightClientDb.pendingBestUpdates.del key
```
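All four pruning passes above share the same shape because Nim's `Table` does not support deletion while iterating: collect the doomed keys first, then delete. In miniature:

```nim
import std/tables

var bestUpdates = {1'u64: "old", 7'u64: "recent"}.toTable
let earliestPeriod = 5'u64

# Phase 1: collect keys to delete while iterating.
var periodsToDelete: seq[uint64]
for period in bestUpdates.keys:
  if period < earliestPeriod:
    periodsToDelete.add period
# Phase 2: delete outside the iteration.
for period in periodsToDelete:
  bestUpdates.del period

doAssert 1'u64 notin bestUpdates and 7'u64 in bestUpdates
```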
```diff
+proc initBestLightClientUpdateForPeriod(
+    dag: ChainDAGRef, period: SyncCommitteePeriod) =
+  ## Compute and cache the `LightClientUpdate` with the most sync committee
+  ## signatures (i.e., participation) for a given sync committee period.
+  let periodStartSlot = period.start_slot
+  if periodStartSlot > dag.finalizedHead.slot:
+    return
+  let
+    earliestSlot = dag.computeEarliestLightClientSlot
+    periodEndSlot = periodStartSlot + SLOTS_PER_SYNC_COMMITTEE_PERIOD - 1
+  if periodEndSlot < earliestSlot:
+    return
+  if dag.lightClientDb.bestUpdates.hasKey(period):
+    return
+  let startTick = Moment.now()
+  debug "Computing best `LightClientUpdate`", period
+  defer:
+    let endTick = Moment.now()
+    debug "Best `LightClientUpdate` computed",
+      period, update = dag.lightClientDb.bestUpdates.getOrDefault(period),
+      computeDur = endTick - startTick
+
+  proc maxParticipantsBlock(highBlck: BlockRef, lowSlot: Slot): BlockRef =
+    ## Determine the earliest block with most sync committee signatures among
+    ## ancestors of `highBlck` with at least `lowSlot` as parent block slot.
+    ## Return `nil` if no block with `MIN_SYNC_COMMITTEE_PARTICIPANTS` is found.
+    var
+      maxParticipants = 0
+      maxBlockRef: BlockRef
+      blockRef = highBlck
+    while blockRef.parent != nil and blockRef.parent.slot >= lowSlot:
+      let numParticipants =
+        withBlck(dag.getForkedBlock(blockRef)):
+          when stateFork >= BeaconStateFork.Altair:
+            countOnes(blck.message.body.sync_aggregate.sync_committee_bits)
+          else: raiseAssert "Unreachable"
+      if numParticipants >= maxParticipants:
+        maxParticipants = numParticipants
+        maxBlockRef = blockRef
+      blockRef = blockRef.parent
+    if maxParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
+      maxBlockRef = nil
+    maxBlockRef
+
+  # Determine the block in the period with highest sync committee participation
+  let
+    lowSlot = max(periodStartSlot, earliestSlot)
+    highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
+    highBlck = dag.getBlockAtSlot(highSlot).blck
+    bestNonFinalizedRef = maxParticipantsBlock(highBlck, lowSlot)
+  if bestNonFinalizedRef == nil:
+    dag.lightClientDb.bestUpdates[period] = default(altair.LightClientUpdate)
+    return
+
+  # The block with highest participation may refer to a `finalized_checkpoint`
+  # in a different sync committee period. If that is the case, search for a
+  # later block with a `finalized_checkpoint` within the given sync committee
+  # period, despite it having a lower sync committee participation
+  var
+    tmpState = assignClone(dag.headState)
+    bestFinalizedRef = bestNonFinalizedRef
+    finalizedBlck {.noinit.}: BlockRef
+  while bestFinalizedRef != nil:
+    let
+      finalizedEpoch = block:
+        dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
+          withState(stateData.data):
+            when stateFork >= BeaconStateFork.Altair:
+              state.data.finalized_checkpoint.epoch
+            else: raiseAssert "Unreachable"
+        do: raiseAssert "Unreachable"
+      finalizedEpochStartSlot = finalizedEpoch.start_slot
+    if finalizedEpochStartSlot >= lowSlot:
+      finalizedBlck = dag.getBlockAtSlot(finalizedEpochStartSlot).blck
+      if finalizedBlck.slot >= lowSlot:
+        break
+    bestFinalizedRef = maxParticipantsBlock(highBlck, bestFinalizedRef.slot + 1)
+
+  # If a finalized block has been found within the sync commitee period,
+  # create a `LightClientUpdate` for that one. Otherwise, create a non-finalized
+  # `LightClientUpdate`
+  var update {.noinit.}: LightClientUpdate
+  if bestFinalizedRef != nil:
+    # Fill data from attested block
+    dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
+      let bdata = dag.getForkedBlock(blck)
+      withStateAndBlck(stateData.data, bdata):
+        when stateFork >= BeaconStateFork.Altair:
+          update.attested_header =
+            BeaconBlockHeader.fromBlock(blck)
+          state.data.build_proof(
+            altair.FINALIZED_ROOT_INDEX, update.finality_branch)
+        else: raiseAssert "Unreachable"
+    do: raiseAssert "Unreachable"
+
+    # Fill data from signature block
+    let bdata = dag.getForkedBlock(bestFinalizedRef)
+    withBlck(bdata):
+      when stateFork >= BeaconStateFork.Altair:
+        update.sync_aggregate =
+          isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
+      else: raiseAssert "Unreachable"
+    update.fork_version =
+      dag.cfg.forkAtEpoch(bestFinalizedRef.slot.epoch).current_version
+
+    # Fill data from finalized block
+    dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do:
+      let bdata = dag.getForkedBlock(blck)
+      withStateAndBlck(stateData.data, bdata):
+        when stateFork >= BeaconStateFork.Altair:
+          update.next_sync_committee =
+            state.data.next_sync_committee
+          state.data.build_proof(
+            altair.NEXT_SYNC_COMMITTEE_INDEX, update.next_sync_committee_branch)
+          update.finalized_header =
+            BeaconBlockHeader.fromBlock(blck)
+        else: raiseAssert "Unreachable"
+    do: raiseAssert "Unreachable"
+  else:
+    # Fill data from attested block
+    dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do:
+      let bdata = dag.getForkedBlock(blck)
+      withStateAndBlck(stateData.data, bdata):
+        when stateFork >= BeaconStateFork.Altair:
+          update.attested_header =
+            BeaconBlockHeader.fromBlock(blck)
+          update.next_sync_committee =
+            state.data.next_sync_committee
+          state.data.build_proof(
+            altair.NEXT_SYNC_COMMITTEE_INDEX, update.next_sync_committee_branch)
+          update.finalized_header = BeaconBlockHeader()
+          update.finality_branch.fill(Eth2Digest())
+        else: raiseAssert "Unreachable"
+    do: raiseAssert "Unreachable"
+
+    # Fill data from signature block
+    let bdata = dag.getForkedBlock(bestNonFinalizedRef)
+    withBlck(bdata):
+      when stateFork >= BeaconStateFork.Altair:
+        update.sync_aggregate =
+          isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
+      else: raiseAssert "Unreachable"
+    update.fork_version =
+      dag.cfg.forkAtEpoch(bestNonFinalizedRef.slot.epoch).current_version
+  dag.lightClientDb.bestUpdates[period] = update
```
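The nested `maxParticipantsBlock` is a backwards argmax over the parent chain. A self-contained miniature (dropping the fork dispatch and participant-minimum details); the `>=` comparison is what makes the walk prefer the earliest of equally good blocks, since it runs from newest to oldest:

```nim
type Blk = ref object
  slot: uint64
  participants: int
  parent: Blk

func maxParticipantsBlock(highBlck: Blk, lowSlot: uint64): Blk =
  var
    maxParticipants = -1
    best: Blk
    blk = highBlck
  while blk.parent != nil and blk.parent.slot >= lowSlot:
    if blk.participants >= maxParticipants:  # `>=` lets an older ancestor win ties
      maxParticipants = blk.participants
      best = blk
    blk = blk.parent
  best

let
  b1 = Blk(slot: 1, participants: 3)
  b2 = Blk(slot: 2, participants: 7, parent: b1)
  b3 = Blk(slot: 3, participants: 7, parent: b2)
doAssert maxParticipantsBlock(b3, 1).slot == 2  # earliest of the tied blocks
```

The module closes with bootstrap precompute, startup initialization, and the query procs.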
proc initLightClientBootstrapForPeriod(
|
||||||
|
dag: ChainDAGRef,
|
||||||
|
period: SyncCommitteePeriod) =
|
||||||
|
## Compute and cache `LightClientBootstrap` data for all epoch boundary blocks
|
||||||
|
## within a given sync committee period.
|
||||||
|
let periodStartSlot = period.start_slot
|
||||||
|
if periodStartSlot > dag.finalizedHead.slot:
|
||||||
|
return
|
||||||
|
let
|
||||||
|
earliestSlot = dag.computeEarliestLightClientSlot
|
||||||
|
periodEndSlot = periodStartSlot + SLOTS_PER_SYNC_COMMITTEE_PERIOD - 1
|
||||||
|
if periodEndSlot < earliestSlot:
|
||||||
|
return
|
||||||
|
|
||||||
|
let startTick = Moment.now()
|
||||||
|
debug "Caching `LightClientBootstrap` data", period
|
||||||
|
defer:
|
||||||
|
let endTick = Moment.now()
|
||||||
|
debug "`LightClientBootstrap` data cached", period,
|
||||||
|
cacheDur = endTick - startTick
|
||||||
|
|
||||||
|
let
|
||||||
|
lowSlot = max(periodStartSlot, earliestSlot)
|
||||||
|
highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
|
||||||
|
lowBoundarySlot = lowSlot.nextEpochBoundarySlot
|
||||||
|
highBoundarySlot = highSlot.nextEpochBoundarySlot
|
||||||
|
var
|
||||||
|
tmpState = assignClone(dag.headState)
|
||||||
|
tmpCache: StateCache
|
||||||
|
nextBoundarySlot = lowBoundarySlot
|
||||||
|
while nextBoundarySlot <= highBoundarySlot:
|
||||||
|
let
|
||||||
|
blck = dag.getBlockAtSlot(nextBoundarySlot).blck
|
||||||
|
boundarySlot = blck.slot.nextEpochBoundarySlot
|
||||||
|
if boundarySlot == nextBoundarySlot and
|
||||||
|
blck.slot >= lowSlot and blck.slot <= highSlot and
|
||||||
|
not dag.lightClientDb.cachedBootstrap.hasKey(blck.slot):
|
||||||
|
var cachedBootstrap {.noinit.}: CachedLightClientBootstrap
|
||||||
|
doAssert dag.updateStateData(
|
||||||
|
tmpState[], blck.atSlot, save = false, tmpCache)
|
||||||
|
withStateVars(tmpState[]):
|
||||||
|
withState(stateData.data):
|
||||||
|
when stateFork >= BeaconStateFork.Altair:
|
||||||
|
state.data.build_proof(
|
||||||
|
altair.CURRENT_SYNC_COMMITTEE_INDEX,
|
||||||
|
cachedBootstrap.current_sync_committee_branch)
|
||||||
|
else: raiseAssert "Unreachable"
|
||||||
|
dag.lightClientDb.cachedBootstrap[blck.slot] = cachedBootstrap
|
||||||
|
nextBoundarySlot += SLOTS_PER_EPOCH
|
||||||
|
|
||||||
|
proc initLightClientDb*(dag: ChainDAGRef) =
  ## Initialize cached light client data
  if dag.importLightClientData == ImportLightClientData.None:
    return
  let earliestSlot = dag.computeEarliestLightClientSlot
  if dag.head.slot < earliestSlot:
    return

  let
    finalizedSlot = dag.finalizedHead.slot
    finalizedPeriod = finalizedSlot.sync_committee_period
  dag.initBestLightClientUpdateForPeriod(finalizedPeriod)

  let lightClientStartTick = Moment.now()
  debug "Initializing cached light client data"

  # Build a list of blocks to process.
  # As it is slow to load states in descending order,
  # first build a todo list, then process it in ascending order
  let lowSlot = max(finalizedSlot, dag.computeEarliestLightClientSlot)
  var
    blocksBetween = newSeqOfCap[BlockRef](dag.head.slot - lowSlot + 1)
    blockRef = dag.head
  while blockRef.slot > lowSlot:
    blocksBetween.add blockRef
    blockRef = blockRef.parent
  blocksBetween.add blockRef

  # Process blocks (reuses `dag.headState`, but restores it to the current head)
  var
    tmpState = assignClone(dag.headState)
    tmpCache, cache: StateCache
    oldCheckpoint: Checkpoint
    checkpointIndex = 0
  for i in countdown(blocksBetween.high, blocksBetween.low):
    blockRef = blocksBetween[i]
    doAssert dag.updateStateData(
      dag.headState, blockRef.atSlot(blockRef.slot), save = false, cache)
    withStateVars(dag.headState):
      let bdata = dag.getForkedBlock(blck)
      withStateAndBlck(stateData.data, bdata):
        when stateFork >= BeaconStateFork.Altair:
          # Cache data for `LightClientUpdate` of descendant blocks
          dag.cacheLightClientData(state, blck, isNew = false)

          # Cache data for the block's `finalized_checkpoint`.
          # The `finalized_checkpoint` may refer to:
          # 1. `finalizedHead.blck -> finalized_checkpoint`
          #    This may happen when there were skipped slots.
          # 2. `finalizedHead -> finalized_checkpoint`
          # 3. One epoch boundary that got justified then finalized
          #    between `finalizedHead -> finalized_checkpoint`
          #    and `finalizedHead`
          # 4. `finalizedHead`
          let checkpoint = state.data.finalized_checkpoint
          if checkpoint != oldCheckpoint:
            oldCheckpoint = checkpoint
            doAssert checkpointIndex < dag.lightClientDb.latestCheckpoints.len
            dag.lightClientDb.latestCheckpoints[checkpointIndex] = checkpoint
            dag.lightClientDb.lastCheckpointIndex = checkpointIndex
            inc checkpointIndex

            # Save new checkpoint block using `tmpState` (avoid replay after it)
            if checkpoint.root != dag.finalizedHead.blck.root:
              let cpRef =
                dag.getBlockAtSlot(checkpoint.epoch.start_slot).blck
              if cpRef != nil and cpRef.slot >= earliestSlot:
                assert cpRef.bid.root == checkpoint.root
                doAssert dag.updateStateData(
                  tmpState[], cpRef.atSlot, save = false, tmpCache)
                withStateVars(tmpState[]):
                  let bdata = dag.getForkedBlock(blck)
                  withStateAndBlck(stateData.data, bdata):
                    when stateFork >= BeaconStateFork.Altair:
                      dag.cacheLightClientData(state, blck, isNew = false)
                    else: raiseAssert "Unreachable"

          # Create `LightClientUpdate` for non-finalized blocks.
          if blockRef.slot > finalizedSlot:
            dag.createLightClientUpdates(state, blck, blockRef.parent)
        else: raiseAssert "Unreachable"

  let lightClientEndTick = Moment.now()
  debug "Initialized cached light client data",
    initDur = lightClientEndTick - lightClientStartTick

  # Import historic data
  if dag.importLightClientData == ImportLightClientData.Full:
    let
      earliestSlot = dag.computeEarliestLightClientSlot
      earliestPeriod = earliestSlot.sync_committee_period
    for period in earliestPeriod ..< finalizedPeriod:
      dag.initBestLightClientUpdateForPeriod(period)
      dag.initLightClientBootstrapForPeriod(period)
    dag.initLightClientBootstrapForPeriod(finalizedPeriod)

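The import-mode checks above branch on `ImportLightClientData`. For orientation, a minimal sketch of what that enum plausibly looks like; the actual declaration lives in `block_pools_types_light_client`, and the per-value comments are assumptions inferred from the CLI option text, not the real source:

```nim
# Sketch only: the actual declaration is in
# consensus_object_pools/block_pools_types_light_client.
type
  ImportLightClientData* {.pure.} = enum
    None      ## Import no historic light client data.
    OnlyNew   ## Import only data for newly added blocks.
    Full      ## Import data for all blocks back to Altair (slow startup).
    OnDemand  ## Compute historic data lazily when requested.
```
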
proc getBestLightClientUpdateForPeriod*(
    dag: ChainDAGRef,
    period: SyncCommitteePeriod): Option[altair.LightClientUpdate] =
  if not dag.serveLightClientData:
    return none(altair.LightClientUpdate)

  if dag.importLightClientData == ImportLightClientData.OnDemand:
    dag.initBestLightClientUpdateForPeriod(period)
  result = some(dag.lightClientDb.bestUpdates.getOrDefault(period))
  let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
  if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
    result = none(altair.LightClientUpdate)

proc getLatestLightClientUpdate*(
    dag: ChainDAGRef): Option[altair.LightClientUpdate] =
  if not dag.serveLightClientData:
    return none(altair.LightClientUpdate)

  result = some(dag.lightClientDb.latestUpdate)
  let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
  if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
    result = none(altair.LightClientUpdate)

proc getOptimisticLightClientUpdate*(
    dag: ChainDAGRef): Option[OptimisticLightClientUpdate] =
  if not dag.serveLightClientData:
    return none(OptimisticLightClientUpdate)

  result = some(dag.lightClientDb.optimisticUpdate)
  let numParticipants = countOnes(result.get.sync_aggregate.sync_committee_bits)
  if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
    result = none(OptimisticLightClientUpdate)

proc getLightClientBootstrap*(
    dag: ChainDAGRef,
    blockRoot: Eth2Digest): Option[altair.LightClientBootstrap] =
  if not dag.serveLightClientData:
    return none(altair.LightClientBootstrap)

  let blck = dag.getForkedBlock(blockRoot)
  if blck.isErr:
    debug "`LightClientBootstrap` unavailable: Block not found", blockRoot
    return none(altair.LightClientBootstrap)

  withBlck(blck.get):
    let slot = blck.message.slot
    when stateFork >= BeaconStateFork.Altair:
      let earliestSlot = dag.computeEarliestLightClientSlot
      if slot < earliestSlot:
        debug "`LightClientBootstrap` unavailable: Block too old", slot
        return none(altair.LightClientBootstrap)
      if slot > dag.finalizedHead.blck.slot:
        debug "`LightClientBootstrap` unavailable: Not finalized", blockRoot
        return none(altair.LightClientBootstrap)
      var cachedBootstrap =
        dag.lightClientDb.cachedBootstrap.getOrDefault(slot)
      if cachedBootstrap.current_sync_committee_branch.isZeroMemory:
        if dag.importLightClientData == ImportLightClientData.OnDemand:
          var tmpState = assignClone(dag.headState)
          dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot)) do:
            withState(stateData.data):
              when stateFork >= BeaconStateFork.Altair:
                state.data.build_proof(
                  altair.CURRENT_SYNC_COMMITTEE_INDEX,
                  cachedBootstrap.current_sync_committee_branch)
              else: raiseAssert "Unreachable"
          do: raiseAssert "Unreachable"
          dag.lightClientDb.cachedBootstrap[slot] = cachedBootstrap
        else:
          debug "`LightClientBootstrap` unavailable: Data not cached", slot
          return none(altair.LightClientBootstrap)

      var tmpState = assignClone(dag.headState)
      var bootstrap {.noinit.}: altair.LightClientBootstrap
      bootstrap.header =
        BeaconBlockHeader.fromBlock(blck)
      bootstrap.current_sync_committee =
        dag.currentSyncCommitteeForPeriod(tmpState, slot.sync_committee_period)
      bootstrap.current_sync_committee_branch =
        cachedBootstrap.current_sync_committee_branch
      return some(bootstrap)
    else:
      debug "`LightClientBootstrap` unavailable: Block before Altair", slot
      return none(altair.LightClientBootstrap)

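All three `get*Update` accessors above apply the same participant-count guard before returning. A minimal sketch of how that guard could be factored into a shared helper; the helper name is hypothetical and not part of this patch, and `countOnes` plus `MIN_SYNC_COMMITTEE_PARTICIPANTS` are assumed from the surrounding module:

```nim
import std/options

# Hypothetical helper: discard an update whose sync aggregate has fewer
# than MIN_SYNC_COMMITTEE_PARTICIPANTS participation bits set.
proc filterByParticipants[T](update: Option[T]): Option[T] =
  if update.isSome and
      countOnes(update.get.sync_aggregate.sync_committee_bits) <
        MIN_SYNC_COMMITTEE_PARTICIPANTS:
    none(T)
  else:
    update
```
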
@@ -1,3 +1,4 @@
+# beacon_chain
 # Copyright (c) 2018-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
@@ -157,6 +158,8 @@ proc init*(T: type BeaconNode,
     eventBus.emit("finalization", data)
   proc onSyncContribution(data: SignedContributionAndProof) =
     eventBus.emit("sync-contribution-and-proof", data)
+  proc onOptimisticLightClientUpdate(data: OptimisticLightClientUpdate) =
+    discard
 
   if config.finalizedCheckpointState.isSome:
     let checkpointStatePath = config.finalizedCheckpointState.get.string
@@ -321,11 +324,17 @@ proc init*(T: type BeaconNode,
   info "Loading block DAG from database", path = config.databaseDir
 
   let
-    chainDagFlags = if config.verifyFinalization: {verifyFinalization}
-                    else: {}
+    chainDagFlags =
+      if config.verifyFinalization: {verifyFinalization}
+      else: {}
+    onOptimisticLightClientUpdateCb =
+      if config.serveLightClientData: onOptimisticLightClientUpdate
+      else: nil
     dag = ChainDAGRef.init(
       cfg, db, validatorMonitor, chainDagFlags, onBlockAdded, onHeadChanged,
-      onChainReorg)
+      onChainReorg, onOptimisticLCUpdateCb = onOptimisticLightClientUpdateCb,
+      serveLightClientData = config.serveLightClientData,
+      importLightClientData = config.importLightClientData)
     quarantine = newClone(Quarantine.init())
     databaseGenesisValidatorsRoot =
       getStateField(dag.headState.data, genesis_validators_root)
@@ -236,6 +236,13 @@ template start_epoch*(period: SyncCommitteePeriod): Epoch =
   if period >= maxPeriod: FAR_FUTURE_EPOCH
   else: Epoch(period * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
 
+template start_slot*(period: SyncCommitteePeriod): Slot =
+  ## Return the start slot of ``period``.
+  const maxPeriod = SyncCommitteePeriod(
+    FAR_FUTURE_EPOCH div EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+  if period >= maxPeriod: FAR_FUTURE_SLOT
+  else: Slot(period * SLOTS_PER_SYNC_COMMITTEE_PERIOD)
+
 func `$`*(t: BeaconTime): string =
   if t.ns_since_genesis >= 0:
     $(timer.nanoseconds(t.ns_since_genesis))
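As a quick sanity check on the new template, the slot-based and epoch-based accessors agree, assuming the usual preset relation `SLOTS_PER_SYNC_COMMITTEE_PERIOD == EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH` (256 * 32 = 8192 slots per period on mainnet presets):

```nim
let period = SyncCommitteePeriod(2)
# Direct definition from the template above:
doAssert period.start_slot == Slot(period * SLOTS_PER_SYNC_COMMITTEE_PERIOD)
# Equivalent route via epochs, under the preset relation noted above:
doAssert period.start_slot == period.start_epoch.start_slot
```
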
@@ -27,7 +27,7 @@
 import
   std/[typetraits, sets, hashes],
   chronicles,
-  stew/[assign2, bitops2],
+  stew/[assign2, bitops2, objects],
   "."/[base, phase0]
 
 export base, sets
@@ -609,6 +609,16 @@ chronicles.formatIt SignedContributionAndProof: shortLog(it)
 template hash*(x: LightClientUpdate): Hash =
   hash(x.header)
 
+func shortLog*(v: LightClientUpdate): auto =
+  (
+    attested: shortLog(v.attested_header),
+    finalized: shortLog(v.finalized_header),
+    sync_committee_participants: countOnes(v.sync_aggregate.sync_committee_bits),
+    is_signed_by_next: v.next_sync_committee.isZeroMemory
+  )
+
+chronicles.formatIt LightClientUpdate: it.shortLog
+
 func clear*(info: var EpochInfo) =
   info.validators.setLen(0)
   info.balances = UnslashedParticipatingBalances()
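With the `formatIt` registration above, a `LightClientUpdate` passed to a chronicles log statement is rendered through `shortLog` rather than as a full object dump; for example (illustrative only, reusing the accessor added earlier in this patch):

```nim
let update = dag.getLatestLightClientUpdate
if update.isSome:
  # Logged as the compact tuple produced by shortLog, not the raw update.
  debug "Latest light client update", update = update.get
```
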
@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018-2021 Status Research & Development GmbH
+# Copyright (c) 2018-2022 Status Research & Development GmbH
 # Licensed and distributed under either of
 # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -30,6 +30,7 @@ import # Unit test
   ./test_helpers,
   ./test_honest_validator,
   ./test_interop,
+  ./test_light_client,
   ./test_message_signatures,
   ./test_peer_pool,
   ./test_spec,
@@ -0,0 +1,158 @@
# beacon_chain
# Copyright (c) 2021-2022 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  # Status libraries
  chronicles, eth/keys, stew/objects, taskpools,
  # Beacon chain internals
  ../beacon_chain/consensus_object_pools/
    [block_clearance, block_quarantine, blockchain_dag],
  ../beacon_chain/spec/[forks, helpers, light_client_sync, state_transition],
  # Test utilities
  ./testutil, ./testdbutil

suite "Light client" & preset():
  let
    cfg = block:
      var res = defaultRuntimeConfig
      res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1
      res
    altairStartSlot = cfg.ALTAIR_FORK_EPOCH.start_slot

  proc advanceToSlot(
      dag: ChainDAGRef,
      targetSlot: Slot,
      verifier: var BatchVerifier,
      quarantine: var Quarantine,
      attested = true,
      syncCommitteeRatio = 0.75) =
    var cache: StateCache
    const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
    while true:
      var slot = getStateField(dag.headState.data, slot)
      doAssert targetSlot >= slot
      if targetSlot == slot: break

      # When there is a large jump, skip to the end of the current period,
      # create blocks for a few epochs to finalize it, then proceed
      let
        nextPeriod = slot.sync_committee_period + 1
        periodEpoch = nextPeriod.start_epoch
        periodSlot = periodEpoch.start_slot
        checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
      if targetSlot > checkpointSlot:
        var info: ForkedEpochInfo
        doAssert process_slots(cfg, dag.headState.data, checkpointSlot,
                               cache, info, flags = {}).isOk()
        slot = checkpointSlot

      # Create blocks for final few epochs
      let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
      for blck in makeTestBlocks(dag.headState.data, cache, blocks.int,
                                 attested, syncCommitteeRatio, cfg):
        let added =
          case blck.kind
          of BeaconBlockFork.Phase0:
            const nilCallback = OnPhase0BlockAdded(nil)
            dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
          of BeaconBlockFork.Altair:
            const nilCallback = OnAltairBlockAdded(nil)
            dag.addHeadBlock(verifier, blck.altairData, nilCallback)
          of BeaconBlockFork.Bellatrix:
            const nilCallback = OnBellatrixBlockAdded(nil)
            dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
        check: added.isOk()
        dag.updateHead(added[], quarantine)

  setup:
    const num_validators = SLOTS_PER_EPOCH
    let
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = ChainDAGRef.init(
        cfg, makeTestDB(num_validators), validatorMonitor, {},
        serveLightClientData = true,
        importLightClientData = ImportLightClientData.OnlyNew)
      quarantine = newClone(Quarantine.init())
      taskpool = TaskPool.new()
    var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)

  test "Pre-Altair":
    # Genesis
    check:
      dag.headState.data.kind == BeaconStateFork.Phase0
      dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
      dag.getLatestLightClientUpdate.isNone

    # Advance to last slot before Altair
    dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
    check:
      dag.headState.data.kind == BeaconStateFork.Phase0
      dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
      dag.getLatestLightClientUpdate.isNone

    # Advance to Altair
    dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
    check:
      dag.headState.data.kind == BeaconStateFork.Altair
      dag.getBestLightClientUpdateForPeriod(0.SyncCommitteePeriod).isNone
      dag.getLatestLightClientUpdate.isNone

  test "Light client sync":
    # Advance to Altair
    dag.advanceToSlot(altairStartSlot, verifier, quarantine[])

    # Track trusted checkpoint for light client
    let
      genesis_validators_root = dag.genesisValidatorsRoot
      trusted_block_root = dag.headState.blck.root

    # Advance to target slot
    const
      headPeriod = 2.SyncCommitteePeriod
      periodEpoch = headPeriod.start_epoch
      headSlot = (periodEpoch + 2).start_slot + 5
    dag.advanceToSlot(headSlot, verifier, quarantine[])
    let currentSlot = getStateField(dag.headState.data, slot)

    # Initialize light client store
    let bootstrap = dag.getLightClientBootstrap(trusted_block_root)
    check bootstrap.isSome
    var storeRes = initialize_light_client_store(
      trusted_block_root, bootstrap.get)
    check storeRes.isSome
    template store(): auto = storeRes.get

    # Sync to latest sync committee period
    while store.finalized_header.slot.sync_committee_period + 1 < headPeriod:
      let
        period =
          if store.next_sync_committee.isZeroMemory:
            store.finalized_header.slot.sync_committee_period
          else:
            store.finalized_header.slot.sync_committee_period + 1
        bestUpdate = dag.getBestLightClientUpdateForPeriod(period)
        res = process_light_client_update(
          store, bestUpdate.get, currentSlot, cfg, genesis_validators_root)
      check:
        bestUpdate.isSome
        bestUpdate.get.finalized_header.slot.sync_committee_period == period
        res
        store.finalized_header == bestUpdate.get.finalized_header

    # Sync to latest update
    let
      latestUpdate = dag.getLatestLightClientUpdate
      res = process_light_client_update(
        store, latestUpdate.get, currentSlot, cfg, genesis_validators_root)
    check:
      latestUpdate.isSome
      latestUpdate.get.attested_header.slot == dag.headState.blck.parent.slot
      res
      store.finalized_header == latestUpdate.get.finalized_header
      store.optimistic_header == latestUpdate.get.attested_header