# beacon_chain
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}
import
  # Status libraries
  taskpools,
  # Beacon chain internals
  ../beacon_chain/consensus_object_pools/
    [block_clearance, block_quarantine, blockchain_dag],
  ../beacon_chain/spec/[forks, helpers, light_client_sync, state_transition],
  # Test utilities
  ./testutil, ./testdbutil

from ./testbcutil import addHeadBlock
|
suite "Light client" & preset():
  const # Test config, should be long enough to cover interesting transitions
    headPeriod = 3.SyncCommitteePeriod
  let
    cfg = block: # Fork schedule so that each `LightClientDataFork` is covered
      static: doAssert ConsensusFork.high == ConsensusFork.Deneb
      var res = defaultRuntimeConfig
      res.ALTAIR_FORK_EPOCH = 1.Epoch
      res.BELLATRIX_FORK_EPOCH = 2.Epoch
      res.CAPELLA_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 1).Epoch
      res.DENEB_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 2).Epoch
      res
    altairStartSlot = cfg.ALTAIR_FORK_EPOCH.start_slot

  proc advanceToSlot(
      dag: ChainDAGRef,
      targetSlot: Slot,
      verifier: var BatchVerifier,
      quarantine: var Quarantine,
      attested = true,
      syncCommitteeRatio = 0.82) =
    ## Advance `dag` head to `targetSlot` by producing and applying test
    ## blocks. For large jumps, empty slots are skipped up to a checkpoint
    ## near the end of each sync committee period, and only the final
    ## `maxAttestedSlotsPerPeriod` slots are filled with blocks (enough to
    ## finalize the period).
    var cache: StateCache
    const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
    while true:
      var slot = getStateField(dag.headState, slot)
      doAssert targetSlot >= slot
      if targetSlot == slot: break

      # When there is a large jump, skip to the end of the current period,
      # create blocks for a few epochs to finalize it, then proceed
      let
        nextPeriod = slot.sync_committee_period + 1
        periodEpoch = nextPeriod.start_epoch
        periodSlot = periodEpoch.start_slot
        checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
      if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
        var info: ForkedEpochInfo
        doAssert process_slots(cfg, dag.headState, checkpointSlot,
                               cache, info, flags = {}).isOk()
        slot = checkpointSlot

      # Create blocks for final few epochs
      let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
      for blck in makeTestBlocks(
          dag.headState, cache, blocks.int, attested = attested,
          syncCommitteeRatio = syncCommitteeRatio, cfg = cfg):
        let added = withBlck(blck):
          const nilCallback = (consensusFork.OnBlockAddedCallback)(nil)
          dag.addHeadBlock(verifier, forkyBlck, nilCallback)
        check: added.isOk()
        dag.updateHead(added[], quarantine, [])

  setup:
    const num_validators = SLOTS_PER_EPOCH
    let
      validatorMonitor = newClone(ValidatorMonitor.init())
      dag = ChainDAGRef.init(
        cfg, makeTestDB(num_validators, cfg = cfg), validatorMonitor, {},
        lcDataConfig = LightClientDataConfig(
          serve: true,
          importMode: LightClientDataImportMode.OnlyNew))
      quarantine = newClone(Quarantine.init())
      rng = HmacDrbgContext.new()
      taskpool = Taskpool.new()
    var
      verifier = BatchVerifier.init(rng, taskpool)

  test "Pre-Altair":
    # Genesis
    block:
      let
        update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
        finalityUpdate = dag.getLightClientFinalityUpdate
        optimisticUpdate = dag.getLightClientOptimisticUpdate
      check:
        dag.headState.kind == ConsensusFork.Phase0
        update.kind == LightClientDataFork.None
        finalityUpdate.kind == LightClientDataFork.None
        optimisticUpdate.kind == LightClientDataFork.None

    # Advance to last slot before Altair
    dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
    block:
      let
        update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
        finalityUpdate = dag.getLightClientFinalityUpdate
        optimisticUpdate = dag.getLightClientOptimisticUpdate
      check:
        dag.headState.kind == ConsensusFork.Phase0
        update.kind == LightClientDataFork.None
        finalityUpdate.kind == LightClientDataFork.None
        optimisticUpdate.kind == LightClientDataFork.None

    # Advance to Altair
    dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
    block:
      let
        update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
        finalityUpdate = dag.getLightClientFinalityUpdate
        optimisticUpdate = dag.getLightClientOptimisticUpdate
      check:
        dag.headState.kind == ConsensusFork.Altair
        update.kind == LightClientDataFork.None
        finalityUpdate.kind == LightClientDataFork.None
        optimisticUpdate.kind == LightClientDataFork.None

  test "Light client sync":
    # Advance to Altair
    dag.advanceToSlot(altairStartSlot, verifier, quarantine[])

    # Track trusted checkpoint for light client
    let
      genesis_validators_root = dag.genesis_validators_root
      trusted_block_root = dag.head.root

    # Advance to target slot
    const
      periodEpoch = headPeriod.start_epoch
      headSlot = (periodEpoch + 2).start_slot + 5
    dag.advanceToSlot(headSlot, verifier, quarantine[])
    let currentSlot = getStateField(dag.headState, slot)

    # Initialize light client store
    var bootstrap = dag.getLightClientBootstrap(trusted_block_root)
    check bootstrap.kind > LightClientDataFork.None
    var store {.noinit.}: ForkedLightClientStore
    withForkyBootstrap(bootstrap):
      when lcDataFork > LightClientDataFork.None:
        var storeRes = newClone(initialize_light_client_store(
          trusted_block_root, forkyBootstrap, cfg))
        check storeRes[].isOk
        store = newClone(ForkedLightClientStore.init(storeRes[].get))[]

    # Sync to latest sync committee period
    var numIterations = 0
    while true:
      let storePeriod = withForkyStore(store):
        when lcDataFork > LightClientDataFork.None:
          forkyStore.finalized_header.beacon.slot.sync_committee_period
        else:
          GENESIS_SLOT.SyncCommitteePeriod
      if storePeriod + 1 >= headPeriod:
        break
      let
        period = withForkyStore(store):
          when lcDataFork > LightClientDataFork.None:
            if forkyStore.is_next_sync_committee_known:
              storePeriod + 1
            else:
              storePeriod
          else:
            storePeriod
        update = dag.getLightClientUpdateForPeriod(period)
      check update.kind > LightClientDataFork.None
      if update.kind > store.kind:
        withForkyUpdate(update):
          when lcDataFork > LightClientDataFork.None:
            store.migrateToDataFork(lcDataFork)
      withForkyStore(store):
        when lcDataFork > LightClientDataFork.None:
          # Reduce stack size by making this a `proc`
          proc syncToPeriod() =
            bootstrap.migrateToDataFork(lcDataFork)
            template forkyBootstrap: untyped = bootstrap.forky(lcDataFork)
            let upgradedUpdate = update.migratingToDataFork(lcDataFork)
            template forkyUpdate: untyped = upgradedUpdate.forky(lcDataFork)
            let res = process_light_client_update(
              forkyStore, forkyUpdate, currentSlot, cfg,
              genesis_validators_root)
            check:
              forkyUpdate.finalized_header.beacon.slot.sync_committee_period ==
                period
              res.isOk
              if forkyUpdate.finalized_header.beacon.slot >
                  forkyBootstrap.header.beacon.slot:
                forkyStore.finalized_header == forkyUpdate.finalized_header
              else:
                forkyStore.finalized_header == forkyBootstrap.header
          syncToPeriod()
      inc numIterations
      if numIterations > 20: doAssert false # Avoid endless loop on test failure

    # Sync to latest update
    let finalityUpdate = dag.getLightClientFinalityUpdate
    check finalityUpdate.kind > LightClientDataFork.None
    if finalityUpdate.kind > store.kind:
      withForkyFinalityUpdate(finalityUpdate):
        when lcDataFork > LightClientDataFork.None:
          store.migrateToDataFork(lcDataFork)
    withForkyStore(store):
      when lcDataFork > LightClientDataFork.None:
        let upgradedUpdate = finalityUpdate.migratingToDataFork(lcDataFork)
        template forkyUpdate: untyped = upgradedUpdate.forky(lcDataFork)
        let res = process_light_client_update(
          forkyStore, forkyUpdate, currentSlot, cfg, genesis_validators_root)
        check:
          forkyUpdate.attested_header.beacon.slot == dag.head.parent.slot
          res.isOk
          forkyStore.finalized_header == forkyUpdate.finalized_header
          forkyStore.optimistic_header == forkyUpdate.attested_header

  test "Init from checkpoint":
    # Fetch genesis state
    let genesisState = assignClone dag.headState

    # Advance to target slot for checkpoint
    let finalizedSlot =
      ((altairStartSlot.sync_committee_period + 1).start_epoch + 2).start_slot
    dag.advanceToSlot(finalizedSlot, verifier, quarantine[])

    # Initialize new DAG from checkpoint
    let cpDb = BeaconChainDB.new("", cfg = cfg, inMemory = true)
    ChainDAGRef.preInit(cpDb, genesisState[])
    ChainDAGRef.preInit(cpDb, dag.headState) # dag.getForkedBlock(dag.head.bid).get)
    let cpDag = ChainDAGRef.init(
      cfg, cpDb, validatorMonitor, {},
      lcDataConfig = LightClientDataConfig(
        serve: true,
        importMode: LightClientDataImportMode.Full))

    # Advance by a couple epochs
    for i in 1'u64 .. 10:
      let headSlot = (finalizedSlot.epoch + i).start_slot
      cpDag.advanceToSlot(headSlot, verifier, quarantine[])

    check true