use `ForkedLightClientStore` internally (#4512)

When running `nimbus_light_client`, we persist the latest header from
`LightClientStore.finalized_header` in a database across restarts.
Because the data format is derived from the latest `LightClientStore`,
this could lead to data being persisted in pre-release formats.

To enable us to test later `LightClientStore` versions on devnets,
transition to a `ForkedLightClientStore` internally that is only
migrated to newer forks on-demand (instead of starting at latest).
This commit is contained in:
Etan Kissling 2023-01-16 16:53:45 +01:00 committed by GitHub
parent 87a34bff6c
commit fda03548e3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 907 additions and 641 deletions

View File

@ -303,7 +303,7 @@ OK: 12/12 Fail: 0/12 Skip: 0/12
+ LVH searching OK
```
OK: 1/1 Fail: 0/1 Skip: 0/1
## Light client - Altair [Preset: mainnet]
## Light client [Preset: mainnet]
```diff
+ Init from checkpoint OK
+ Light client sync OK

View File

@ -51,6 +51,12 @@ type
putStmt: SqliteStmt[(int64, seq[byte]), void]
keepFromStmt: SqliteStmt[int64, void]
LegacyBestLightClientUpdateStore = object
putStmt: SqliteStmt[(int64, seq[byte]), void]
delStmt: SqliteStmt[int64, void]
delFromStmt: SqliteStmt[int64, void]
keepFromStmt: SqliteStmt[int64, void]
BestLightClientUpdateStore = object
getStmt: SqliteStmt[int64, (int64, seq[byte])]
putStmt: SqliteStmt[(int64, int64, seq[byte]), void]
@ -74,6 +80,10 @@ type
## Key is the block slot of which the post state was used to get the data.
## Data stored for all finalized epoch boundary blocks.
legacyBestUpdates: LegacyBestLightClientUpdateStore
## SyncCommitteePeriod -> altair.LightClientUpdate
## Used through Bellatrix.
bestUpdates: BestLightClientUpdateStore
## SyncCommitteePeriod -> (LightClientDataFork, LightClientUpdate)
## Stores the `LightClientUpdate` with the most `sync_committee_bits` per
@ -160,6 +170,48 @@ func putCurrentSyncCommitteeBranch*(
let res = db.currentBranches.putStmt.exec((slot.int64, SSZ.encode(branch)))
res.expect("SQL query OK")
proc initLegacyBestUpdatesStore(
backend: SqStoreRef,
name: string,
): KvResult[LegacyBestLightClientUpdateStore] =
## Opens the legacy `SyncCommitteePeriod -> altair.LightClientUpdate` store
## backed by the SQLite table `name`, creating the table on first use.
## Returns the prepared-statement handles wrapped in a
## `LegacyBestLightClientUpdateStore`, or propagates the `KvResult` error
## if the `CREATE TABLE` statement fails.
? backend.exec("""
CREATE TABLE IF NOT EXISTS `""" & name & """` (
`period` INTEGER PRIMARY KEY, -- `SyncCommitteePeriod`
`update` BLOB -- `altair.LightClientUpdate` (SSZ)
);
""")
# Prepare all statements up-front. `managed = false` means disposal is the
# caller's responsibility — see the matching `close` for this store type.
let
# Upsert: one best update per period (period is the primary key).
putStmt = backend.prepareStmt("""
REPLACE INTO `""" & name & """` (
`period`, `update`
) VALUES (?, ?);
""", (int64, seq[byte]), void, managed = false).expect("SQL query OK")
# Delete the update stored for a single period.
delStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` = ?;
""", int64, void, managed = false).expect("SQL query OK")
# Delete all updates at or above a given period (prune non-finalized).
delFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` >= ?;
""", int64, void, managed = false).expect("SQL query OK")
# Keep only updates at or above a given period (prune historical data).
keepFromStmt = backend.prepareStmt("""
DELETE FROM `""" & name & """`
WHERE `period` < ?;
""", int64, void, managed = false).expect("SQL query OK")
ok LegacyBestLightClientUpdateStore(
putStmt: putStmt,
delStmt: delStmt,
delFromStmt: delFromStmt,
keepFromStmt: keepFromStmt)
# Releases the prepared statements owned by the store. Required because they
# were created with `managed = false`; call before closing the backing
# database handle.
func close(store: LegacyBestLightClientUpdateStore) =
store.putStmt.dispose()
store.delStmt.dispose()
store.delFromStmt.dispose()
store.keepFromStmt.dispose()
proc initBestUpdatesStore(
backend: SqStoreRef,
name, legacyAltairName: string,
@ -171,8 +223,7 @@ proc initBestUpdatesStore(
`update` BLOB -- `LightClientUpdate` (SSZ)
);
""")
if backend.hasTable(legacyAltairName).expect("SQL query OK"):
info "Importing Altair light client data"
block:
# SyncCommitteePeriod -> altair.LightClientUpdate
const legacyKind = Base10.toString(ord(LightClientDataFork.Altair).uint)
? backend.exec("""
@ -182,9 +233,6 @@ proc initBestUpdatesStore(
SELECT `period`, """ & legacyKind & """ AS `kind`, `update`
FROM `""" & legacyAltairName & """`;
""")
? backend.exec("""
DROP TABLE `""" & legacyAltairName & """`;
""")
let
getStmt = backend.prepareStmt("""
@ -257,15 +305,31 @@ func putBestUpdate*(
when lcDataFork > LightClientDataFork.None:
let numParticipants = forkyUpdate.sync_aggregate.num_active_participants
if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
block:
let res = db.bestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
block:
let res = db.legacyBestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
else:
block:
let res = db.bestUpdates.putStmt.exec(
(period.int64, lcDataFork.int64, SSZ.encode(forkyUpdate)))
res.expect("SQL query OK")
when lcDataFork == LightClientDataFork.Altair:
let res = db.legacyBestUpdates.putStmt.exec(
(period.int64, SSZ.encode(forkyUpdate)))
res.expect("SQL query OK")
else:
# Keep legacy table at best Altair update.
discard
else:
block:
let res = db.bestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
else:
let res = db.bestUpdates.putStmt.exec(
(period.int64, lcDataFork.int64, SSZ.encode(forkyUpdate)))
block:
let res = db.legacyBestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
else:
let res = db.bestUpdates.delStmt.exec(period.int64)
res.expect("SQL query OK")
proc putUpdateIfBetter*(
db: LightClientDataDB, period: SyncCommitteePeriod,
@ -334,23 +398,33 @@ func sealPeriod*(
func delNonFinalizedPeriodsFrom*(
db: LightClientDataDB, minPeriod: SyncCommitteePeriod) =
doAssert minPeriod.isSupportedBySQLite
let res1 = db.sealedPeriods.delFromStmt.exec(minPeriod.int64)
res1.expect("SQL query OK")
let res2 = db.bestUpdates.delFromStmt.exec(minPeriod.int64)
res2.expect("SQL query OK")
block:
let res = db.sealedPeriods.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.bestUpdates.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.legacyBestUpdates.delFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
# `currentBranches` only has finalized data
func keepPeriodsFrom*(
db: LightClientDataDB, minPeriod: SyncCommitteePeriod) =
doAssert minPeriod.isSupportedBySQLite
let res1 = db.sealedPeriods.keepFromStmt.exec(minPeriod.int64)
res1.expect("SQL query OK")
let res2 = db.bestUpdates.keepFromStmt.exec(minPeriod.int64)
res2.expect("SQL query OK")
let
minSlot = min(minPeriod.start_slot, int64.high.Slot)
res3 = db.currentBranches.keepFromStmt.exec(minSlot.int64)
res3.expect("SQL query OK")
block:
let res = db.sealedPeriods.keepFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.bestUpdates.keepFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
block:
let res = db.legacyBestUpdates.keepFromStmt.exec(minPeriod.int64)
res.expect("SQL query OK")
let minSlot = min(minPeriod.start_slot, int64.high.Slot)
block:
let res = db.currentBranches.keepFromStmt.exec(minSlot.int64)
res.expect("SQL query OK")
type LightClientDataDBNames* = object
altairCurrentBranches*: string
@ -364,6 +438,8 @@ proc initLightClientDataDB*(
let
currentBranches =
? backend.initCurrentBranchesStore(names.altairCurrentBranches)
legacyBestUpdates =
? backend.initLegacyBestUpdatesStore(names.legacyAltairBestUpdates)
bestUpdates =
? backend.initBestUpdatesStore(
names.bestUpdates, names.legacyAltairBestUpdates)
@ -373,12 +449,14 @@ proc initLightClientDataDB*(
ok LightClientDataDB(
backend: backend,
currentBranches: currentBranches,
legacyBestUpdates: legacyBestUpdates,
bestUpdates: bestUpdates,
sealedPeriods: sealedPeriods)
proc close*(db: LightClientDataDB) =
if db.backend != nil:
db.currentBranches.close()
db.legacyBestUpdates.close()
db.bestUpdates.close()
db.sealedPeriods.close()
db[].reset()

View File

@ -16,18 +16,18 @@ import
logScope: topics = "beacnde"
const storeDataFork = LightClient.storeDataFork
func shouldSyncOptimistically*(node: BeaconNode, wallSlot: Slot): bool =
if node.eth1Monitor == nil:
return false
let optimisticHeader = node.lightClient.optimisticHeader.valueOr:
return false
shouldSyncOptimistically(
optimisticSlot = optimisticHeader.beacon.slot,
dagSlot = getStateField(node.dag.headState, slot),
wallSlot = wallSlot)
let optimisticHeader = node.lightClient.optimisticHeader
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
shouldSyncOptimistically(
optimisticSlot = forkyHeader.beacon.slot,
dagSlot = getStateField(node.dag.headState, slot),
wallSlot = wallSlot)
else:
false
proc initLightClient*(
node: BeaconNode,
@ -88,8 +88,10 @@ proc initLightClient*(
if config.syncLightClient:
proc onOptimisticHeader(
lightClient: LightClient,
optimisticHeader: storeDataFork.LightClientHeader) =
optimisticProcessor.setOptimisticHeader(optimisticHeader.beacon)
optimisticHeader: ForkedLightClientHeader) =
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = config.trustedBlockRoot
@ -147,17 +149,22 @@ proc updateLightClientFromDag*(node: BeaconNode) =
return
let lcHeader = node.lightClient.finalizedHeader
if lcHeader.isSome:
if dagPeriod <= lcHeader.get.beacon.slot.sync_committee_period:
return
let
bdata = node.dag.getForkedBlock(dagHead.blck.bid).valueOr:
return
header = withBlck(bdata):
blck.toLightClientHeader(storeDataFork)
current_sync_committee = block:
let tmpState = assignClone(node.dag.headState)
node.dag.currentSyncCommitteeForPeriod(tmpState[], dagPeriod).valueOr:
withForkyHeader(lcHeader):
when lcDataFork > LightClientDataFork.None:
if dagPeriod <= forkyHeader.beacon.slot.sync_committee_period:
return
let bdata = node.dag.getForkedBlock(dagHead.blck.bid).valueOr:
return
var header {.noinit.}: ForkedLightClientHeader
withBlck(bdata):
const lcDataFork = lcDataForkAtStateFork(stateFork)
when lcDataFork > LightClientDataFork.None:
header = ForkedLightClientHeader(kind: lcDataFork)
header.forky(lcDataFork) = blck.toLightClientHeader(lcDataFork)
else: raiseAssert "Unreachable"
let current_sync_committee = block:
let tmpState = assignClone(node.dag.headState)
node.dag.currentSyncCommitteeForPeriod(tmpState[], dagPeriod).valueOr:
return
node.lightClient.resetToFinalizedHeader(header, current_sync_committee)

View File

@ -29,8 +29,6 @@ logScope: topics = "gossip_lc"
declareHistogram light_client_store_object_duration_seconds,
"storeObject() duration", buckets = [0.25, 0.5, 1, 2, 4, 8, Inf]
template storeDataFork: LightClientDataFork = LightClientDataFork.high
type
Nothing = object
@ -101,7 +99,7 @@ type
# Consumer
# ----------------------------------------------------------------
store: ref Option[storeDataFork.LightClientStore]
store: ref ForkedLightClientStore
getBeaconTime: GetBeaconTimeFn
getTrustedBlockRoot: GetTrustedBlockRootCallback
onStoreInitialized, onFinalizedHeader, onOptimisticHeader: VoidCallback
@ -119,7 +117,7 @@ type
of LightClientFinalizationMode.Optimistic:
lastProgressTick: BeaconTime # Moment when last update made progress
lastDuplicateTick: BeaconTime # Moment when last duplicate update received
numDuplicatesSinceProgress: int # Number of duplicates since last progress
numDupsSinceProgress: int # Number of duplicates since last progress
latestFinalityUpdate: ForkedLightClientOptimisticUpdate
@ -130,9 +128,6 @@ const
minForceUpdateDelay = chronos.minutes(30) # Minimum delay until forced-update
minForceUpdateDuplicates = 100 # Minimum duplicates until forced-update
func storeDataFork*(x: typedesc[LightClientProcessor]): LightClientDataFork =
storeDataFork
# Initialization
# ------------------------------------------------------------------------------
@ -143,7 +138,7 @@ proc new*(
cfg: RuntimeConfig,
genesis_validators_root: Eth2Digest,
finalizationMode: LightClientFinalizationMode,
store: ref Option[storeDataFork.LightClientStore],
store: ref ForkedLightClientStore,
getBeaconTime: GetBeaconTimeFn,
getTrustedBlockRoot: GetTrustedBlockRootCallback,
onStoreInitialized: VoidCallback = nil,
@ -198,23 +193,22 @@ proc tryForceUpdate(
self: var LightClientProcessor,
wallTime: BeaconTime) =
## Try to force-update to the next sync committee period.
let
wallSlot = wallTime.slotOrZero()
store = self.store
let wallSlot = wallTime.slotOrZero()
doAssert self.finalizationMode == LightClientFinalizationMode.Optimistic
if store[].isSome:
doAssert self.finalizationMode == LightClientFinalizationMode.Optimistic
case store[].get.process_light_client_store_force_update(wallSlot)
of NoUpdate:
discard
of DidUpdateWithoutSupermajority:
warn "Light client force-updated without supermajority",
finalizedSlot = store[].get.finalized_header.beacon.slot,
optimisticSlot = store[].get.optimistic_header.beacon.slot
of DidUpdateWithoutFinality:
warn "Light client force-updated without finality proof",
finalizedSlot = store[].get.finalized_header.beacon.slot,
optimisticSlot = store[].get.optimistic_header.beacon.slot
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
case forkyStore.process_light_client_store_force_update(wallSlot)
of NoUpdate:
discard
of DidUpdateWithoutSupermajority:
warn "Light client force-updated without supermajority",
finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot
of DidUpdateWithoutFinality:
warn "Light client force-updated without finality proof",
finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot
proc processObject(
self: var LightClientProcessor,
@ -222,37 +216,39 @@ proc processObject(
wallTime: BeaconTime): Result[void, VerifierError] =
let
wallSlot = wallTime.slotOrZero()
store = self.store
res =
if obj.kind > storeDataFork:
err(VerifierError.MissingParent)
elif obj.kind > LightClientDataFork.None:
let upgradedObj = obj.migratingToDataFork(storeDataFork)
withForkyObject(upgradedObj):
when lcDataFork == storeDataFork:
when forkyObject is ForkyLightClientBootstrap:
if store[].isSome:
err(VerifierError.Duplicate)
res = withForkyObject(obj):
when lcDataFork > LightClientDataFork.None:
when forkyObject is ForkyLightClientBootstrap:
if self.store[].kind > LightClientDataFork.None:
err(VerifierError.Duplicate)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyObject, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
let trustedBlockRoot = self.getTrustedBlockRoot()
if trustedBlockRoot.isNone:
err(VerifierError.MissingParent)
else:
let initRes = initialize_light_client_store(
trustedBlockRoot.get, forkyObject, self.cfg)
if initRes.isErr:
err(initRes.error)
else:
store[] = some(initRes.get)
ok()
elif forkyObject is SomeForkyLightClientUpdate:
if store[].isNone:
err(VerifierError.MissingParent)
else:
store[].get.process_light_client_update(
forkyObject, wallSlot,
self.store[] = ForkedLightClientStore(kind: lcDataFork)
self.store[].forky(lcDataFork) = initRes.get
ok()
elif forkyObject is SomeForkyLightClientUpdate:
if self.store[].kind == LightClientDataFork.None:
err(VerifierError.MissingParent)
else:
if lcDataFork > self.store[].kind:
info "Upgrading light client",
oldFork = self.store[].kind, newFork = lcDataFork
self.store[].migrateToDataFork(lcDataFork)
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
let upgradedObject = obj.migratingToDataFork(lcDataFork)
process_light_client_update(
forkyStore, upgradedObject.forky(lcDataFork), wallSlot,
self.cfg, self.genesis_validators_root)
else: raiseAssert "Unreachable"
else: raiseAssert "Unreachable"
else:
err(VerifierError.Invalid)
@ -263,28 +259,30 @@ proc processObject(
if res.isErr:
when obj is ForkedLightClientUpdate:
if self.finalizationMode == LightClientFinalizationMode.Optimistic and
store[].isSome and store[].get.best_valid_update.isSome and
obj.kind > LightClientDataFork.None and obj.kind <= storeDataFork:
# `best_valid_update` gets set when no supermajority / finality proof
# is available. In that case, we will wait for a better update.
# If none is made available within reasonable time, the light client
# is force-updated using the best known data to ensure sync progress.
case res.error
of VerifierError.Duplicate:
if wallTime >= self.lastDuplicateTick + duplicateRateLimit:
if self.numDuplicatesSinceProgress < minForceUpdateDuplicates:
let upgradedObj = obj.migratingToDataFork(storeDataFork)
if upgradedObj.forky(storeDataFork).matches(
store[].get.best_valid_update.get):
self.lastDuplicateTick = wallTime
inc self.numDuplicatesSinceProgress
if self.numDuplicatesSinceProgress >= minForceUpdateDuplicates and
wallTime >= self.lastProgressTick + minForceUpdateDelay:
self.tryForceUpdate(wallTime)
self.lastProgressTick = wallTime
self.lastDuplicateTick = wallTime + duplicateCountDelay
self.numDuplicatesSinceProgress = 0
else: discard
obj.kind <= self.store[].kind:
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
if forkyStore.best_valid_update.isSome:
# `best_valid_update` is set when supermajority / finality proof
# is unavailable. In that case, we will wait for a better update.
# If none is made available within reasonable time, light client
# is force-updated with best known data to ensure sync progress.
case res.error
of VerifierError.Duplicate:
if wallTime >= self.lastDuplicateTick + duplicateRateLimit:
if self.numDupsSinceProgress < minForceUpdateDuplicates:
let upgradedObj = obj.migratingToDataFork(lcDataFork)
if upgradedObj.forky(lcDataFork).matches(
forkyStore.best_valid_update.get):
self.lastDuplicateTick = wallTime
inc self.numDupsSinceProgress
if self.numDupsSinceProgress >= minForceUpdateDuplicates and
wallTime >= self.lastProgressTick + minForceUpdateDelay:
self.tryForceUpdate(wallTime)
self.lastProgressTick = wallTime
self.lastDuplicateTick = wallTime + duplicateCountDelay
self.numDupsSinceProgress = 0
else: discard
return res
@ -292,7 +290,7 @@ proc processObject(
if self.finalizationMode == LightClientFinalizationMode.Optimistic:
self.lastProgressTick = wallTime
self.lastDuplicateTick = wallTime + duplicateCountDelay
self.numDuplicatesSinceProgress = 0
self.numDupsSinceProgress = 0
res
@ -300,22 +298,28 @@ template withReportedProgress(
obj: SomeForkedLightClientObject | Nothing, body: untyped): bool =
block:
let
previousWasInitialized = store[].isSome
previousNextCommitteeKnown =
if store[].isSome:
store[].get.is_next_sync_committee_known
oldIsInitialized =
self.store[].kind != LightClientDataFork.None
oldNextCommitteeKnown = withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
forkyStore.is_next_sync_committee_known
else:
false
previousFinalized =
if store[].isSome:
store[].get.finalized_header
var
oldFinalized = withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
var header = ForkedLightClientHeader(kind: lcDataFork)
header.forky(lcDataFork) = forkyStore.finalized_header
header
else:
default(typeof(store[].get.finalized_header))
previousOptimistic =
if store[].isSome:
store[].get.optimistic_header
default(ForkedLightClientHeader)
oldOptimistic = withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
var header = ForkedLightClientHeader(kind: lcDataFork)
header.forky(lcDataFork) = forkyStore.optimistic_header
header
else:
default(typeof(store[].get.optimistic_header))
default(ForkedLightClientHeader)
body
@ -323,29 +327,35 @@ template withReportedProgress(
didProgress = false
didSignificantProgress = false
if store[].isSome != previousWasInitialized:
let newIsInitialized = self.store[].kind != LightClientDataFork.None
if newIsInitialized > oldIsInitialized:
didProgress = true
didSignificantProgress = true
if self.onStoreInitialized != nil:
self.onStoreInitialized()
self.onStoreInitialized = nil
if store[].isSome:
if store[].get.optimistic_header != previousOptimistic:
didProgress = true
when obj isnot SomeForkedLightClientUpdateWithFinality:
didSignificantProgress = true
if self.onOptimisticHeader != nil:
self.onOptimisticHeader()
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
if oldOptimistic.kind <= lcDataFork:
oldOptimistic.migrateToDataFork(lcDataFork)
if forkyStore.optimistic_header != oldOptimistic.forky(lcDataFork):
didProgress = true
when obj isnot SomeForkedLightClientUpdateWithFinality:
didSignificantProgress = true
if self.onOptimisticHeader != nil:
self.onOptimisticHeader()
if store[].get.finalized_header != previousFinalized:
didProgress = true
didSignificantProgress = true
if self.onFinalizedHeader != nil:
self.onFinalizedHeader()
if oldFinalized.kind <= lcDataFork:
oldFinalized.migrateToDataFork(lcDataFork)
if forkyStore.finalized_header != oldFinalized.forky(lcDataFork):
didProgress = true
didSignificantProgress = true
if self.onFinalizedHeader != nil:
self.onFinalizedHeader()
if store[].get.is_next_sync_committee_known != previousNextCommitteeKnown:
didProgress = true
if forkyStore.is_next_sync_committee_known != oldNextCommitteeKnown:
didProgress = true
if didProgress:
when obj is Nothing:
@ -378,8 +388,6 @@ proc storeObject*(
## update the `LightClientStore` accordingly
let
startTick = Moment.now()
store = self.store
didSignificantProgress =
withReportedProgress(obj):
? self.processObject(obj, wallTime)
@ -401,29 +409,35 @@ proc storeObject*(
forkyObject.attested_header.beacon.slot
else:
GENESIS_SLOT
debug "LC object processed",
finalizedSlot = store[].get.finalized_header.beacon.slot,
optimisticSlot = store[].get.optimistic_header.beacon.slot,
kind = typeof(obj).name,
objectSlot = objSlot,
storeObjectDur
withForkyStore(self.store[]):
when lcDataFork > LightClientDataFork.None:
debug "LC object processed",
finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot,
kind = typeof(obj).name,
objectSlot = objSlot,
storeObjectDur
ok didSignificantProgress
proc resetToFinalizedHeader*(
self: var LightClientProcessor,
header: storeDataFork.LightClientHeader,
header: ForkedLightClientHeader,
current_sync_committee: SyncCommittee) =
let store = self.store
discard withReportedProgress:
store[] = some storeDataFork.LightClientStore(
finalized_header: header,
current_sync_committee: current_sync_committee,
optimistic_header: header)
debug "LC reset to finalized header",
finalizedSlot = store[].get.finalized_header.beacon.slot,
optimisticSlot = store[].get.optimistic_header.beacon.slot
withForkyHeader(header):
when lcDataFork > LightClientDataFork.None:
self.store[] = ForkedLightClientStore(kind: lcDataFork)
template forkyStore: untyped = self.store[].forky(lcDataFork)
forkyStore = lcDataFork.LightClientStore(
finalized_header: forkyHeader,
current_sync_committee: current_sync_committee,
optimistic_header: forkyHeader)
debug "LC reset to finalized header",
finalizedSlot = forkyStore.finalized_header.beacon.slot,
optimisticSlot = forkyStore.optimistic_header.beacon.slot
else:
self.store[].reset()
debug "LC reset"
# Enqueue
# ------------------------------------------------------------------------------

View File

@ -24,11 +24,9 @@ export LightClientFinalizationMode, eth2_network, conf_light_client
logScope: topics = "lightcl"
template storeDataFork: LightClientDataFork = LightClientProcessor.storeDataFork
type
LightClientHeaderCallback* =
proc(lightClient: LightClient, header: storeDataFork.LightClientHeader) {.
proc(lightClient: LightClient, header: ForkedLightClientHeader) {.
gcsafe, raises: [Defect].}
LightClientValueObserver[V] =
@ -47,7 +45,7 @@ type
cfg: RuntimeConfig
forkDigests: ref ForkDigests
getBeaconTime: GetBeaconTimeFn
store: ref Option[storeDataFork.LightClientStore]
store: ref ForkedLightClientStore
processor: ref LightClientProcessor
manager: LightClientManager
gossipState: GossipState
@ -58,29 +56,33 @@ type
optimisticUpdateObserver*: LightClientOptimisticUpdateObserver
trustedBlockRoot*: Option[Eth2Digest]
func storeDataFork*(x: typedesc[LightClient]): LightClientDataFork =
storeDataFork
func finalizedHeader*(
lightClient: LightClient): Opt[storeDataFork.LightClientHeader] =
if lightClient.store[].isSome:
ok lightClient.store[].get.finalized_header
else:
err()
lightClient: LightClient): ForkedLightClientHeader =
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
var header = ForkedLightClientHeader(kind: lcDataFork)
header.forky(lcDataFork) = forkyStore.finalized_header
header
else:
default(ForkedLightClientHeader)
func optimisticHeader*(
lightClient: LightClient): Opt[storeDataFork.LightClientHeader] =
if lightClient.store[].isSome:
ok lightClient.store[].get.optimistic_header
else:
err()
lightClient: LightClient): ForkedLightClientHeader =
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
var header = ForkedLightClientHeader(kind: lcDataFork)
header.forky(lcDataFork) = forkyStore.optimistic_header
header
else:
default(ForkedLightClientHeader)
func finalizedSyncCommittee*(
lightClient: LightClient): Opt[altair.SyncCommittee] =
if lightClient.store[].isSome:
ok lightClient.store[].get.current_sync_committee
else:
err()
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
ok forkyStore.current_sync_committee
else:
Opt.none(altair.SyncCommittee)
proc createLightClient(
network: Eth2Node,
@ -98,7 +100,7 @@ proc createLightClient(
cfg: cfg,
forkDigests: forkDigests,
getBeaconTime: getBeaconTime,
store: (ref Option[storeDataFork.LightClientStore])())
store: (ref ForkedLightClientStore)())
func getTrustedBlockRoot(): Option[Eth2Digest] =
lightClient.trustedBlockRoot
@ -109,12 +111,12 @@ proc createLightClient(
proc onFinalizedHeader() =
if lightClient.onFinalizedHeader != nil:
lightClient.onFinalizedHeader(
lightClient, lightClient.finalizedHeader.get)
lightClient, lightClient.finalizedHeader)
proc onOptimisticHeader() =
if lightClient.onOptimisticHeader != nil:
lightClient.onOptimisticHeader(
lightClient, lightClient.optimisticHeader.get)
lightClient, lightClient.optimisticHeader)
proc bootstrapObserver(obj: ForkedLightClientBootstrap) =
if lightClient.bootstrapObserver != nil:
@ -154,27 +156,28 @@ proc createLightClient(
lightClientVerifier(obj)
func isLightClientStoreInitialized(): bool =
lightClient.store[].isSome
lightClient.store[].kind > LightClientDataFork.None
func isNextSyncCommitteeKnown(): bool =
if lightClient.store[].isSome:
lightClient.store[].get.is_next_sync_committee_known
else:
false
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
forkyStore.is_next_sync_committee_known
else:
false
func getFinalizedPeriod(): SyncCommitteePeriod =
if lightClient.store[].isSome:
lightClient.store[].get.finalized_header
.beacon.slot.sync_committee_period
else:
GENESIS_SLOT.sync_committee_period
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
forkyStore.finalized_header.beacon.slot.sync_committee_period
else:
GENESIS_SLOT.sync_committee_period
func getOptimisticPeriod(): SyncCommitteePeriod =
if lightClient.store[].isSome:
lightClient.store[].get.optimistic_header
.beacon.slot.sync_committee_period
else:
GENESIS_SLOT.sync_committee_period
withForkyStore(lightClient.store[]):
when lcDataFork > LightClientDataFork.None:
forkyStore.optimistic_header.beacon.slot.sync_committee_period
else:
GENESIS_SLOT.sync_committee_period
lightClient.manager = LightClientManager.init(
lightClient.network, rng, getTrustedBlockRoot,
@ -223,8 +226,8 @@ proc start*(lightClient: LightClient) =
proc resetToFinalizedHeader*(
lightClient: LightClient,
header: storeDataFork.LightClientHeader,
current_sync_committee: SyncCommittee) =
header: ForkedLightClientHeader,
current_sync_committee: altair.SyncCommittee) =
lightClient.processor[].resetToFinalizedHeader(header, current_sync_committee)
import metrics
@ -350,17 +353,19 @@ proc installMessageValidators*(
withLcDataFork(lcDataForkAtStateFork(stateFork)):
when lcDataFork > LightClientDataFork.None:
let digest = forkDigests[].atStateFork(stateFork)
let
contextFork = stateFork # Copy to avoid capturing `EIP4844` (Nim 1.6)
digest = forkDigests[].atStateFork(contextFork)
lightClient.network.addValidator(
getLightClientFinalityUpdateTopic(digest),
proc(msg: lcDataFork.LightClientFinalityUpdate): ValidationResult =
validate(msg, stateFork, processLightClientFinalityUpdate))
validate(msg, contextFork, processLightClientFinalityUpdate))
lightClient.network.addValidator(
getLightClientOptimisticUpdateTopic(digest),
proc(msg: lcDataFork.LightClientOptimisticUpdate): ValidationResult =
validate(msg, stateFork, processLightClientOptimisticUpdate))
validate(msg, contextFork, processLightClientOptimisticUpdate))
proc updateGossipStatus*(
lightClient: LightClient, slot: Slot, dagIsBehind = default(Option[bool])) =

View File

@ -12,6 +12,7 @@ else:
import
# Status libraries
stew/base10,
chronicles,
eth/db/kvstore_sqlite3,
# Beacon chain internals
@ -21,20 +22,21 @@ import
logScope: topics = "lcdb"
# `altair_lc_headers` holds the latest `LightClientStore.finalized_header`.
# `lc_headers` holds the latest `LightClientStore.finalized_header`.
#
# `altair_sync_committees` holds finalized `SyncCommittee` by period, needed to
# continue an interrupted sync process without having to obtain bootstrap info.
template dbDataFork: LightClientDataFork = LightClientDataFork.Altair
type
LightClientHeaderKind {.pure.} = enum # Append only, used in DB data!
Finalized = 1
LightClientHeaderKey {.pure.} = enum # Append only, used in DB data!
Finalized = 1 # Latest finalized header
LegacyLightClientHeadersStore = object
putStmt: SqliteStmt[(int64, seq[byte]), void]
LightClientHeadersStore = object
getStmt: SqliteStmt[int64, seq[byte]]
putStmt: SqliteStmt[(int64, seq[byte]), void]
getStmt: SqliteStmt[int64, (int64, seq[byte])]
putStmt: SqliteStmt[(int64, int64, seq[byte]), void]
SyncCommitteeStore = object
getStmt: SqliteStmt[int64, seq[byte]]
@ -45,36 +47,75 @@ type
backend: SqStoreRef
## SQLite backend
legacyHeaders: LegacyLightClientHeadersStore
## LightClientHeaderKey -> altair.LightClientHeader
## Used through Bellatrix.
headers: LightClientHeadersStore
## LightClientHeaderKind -> altair.LightClientHeader
## LightClientHeaderKey -> (LightClientDataFork, LightClientHeader)
## Stores the latest light client headers.
syncCommittees: SyncCommitteeStore
## SyncCommitteePeriod -> altair.SyncCommittee
## Stores finalized `SyncCommittee` by sync committee period.
func initLightClientHeadersStore(
proc initLegacyLightClientHeadersStore(
backend: SqStoreRef,
name: string): KvResult[LightClientHeadersStore] =
static: doAssert LightClientDataFork.high == LightClientDataFork.Altair
name: string): KvResult[LegacyLightClientHeadersStore] =
? backend.exec("""
CREATE TABLE IF NOT EXISTS `""" & name & """` (
`kind` INTEGER PRIMARY KEY, -- `LightClientHeaderKind`
`kind` INTEGER PRIMARY KEY, -- `LightClientHeaderKey`
`header` BLOB -- `altair.LightClientHeader` (SSZ)
);
""")
let
getStmt = backend.prepareStmt("""
SELECT `header`
FROM `""" & name & """`
WHERE `kind` = ?;
""", int64, seq[byte], managed = false).expect("SQL query OK")
putStmt = backend.prepareStmt("""
REPLACE INTO `""" & name & """` (
`kind`, `header`
) VALUES (?, ?);
""", (int64, seq[byte]), void, managed = false).expect("SQL query OK")
""", (int64, seq[byte]), void, managed = false)
.expect("SQL query OK")
ok LegacyLightClientHeadersStore(
putStmt: putStmt)
func close(store: LegacyLightClientHeadersStore) =
store.putStmt.dispose()
proc initLightClientHeadersStore(
backend: SqStoreRef,
name, legacyAltairName: string): KvResult[LightClientHeadersStore] =
? backend.exec("""
CREATE TABLE IF NOT EXISTS `""" & name & """` (
`key` INTEGER PRIMARY KEY, -- `LightClientHeaderKey`
`kind` INTEGER, -- `LightClientDataFork`
`header` BLOB -- `LightClientHeader` (SSZ)
);
""")
block:
# LightClientHeaderKey -> altair.LightClientHeader
const legacyKind = Base10.toString(ord(LightClientDataFork.Altair).uint)
? backend.exec("""
INSERT OR IGNORE INTO `""" & name & """` (
`key`, `kind`, `header`
)
SELECT `kind` AS `key`, """ & legacyKind & """ AS `kind`, `header`
FROM `""" & legacyAltairName & """`;
""")
let
getStmt = backend.prepareStmt("""
SELECT `kind`, `header`
FROM `""" & name & """`
WHERE `key` = ?;
""", int64, (int64, seq[byte]), managed = false).expect("SQL query OK")
putStmt = backend.prepareStmt("""
REPLACE INTO `""" & name & """` (
`key`, `kind`, `header`
) VALUES (?, ?, ?);
""", (int64, int64, seq[byte]), void, managed = false)
.expect("SQL query OK")
ok LightClientHeadersStore(
getStmt: getStmt,
@ -85,29 +126,47 @@ func close(store: LightClientHeadersStore) =
store.putStmt.dispose()
proc getLatestFinalizedHeader*(
db: LightClientDB): Opt[dbDataFork.LightClientHeader] =
var header: seq[byte]
for res in db.headers.getStmt.exec(
LightClientHeaderKind.Finalized.int64, header):
db: LightClientDB): ForkedLightClientHeader =
const key = LightClientHeaderKey.Finalized
var header: (int64, seq[byte])
for res in db.headers.getStmt.exec(key.int64, header):
res.expect("SQL query OK")
try:
return ok SSZ.decode(header, dbDataFork.LightClientHeader)
withAll(LightClientDataFork):
when lcDataFork > LightClientDataFork.None:
if header[0] == ord(lcDataFork).int64:
var obj = ForkedLightClientHeader(kind: lcDataFork)
obj.forky(lcDataFork) = SSZ.decode(
header[1], lcDataFork.LightClientHeader)
return obj
warn "Unsupported LC store kind", store = "headers",
key, kind = header[0]
return default(ForkedLightClientHeader)
except SszError as exc:
error "LC store corrupted", store = "headers",
kind = "Finalized", exc = exc.msg
return err()
key, kind = header[0], exc = exc.msg
return default(ForkedLightClientHeader)
func putLatestFinalizedHeader*(
db: LightClientDB, header: dbDataFork.LightClientHeader) =
block:
let res = db.headers.putStmt.exec(
(LightClientHeaderKind.Finalized.int64, SSZ.encode(header)))
res.expect("SQL query OK")
block:
let period = header.beacon.slot.sync_committee_period
doAssert period.isSupportedBySQLite
let res = db.syncCommittees.keepFromStmt.exec(period.int64)
res.expect("SQL query OK")
db: LightClientDB, header: ForkedLightClientHeader) =
withForkyHeader(header):
when lcDataFork > LightClientDataFork.None:
block:
const key = LightClientHeaderKey.Finalized
block:
let res = db.headers.putStmt.exec(
(key.int64, lcDataFork.int64, SSZ.encode(forkyHeader)))
res.expect("SQL query OK")
when lcDataFork == LightClientDataFork.Altair:
let res = db.legacyHeaders.putStmt.exec(
(key.int64, SSZ.encode(forkyHeader)))
res.expect("SQL query OK")
block:
let period = forkyHeader.beacon.slot.sync_committee_period
doAssert period.isSupportedBySQLite
let res = db.syncCommittees.keepFromStmt.exec(period.int64)
res.expect("SQL query OK")
else: raiseAssert "Cannot store empty `LightClientHeader`"
func initSyncCommitteesStore(
backend: SqStoreRef,
@ -156,7 +215,7 @@ proc getSyncCommittee*(
except SszError as exc:
error "LC store corrupted", store = "syncCommittees",
period, exc = exc.msg
return err()
return Opt.none(altair.SyncCommittee)
func putSyncCommittee*(
db: LightClientDB, period: SyncCommitteePeriod,
@ -167,25 +226,31 @@ func putSyncCommittee*(
res.expect("SQL query OK")
type LightClientDBNames* = object
altairHeaders*: string
legacyAltairHeaders*: string
headers*: string
altairSyncCommittees*: string
func initLightClientDB*(
proc initLightClientDB*(
backend: SqStoreRef,
names: LightClientDBNames): KvResult[LightClientDB] =
let
legacyHeaders =
? backend.initLegacyLightClientHeadersStore(names.legacyAltairHeaders)
headers =
? backend.initLightClientHeadersStore(names.altairHeaders)
? backend.initLightClientHeadersStore(
names.headers, names.legacyAltairHeaders)
syncCommittees =
? backend.initSyncCommitteesStore(names.altairSyncCommittees)
ok LightClientDB(
backend: backend,
legacyHeaders: legacyHeaders,
headers: headers,
syncCommittees: syncCommittees)
func close*(db: LightClientDB) =
if db.backend != nil:
db.legacyHeaders.close()
db.headers.close()
db.syncCommittees.close()
db[].reset()

View File

@ -52,9 +52,9 @@ programMain:
quit 1
let backend = SqStoreRef.init(dbDir, "nlc").expect("Database OK")
defer: backend.close()
static: doAssert LightClientDataFork.high == LightClientDataFork.Altair
let db = backend.initLightClientDB(LightClientDBNames(
altairHeaders: "altair_lc_headers",
legacyAltairHeaders: "altair_lc_headers",
headers: "lc_headers",
altairSyncCommittees: "altair_sync_committees")).expect("Database OK")
defer: db.close()
@ -159,41 +159,46 @@ programMain:
waitFor network.start()
proc onFinalizedHeader(
lightClient: LightClient, finalizedHeader: altair.LightClientHeader) =
info "New LC finalized header",
finalized_header = shortLog(finalizedHeader)
lightClient: LightClient, finalizedHeader: ForkedLightClientHeader) =
withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC finalized header",
finalized_header = shortLog(forkyHeader)
let
period = finalizedHeader.beacon.slot.sync_committee_period
syncCommittee = lightClient.finalizedSyncCommittee.expect("Bootstrap OK")
db.putSyncCommittee(period, syncCommittee)
db.putLatestFinalizedHeader(finalizedHeader)
let
period = forkyHeader.beacon.slot.sync_committee_period
syncCommittee = lightClient.finalizedSyncCommittee.expect("Init OK")
db.putSyncCommittee(period, syncCommittee)
db.putLatestFinalizedHeader(finalizedHeader)
proc onOptimisticHeader(
lightClient: LightClient, optimisticHeader: altair.LightClientHeader) =
info "New LC optimistic header",
optimistic_header = shortLog(optimisticHeader)
optimisticProcessor.setOptimisticHeader(optimisticHeader.beacon)
lightClient: LightClient, optimisticHeader: ForkedLightClientHeader) =
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
info "New LC optimistic header",
optimistic_header = shortLog(forkyHeader)
optimisticProcessor.setOptimisticHeader(forkyHeader.beacon)
lightClient.onFinalizedHeader = onFinalizedHeader
lightClient.onOptimisticHeader = onOptimisticHeader
lightClient.trustedBlockRoot = some config.trustedBlockRoot
let latestHeader = db.getLatestFinalizedHeader()
if latestHeader.isOk:
let
period = latestHeader.get.beacon.slot.sync_committee_period
syncCommittee = db.getSyncCommittee(period)
if syncCommittee.isErr:
error "LC store lacks sync committee", finalized_header = latestHeader.get
else:
lightClient.resetToFinalizedHeader(latestHeader.get, syncCommittee.get)
withForkyHeader(latestHeader):
when lcDataFork > LightClientDataFork.None:
let
period = forkyHeader.beacon.slot.sync_committee_period
syncCommittee = db.getSyncCommittee(period)
if syncCommittee.isErr:
error "LC store lacks sync committee", finalized_header = forkyHeader
else:
lightClient.resetToFinalizedHeader(latestHeader, syncCommittee.get)
# Full blocks gossip is required to portably drive an EL client:
# - EL clients may not sync when only driven with `forkChoiceUpdated`,
# e.g., Geth: "Forkchoice requested unknown head"
# - `newPayload` requires the full `ExecutionPayload` (most of block content)
# - `ExecutionPayload` block root is not available in
# - `ExecutionPayload` block hash is not available in
# `altair.LightClientHeader`, so won't be exchanged via light client gossip
#
# Future `ethereum/consensus-specs` versions may remove need for full blocks.
@ -201,16 +206,14 @@ programMain:
# optimized for reducing code duplication, e.g., with `nimbus_beacon_node`.
func isSynced(wallSlot: Slot): bool =
# Check whether light client is used
let optimisticHeader = lightClient.optimisticHeader.valueOr:
return false
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
if optimisticHeader.beacon.slot < max(wallSlot, maxAge.Slot) - maxAge:
return false
true
let optimisticHeader = lightClient.optimisticHeader
withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
# Check whether light client has synced sufficiently close to wall slot
const maxAge = 2 * SLOTS_PER_EPOCH
forkyHeader.beacon.slot >= max(wallSlot, maxAge.Slot) - maxAge
else:
false
func shouldSyncOptimistically(wallSlot: Slot): bool =
# Check whether an EL is connected
@ -273,19 +276,19 @@ programMain:
finalizedHeader = lightClient.finalizedHeader
optimisticHeader = lightClient.optimisticHeader
finalizedBid =
if finalizedHeader.isSome:
finalizedHeader.get.beacon.toBlockId()
finalizedBid = withForkyHeader(finalizedHeader):
when lcDataFork > LightClientDataFork.None:
forkyHeader.beacon.toBlockId()
else:
BlockId(root: genesisBlockRoot, slot: GENESIS_SLOT)
optimisticBid =
if optimisticHeader.isSome:
optimisticHeader.get.beacon.toBlockId()
optimisticBid = withForkyHeader(optimisticHeader):
when lcDataFork > LightClientDataFork.None:
forkyHeader.beacon.toBlockId()
else:
BlockId(root: genesisBlockRoot, slot: GENESIS_SLOT)
syncStatus =
if optimisticHeader.isNone:
if optimisticHeader.kind == LightClientDataFork.None:
"bootstrapping(" & $config.trustedBlockRoot & ")"
elif not isSynced(wallSlot):
"syncing"

View File

@ -52,6 +52,13 @@ type
ForkyLightClientStore* =
altair.LightClientStore
ForkedLightClientHeader* = object
case kind*: LightClientDataFork
of LightClientDataFork.None:
discard
of LightClientDataFork.Altair:
altairData*: altair.LightClientHeader
ForkedLightClientBootstrap* = object
case kind*: LightClientDataFork
of LightClientDataFork.None:
@ -157,6 +164,11 @@ template LightClientStore*(kind: static LightClientDataFork): auto =
else:
static: raiseAssert "Unreachable"
template Forky*(
x: typedesc[ForkedLightClientHeader],
kind: static LightClientDataFork): auto =
kind.LightClientHeader
template Forky*(
x: typedesc[ForkedLightClientBootstrap],
kind: static LightClientDataFork): auto =
@ -182,6 +194,9 @@ template Forky*(
kind: static LightClientDataFork): auto =
kind.LightClientStore
template Forked*(x: typedesc[ForkyLightClientHeader]): auto =
typedesc[ForkedLightClientHeader]
template Forked*(x: typedesc[ForkyLightClientBootstrap]): auto =
typedesc[ForkedLightClientBootstrap]
@ -217,6 +232,17 @@ template withLcDataFork*(
const lcDataFork {.inject, used.} = LightClientDataFork.None
body
template withForkyHeader*(
x: ForkedLightClientHeader, body: untyped): untyped =
case x.kind
of LightClientDataFork.Altair:
const lcDataFork {.inject, used.} = LightClientDataFork.Altair
template forkyHeader: untyped {.inject, used.} = x.altairData
body
of LightClientDataFork.None:
const lcDataFork {.inject, used.} = LightClientDataFork.None
body
template withForkyBootstrap*(
x: ForkedLightClientBootstrap, body: untyped): untyped =
case x.kind
@ -335,13 +361,35 @@ func matches*[A, B: SomeForkedLightClientUpdate](a: A, b: B): bool =
true
template forky*(
x: SomeForkedLightClientObject | ForkedLightClientStore,
x:
ForkedLightClientHeader |
SomeForkedLightClientObject |
ForkedLightClientStore,
kind: static LightClientDataFork): untyped =
when kind == LightClientDataFork.Altair:
x.altairData
else:
static: raiseAssert "Unreachable"
func migrateToDataFork*(
x: var ForkedLightClientHeader,
newKind: static LightClientDataFork) =
if newKind == x.kind:
# Already at correct kind
discard
elif newKind < x.kind:
# Downgrade not supported, re-initialize
x = ForkedLightClientHeader(kind: newKind)
else:
# Upgrade to Altair
when newKind >= LightClientDataFork.Altair:
if x.kind == LightClientDataFork.None:
x = ForkedLightClientHeader(
kind: LightClientDataFork.Altair)
static: doAssert LightClientDataFork.high == LightClientDataFork.Altair
doAssert x.kind == newKind
func migrateToDataFork*(
x: var ForkedLightClientBootstrap,
newKind: static LightClientDataFork) =
@ -438,7 +486,10 @@ func migrateToDataFork*(
doAssert x.kind == newKind
func migratingToDataFork*[
T: SomeForkedLightClientObject | ForkedLightClientStore](
T:
ForkedLightClientHeader |
SomeForkedLightClientObject |
ForkedLightClientStore](
x: T, newKind: static LightClientDataFork): T =
var upgradedObject = x
upgradedObject.migrateToDataFork(newKind)

View File

@ -10,8 +10,6 @@
import
# Standard library
std/[json, os, streams],
# Status libraries
stew/byteutils,
# Third-party
yaml,
# Beacon chain internals

View File

@ -9,7 +9,7 @@
import
# Status libraries
eth/keys, stew/objects, taskpools,
eth/keys, taskpools,
# Beacon chain internals
../beacon_chain/consensus_object_pools/
[block_clearance, block_quarantine, blockchain_dag],
@ -17,222 +17,238 @@ import
# Test utilities
./testutil, ./testdbutil
proc runTest(storeDataFork: static LightClientDataFork) =
suite "Light client - " & $storeDataFork & preset():
suite "Light client" & preset():
const # Test config, should be long enough to cover interesting transitions
headPeriod = 3.SyncCommitteePeriod
let
cfg = block: # Fork schedule so that each `LightClientDataFork` is covered
static: doAssert BeaconStateFork.high == BeaconStateFork.EIP4844
var res = defaultRuntimeConfig
res.ALTAIR_FORK_EPOCH = 1.Epoch
res.BELLATRIX_FORK_EPOCH = 2.Epoch
# $capellaImplementationMissing res.CAPELLA_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 1).Epoch
# $eip4844ImplementationMissing res.EIP4844_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 2).Epoch
res
altairStartSlot = cfg.ALTAIR_FORK_EPOCH.start_slot
proc advanceToSlot(
dag: ChainDAGRef,
targetSlot: Slot,
verifier: var BatchVerifier,
quarantine: var Quarantine,
attested = true,
syncCommitteeRatio = 0.82) =
var cache: StateCache
const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
while true:
var slot = getStateField(dag.headState, slot)
doAssert targetSlot >= slot
if targetSlot == slot: break
# When there is a large jump, skip to the end of the current period,
# create blocks for a few epochs to finalize it, then proceed
let
nextPeriod = slot.sync_committee_period + 1
periodEpoch = nextPeriod.start_epoch
periodSlot = periodEpoch.start_slot
checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
var info: ForkedEpochInfo
doAssert process_slots(cfg, dag.headState, checkpointSlot,
cache, info, flags = {}).isOk()
slot = checkpointSlot
# Create blocks for final few epochs
let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
for blck in makeTestBlocks(dag.headState, cache, blocks.int,
attested, syncCommitteeRatio, cfg):
let added =
case blck.kind
of BeaconBlockFork.Phase0:
const nilCallback = OnPhase0BlockAdded(nil)
dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
of BeaconBlockFork.Altair:
const nilCallback = OnAltairBlockAdded(nil)
dag.addHeadBlock(verifier, blck.altairData, nilCallback)
of BeaconBlockFork.Bellatrix:
const nilCallback = OnBellatrixBlockAdded(nil)
dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
of BeaconBlockFork.Capella:
const nilCallback = OnCapellaBlockAdded(nil)
dag.addHeadBlock(verifier, blck.capellaData, nilCallback)
of BeaconBlockFork.EIP4844:
const nilCallback = OnEIP4844BlockAdded(nil)
dag.addHeadBlock(verifier, blck.eip4844Data, nilCallback)
check: added.isOk()
dag.updateHead(added[], quarantine)
setup:
const num_validators = SLOTS_PER_EPOCH
let
cfg = block:
var res = defaultRuntimeConfig
res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1
res
altairStartSlot = cfg.ALTAIR_FORK_EPOCH.start_slot
validatorMonitor = newClone(ValidatorMonitor.init())
dag = ChainDAGRef.init(
cfg, makeTestDB(num_validators), validatorMonitor, {},
lcDataConfig = LightClientDataConfig(
serve: true,
importMode: LightClientDataImportMode.OnlyNew))
quarantine = newClone(Quarantine.init())
taskpool = Taskpool.new()
var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
proc advanceToSlot(
dag: ChainDAGRef,
targetSlot: Slot,
verifier: var BatchVerifier,
quarantine: var Quarantine,
attested = true,
syncCommitteeRatio = 0.82) =
var cache: StateCache
const maxAttestedSlotsPerPeriod = 3 * SLOTS_PER_EPOCH
while true:
var slot = getStateField(dag.headState, slot)
doAssert targetSlot >= slot
if targetSlot == slot: break
# When there is a large jump, skip to the end of the current period,
# create blocks for a few epochs to finalize it, then proceed
let
nextPeriod = slot.sync_committee_period + 1
periodEpoch = nextPeriod.start_epoch
periodSlot = periodEpoch.start_slot
checkpointSlot = periodSlot - maxAttestedSlotsPerPeriod
if targetSlot > checkpointSlot and checkpointSlot > dag.head.slot:
var info: ForkedEpochInfo
doAssert process_slots(cfg, dag.headState, checkpointSlot,
cache, info, flags = {}).isOk()
slot = checkpointSlot
# Create blocks for final few epochs
let blocks = min(targetSlot - slot, maxAttestedSlotsPerPeriod)
for blck in makeTestBlocks(dag.headState, cache, blocks.int,
attested, syncCommitteeRatio, cfg):
let added =
case blck.kind
of BeaconBlockFork.Phase0:
const nilCallback = OnPhase0BlockAdded(nil)
dag.addHeadBlock(verifier, blck.phase0Data, nilCallback)
of BeaconBlockFork.Altair:
const nilCallback = OnAltairBlockAdded(nil)
dag.addHeadBlock(verifier, blck.altairData, nilCallback)
of BeaconBlockFork.Bellatrix:
const nilCallback = OnBellatrixBlockAdded(nil)
dag.addHeadBlock(verifier, blck.bellatrixData, nilCallback)
of BeaconBlockFork.Capella:
const nilCallback = OnCapellaBlockAdded(nil)
dag.addHeadBlock(verifier, blck.capellaData, nilCallback)
of BeaconBlockFork.EIP4844:
const nilCallback = OnEIP4844BlockAdded(nil)
dag.addHeadBlock(verifier, blck.eip4844Data, nilCallback)
check: added.isOk()
dag.updateHead(added[], quarantine)
setup:
const num_validators = SLOTS_PER_EPOCH
test "Pre-Altair":
# Genesis
block:
let
validatorMonitor = newClone(ValidatorMonitor.init())
dag = ChainDAGRef.init(
cfg, makeTestDB(num_validators), validatorMonitor, {},
lcDataConfig = LightClientDataConfig(
serve: true,
importMode: LightClientDataImportMode.OnlyNew))
quarantine = newClone(Quarantine.init())
taskpool = Taskpool.new()
var verifier = BatchVerifier(rng: keys.newRng(), taskpool: taskpool)
test "Pre-Altair":
# Genesis
block:
let
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
dag.headState.kind == BeaconStateFork.Phase0
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
# Advance to last slot before Altair
dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
block:
let
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
dag.headState.kind == BeaconStateFork.Phase0
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
block:
let
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
dag.headState.kind == BeaconStateFork.Altair
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
test "Light client sync":
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
# Track trusted checkpoint for light client
let
genesis_validators_root = dag.genesis_validators_root
trusted_block_root = dag.head.root
# Advance to target slot
const
headPeriod = 2.SyncCommitteePeriod
periodEpoch = headPeriod.start_epoch
headSlot = (periodEpoch + 2).start_slot + 5
dag.advanceToSlot(headSlot, verifier, quarantine[])
let currentSlot = getStateField(dag.headState, slot)
# Initialize light client store
let bootstrap = dag.getLightClientBootstrap(trusted_block_root)
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
bootstrap.kind > LightClientDataFork.None
bootstrap.kind <= storeDataFork
let upgradedBootstrap = bootstrap.migratingToDataFork(storeDataFork)
template forkyBootstrap: untyped = upgradedBootstrap.forky(storeDataFork)
var storeRes = initialize_light_client_store(
trusted_block_root, forkyBootstrap, cfg)
check storeRes.isOk
template store(): auto = storeRes.get
dag.headState.kind == BeaconStateFork.Phase0
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
# Sync to latest sync committee period
var numIterations = 0
template storePeriod: SyncCommitteePeriod =
store.finalized_header.beacon.slot.sync_committee_period
while storePeriod + 1 < headPeriod:
let
period =
if store.is_next_sync_committee_known:
# Advance to last slot before Altair
dag.advanceToSlot(altairStartSlot - 1, verifier, quarantine[])
block:
let
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
dag.headState.kind == BeaconStateFork.Phase0
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
block:
let
update = dag.getLightClientUpdateForPeriod(0.SyncCommitteePeriod)
finalityUpdate = dag.getLightClientFinalityUpdate
optimisticUpdate = dag.getLightClientOptimisticUpdate
check:
dag.headState.kind == BeaconStateFork.Altair
update.kind == LightClientDataFork.None
finalityUpdate.kind == LightClientDataFork.None
optimisticUpdate.kind == LightClientDataFork.None
test "Light client sync":
# Advance to Altair
dag.advanceToSlot(altairStartSlot, verifier, quarantine[])
# Track trusted checkpoint for light client
let
genesis_validators_root = dag.genesis_validators_root
trusted_block_root = dag.head.root
# Advance to target slot
const
periodEpoch = headPeriod.start_epoch
headSlot = (periodEpoch + 2).start_slot + 5
dag.advanceToSlot(headSlot, verifier, quarantine[])
let currentSlot = getStateField(dag.headState, slot)
# Initialize light client store
var bootstrap = dag.getLightClientBootstrap(trusted_block_root)
check bootstrap.kind > LightClientDataFork.None
var store {.noinit.}: ForkedLightClientStore
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
var storeRes = initialize_light_client_store(
trusted_block_root, forkyBootstrap, cfg)
check storeRes.isOk
store = ForkedLightClientStore(kind: lcDataFork)
store.forky(lcDataFork) = storeRes.get
# Sync to latest sync committee period
var numIterations = 0
while true:
let storePeriod = withForkyStore(store):
when lcDataFork > LightClientDataFork.None:
forkyStore.finalized_header.beacon.slot.sync_committee_period
else:
GENESIS_SLOT.SyncCommitteePeriod
if storePeriod + 1 >= headPeriod:
break
let
period = withForkyStore(store):
when lcDataFork > LightClientDataFork.None:
if forkyStore.is_next_sync_committee_known:
storePeriod + 1
else:
storePeriod
update = dag.getLightClientUpdateForPeriod(period)
check:
update.kind > LightClientDataFork.None
update.kind <= storeDataFork
let upgradedUpdate = update.migratingToDataFork(storeDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(storeDataFork)
let res = process_light_client_update(
store, forkyUpdate, currentSlot, cfg, genesis_validators_root)
check:
forkyUpdate.finalized_header.beacon.slot.sync_committee_period ==
period
res.isOk
if forkyUpdate.finalized_header.beacon.slot >
forkyBootstrap.header.beacon.slot:
store.finalized_header == forkyUpdate.finalized_header
else:
store.finalized_header == forkyBootstrap.header
inc numIterations
if numIterations > 20: doAssert false # Avoid endless loop on test failure
storePeriod
update = dag.getLightClientUpdateForPeriod(period)
check update.kind > LightClientDataFork.None
if update.kind > store.kind:
withForkyUpdate(update):
when lcDataFork > LightClientDataFork.None:
store.migrateToDataFork(lcDataFork)
withForkyStore(store):
when lcDataFork > LightClientDataFork.None:
bootstrap.migrateToDataFork(lcDataFork)
template forkyBootstrap: untyped = bootstrap.forky(lcDataFork)
let upgradedUpdate = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(lcDataFork)
let res = process_light_client_update(
forkyStore, forkyUpdate, currentSlot, cfg, genesis_validators_root)
check:
forkyUpdate.finalized_header.beacon.slot.sync_committee_period ==
period
res.isOk
if forkyUpdate.finalized_header.beacon.slot >
forkyBootstrap.header.beacon.slot:
forkyStore.finalized_header == forkyUpdate.finalized_header
else:
forkyStore.finalized_header == forkyBootstrap.header
inc numIterations
if numIterations > 20: doAssert false # Avoid endless loop on test failure
# Sync to latest update
let finalityUpdate = dag.getLightClientFinalityUpdate
check:
finalityUpdate.kind > LightClientDataFork.None
finalityUpdate.kind <= storeDataFork
let upgradedFinalityUpdate =
finalityUpdate.migratingToDataFork(storeDataFork)
template forkyFinalityUpdate: untyped =
upgradedFinalityUpdate.forky(storeDataFork)
let res = process_light_client_update(
store, forkyFinalityUpdate, currentSlot, cfg, genesis_validators_root)
check:
forkyFinalityUpdate.attested_header.beacon.slot == dag.head.parent.slot
res.isOk
store.finalized_header == forkyFinalityUpdate.finalized_header
store.optimistic_header == forkyFinalityUpdate.attested_header
# Sync to latest update
let finalityUpdate = dag.getLightClientFinalityUpdate
check finalityUpdate.kind > LightClientDataFork.None
if finalityUpdate.kind > store.kind:
withForkyFinalityUpdate(finalityUpdate):
when lcDataFork > LightClientDataFork.None:
store.migrateToDataFork(lcDataFork)
withForkyStore(store):
when lcDataFork > LightClientDataFork.None:
let upgradedUpdate = finalityUpdate.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(lcDataFork)
let res = process_light_client_update(
forkyStore, forkyUpdate, currentSlot, cfg, genesis_validators_root)
check:
forkyUpdate.attested_header.beacon.slot == dag.head.parent.slot
res.isOk
forkyStore.finalized_header == forkyUpdate.finalized_header
forkyStore.optimistic_header == forkyUpdate.attested_header
test "Init from checkpoint":
# Fetch genesis state
let genesisState = assignClone dag.headState
test "Init from checkpoint":
# Fetch genesis state
let genesisState = assignClone dag.headState
# Advance to target slot for checkpoint
let finalizedSlot =
((altairStartSlot.sync_committee_period + 1).start_epoch + 2).start_slot
dag.advanceToSlot(finalizedSlot, verifier, quarantine[])
# Advance to target slot for checkpoint
let finalizedSlot =
((altairStartSlot.sync_committee_period + 1).start_epoch + 2).start_slot
dag.advanceToSlot(finalizedSlot, verifier, quarantine[])
# Initialize new DAG from checkpoint
let cpDb = BeaconChainDB.new("", inMemory = true)
ChainDAGRef.preInit(cpDb, genesisState[])
ChainDAGRef.preInit(cpDb, dag.headState) # dag.getForkedBlock(dag.head.bid).get)
let cpDag = ChainDAGRef.init(
cfg, cpDb, validatorMonitor, {},
lcDataConfig = LightClientDataConfig(
serve: true,
importMode: LightClientDataImportMode.Full))
# Initialize new DAG from checkpoint
let cpDb = BeaconChainDB.new("", inMemory = true)
ChainDAGRef.preInit(cpDb, genesisState[])
ChainDAGRef.preInit(cpDb, dag.headState) # dag.getForkedBlock(dag.head.bid).get)
let cpDag = ChainDAGRef.init(
cfg, cpDb, validatorMonitor, {},
lcDataConfig = LightClientDataConfig(
serve: true,
importMode: LightClientDataImportMode.Full))
# Advance by a couple epochs
for i in 1'u64 .. 10:
let headSlot = (finalizedSlot.epoch + i).start_slot
cpDag.advanceToSlot(headSlot, verifier, quarantine[])
# Advance by a couple epochs
for i in 1'u64 .. 10:
let headSlot = (finalizedSlot.epoch + i).start_slot
cpDag.advanceToSlot(headSlot, verifier, quarantine[])
check true
withAll(LightClientDataFork):
when lcDataFork > LightClientDataFork.None:
runTest(lcDataFork)
check true

View File

@ -19,10 +19,18 @@ import
./testutil, ./testdbutil
suite "Light client processor" & preset():
const # Test config, should be long enough to cover interesting transitions
lowPeriod = 0.SyncCommitteePeriod
lastPeriodWithSupermajority = 3.SyncCommitteePeriod
highPeriod = 5.SyncCommitteePeriod
let
cfg = block:
cfg = block: # Fork schedule so that each `LightClientDataFork` is covered
static: doAssert BeaconStateFork.high == BeaconStateFork.EIP4844
var res = defaultRuntimeConfig
res.ALTAIR_FORK_EPOCH = GENESIS_EPOCH + 1
res.ALTAIR_FORK_EPOCH = 1.Epoch
res.BELLATRIX_FORK_EPOCH = 2.Epoch
# $capellaImplementationMissing res.CAPELLA_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 1).Epoch
# $eip4844ImplementationMissing res.EIP4844_FORK_EPOCH = (EPOCHS_PER_SYNC_COMMITTEE_PERIOD * 2).Epoch
res
const numValidators = SLOTS_PER_EPOCH
@ -68,10 +76,6 @@ suite "Light client processor" & preset():
proc getTrustedBlockRoot(): Option[Eth2Digest] =
some trustedBlockRoot
const
lowPeriod = 0.SyncCommitteePeriod
lastPeriodWithSupermajority = 3.SyncCommitteePeriod
highPeriod = 5.SyncCommitteePeriod
for period in lowPeriod .. highPeriod:
const numFilledEpochsPerPeriod = 3
let slot = ((period + 1).start_epoch - numFilledEpochsPerPeriod).start_slot
@ -98,8 +102,7 @@ suite "Light client processor" & preset():
var numOnStoreInitializedCalls = 0
func onStoreInitialized() = inc numOnStoreInitializedCalls
const storeDataFork = LightClientProcessor.storeDataFork
let store = (ref Option[storeDataFork.LightClientStore])()
let store = (ref ForkedLightClientStore)()
var
processor = LightClientProcessor.new(
false, "", "", cfg, genesis_validators_root, finalizationMode,
@ -107,40 +110,42 @@ suite "Light client processor" & preset():
res: Result[bool, VerifierError]
test "Sync" & testNameSuffix:
let bootstrap = dag.getLightClientBootstrap(trustedBlockRoot)
check:
bootstrap.kind > LightClientDataFork.None
bootstrap.kind <= storeDataFork
let upgradedBootstrap = bootstrap.migratingToDataFork(storeDataFork)
template forkyBootstrap: untyped = upgradedBootstrap.forky(storeDataFork)
setTimeToSlot(forkyBootstrap.header.beacon.slot)
var bootstrap = dag.getLightClientBootstrap(trustedBlockRoot)
check bootstrap.kind > LightClientDataFork.None
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyBootstrap.header.beacon.slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), bootstrap)
check:
res.isOk
numOnStoreInitializedCalls == 1
store[].isSome
store[].kind > LightClientDataFork.None
# Reduce stack size by making this a `proc`
proc applyPeriodWithSupermajority(period: SyncCommitteePeriod) =
let update = dag.getLightClientUpdateForPeriod(period)
check:
update.kind > LightClientDataFork.None
update.kind <= storeDataFork
let upgradedUpdate = update.migratingToDataFork(storeDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(storeDataFork)
setTimeToSlot(forkyUpdate.signature_slot)
check update.kind > LightClientDataFork.None
withForkyUpdate(update):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyUpdate.signature_slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), update)
check:
res.isOk
store[].isSome
if forkyUpdate.finalized_header.beacon.slot >
forkyBootstrap.header.beacon.slot:
store[].get.finalized_header == forkyUpdate.finalized_header
else:
store[].get.finalized_header == forkyBootstrap.header
store[].get.optimistic_header == forkyUpdate.attested_header
check update.kind <= store[].kind
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
bootstrap.migrateToDataFork(lcDataFork)
template forkyBootstrap: untyped = bootstrap.forky(lcDataFork)
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isOk
if forkyUpdate.finalized_header.beacon.slot >
forkyBootstrap.header.beacon.slot:
forkyStore.finalized_header == forkyUpdate.finalized_header
else:
forkyStore.finalized_header == forkyBootstrap.header
forkyStore.optimistic_header == forkyUpdate.attested_header
for period in lowPeriod .. lastPeriodWithSupermajority:
applyPeriodWithSupermajority(period)
@ -148,58 +153,73 @@ suite "Light client processor" & preset():
# Reduce stack size by making this a `proc`
proc applyPeriodWithoutSupermajority(period: SyncCommitteePeriod) =
let update = dag.getLightClientUpdateForPeriod(period)
check:
update.kind > LightClientDataFork.None
update.kind <= storeDataFork
let upgradedUpdate = update.migratingToDataFork(storeDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(storeDataFork)
setTimeToSlot(forkyUpdate.signature_slot)
check update.kind > LightClientDataFork.None
withForkyUpdate(update):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyUpdate.signature_slot)
for i in 0 ..< 2:
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), update)
check update.kind <= store[].kind
if finalizationMode == LightClientFinalizationMode.Optimistic or
period == lastPeriodWithSupermajority + 1:
if finalizationMode == LightClientFinalizationMode.Optimistic or
i == 0:
check:
res.isOk
store[].isSome
store[].get.best_valid_update.isSome
store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isOk
forkyStore.best_valid_update.isSome
forkyStore.best_valid_update.get.matches(forkyUpdate)
else:
check:
res.isErr
res.error == VerifierError.Duplicate
store[].isSome
store[].get.best_valid_update.isSome
store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.Duplicate
forkyStore.best_valid_update.isSome
forkyStore.best_valid_update.get.matches(forkyUpdate)
else:
check:
res.isErr
res.error == VerifierError.MissingParent
store[].isSome
store[].get.best_valid_update.isSome
not store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.MissingParent
forkyStore.best_valid_update.isSome
not forkyStore.best_valid_update.get.matches(forkyUpdate)
proc applyDuplicate() = # Reduce stack size by making this a `proc`
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), update)
check update.kind <= store[].kind
if finalizationMode == LightClientFinalizationMode.Optimistic or
period == lastPeriodWithSupermajority + 1:
check:
res.isErr
res.error == VerifierError.Duplicate
store[].isSome
store[].get.best_valid_update.isSome
store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.Duplicate
forkyStore.best_valid_update.isSome
forkyStore.best_valid_update.get.matches(forkyUpdate)
else:
check:
res.isErr
res.error == VerifierError.MissingParent
store[].isSome
store[].get.best_valid_update.isSome
not store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.MissingParent
forkyStore.best_valid_update.isSome
not forkyStore.best_valid_update.get.matches(forkyUpdate)
applyDuplicate()
time += chronos.minutes(15)
@ -210,58 +230,83 @@ suite "Light client processor" & preset():
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), update)
check update.kind <= store[].kind
if finalizationMode == LightClientFinalizationMode.Optimistic:
check:
res.isErr
res.error == VerifierError.Duplicate
store[].isSome
store[].get.best_valid_update.isNone
if store[].get.finalized_header == forkyUpdate.attested_header:
break
check store[].get.finalized_header == forkyUpdate.finalized_header
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.Duplicate
forkyStore.best_valid_update.isNone
if forkyStore.finalized_header == forkyUpdate.attested_header:
break
check forkyStore.finalized_header ==
forkyUpdate.finalized_header
elif period == lastPeriodWithSupermajority + 1:
check:
res.isErr
res.error == VerifierError.Duplicate
store[].isSome
store[].get.best_valid_update.isSome
store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.Duplicate
forkyStore.best_valid_update.isSome
forkyStore.best_valid_update.get.matches(forkyUpdate)
else:
check:
res.isErr
res.error == VerifierError.MissingParent
store[].isSome
store[].get.best_valid_update.isSome
not store[].get.best_valid_update.get.matches(forkyUpdate)
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
res.isErr
res.error == VerifierError.MissingParent
forkyStore.best_valid_update.isSome
not forkyStore.best_valid_update.get.matches(forkyUpdate)
if finalizationMode == LightClientFinalizationMode.Optimistic:
check store[].get.finalized_header == forkyUpdate.attested_header
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check forkyStore.finalized_header == forkyUpdate.attested_header
else:
check store[].get.finalized_header != forkyUpdate.attested_header
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
let upgraded = update.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check forkyStore.finalized_header != forkyUpdate.attested_header
for period in lastPeriodWithSupermajority + 1 .. highPeriod:
applyPeriodWithoutSupermajority(period)
let
previousFinalized = store[].get.finalized_header
finalityUpdate = dag.getLightClientFinalityUpdate()
check:
finalityUpdate.kind > LightClientDataFork.None
finalityUpdate.kind <= storeDataFork
let upgradedFinalityUpdate =
finalityUpdate.migratingToDataFork(storeDataFork)
template forkyFinalityUpdate: untyped =
upgradedFinalityUpdate.forky(storeDataFork)
setTimeToSlot(forkyFinalityUpdate.signature_slot)
var oldFinalized {.noinit.}: ForkedLightClientHeader
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
oldFinalized = ForkedLightClientHeader(kind: lcDataFork)
oldFinalized.forky(lcDataFork) = forkyStore.finalized_header
else: raiseAssert "Unreachable"
let finalityUpdate = dag.getLightClientFinalityUpdate()
check finalityUpdate.kind > LightClientDataFork.None
withForkyFinalityUpdate(finalityUpdate):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyFinalityUpdate.signature_slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), finalityUpdate)
check finalityUpdate.kind <= store[].kind
if res.isOk:
check:
finalizationMode == LightClientFinalizationMode.Optimistic
store[].isSome
store[].get.finalized_header == previousFinalized
store[].get.best_valid_update.isSome
store[].get.best_valid_update.get.matches(forkyFinalityUpdate)
store[].get.optimistic_header == forkyFinalityUpdate.attested_header
withForkyStore(store[]):
when lcDataFork > LightClientDataFork.None:
oldFinalized.migrateToDataFork(lcDataFork)
template forkyOldFinalized: untyped = oldFinalized.forky(lcDataFork)
let upgraded = finalityUpdate.migratingToDataFork(lcDataFork)
template forkyUpdate: untyped = upgraded.forky(lcDataFork)
check:
finalizationMode == LightClientFinalizationMode.Optimistic
forkyStore.finalized_header == forkyOldFinalized
forkyStore.best_valid_update.isSome
forkyStore.best_valid_update.get.matches(forkyUpdate)
forkyStore.optimistic_header == forkyUpdate.attested_header
elif finalizationMode == LightClientFinalizationMode.Optimistic:
check res.error == VerifierError.Duplicate
else:
@ -270,15 +315,11 @@ suite "Light client processor" & preset():
test "Invalid bootstrap" & testNameSuffix:
var bootstrap = dag.getLightClientBootstrap(trustedBlockRoot)
check:
bootstrap.kind > LightClientDataFork.None
bootstrap.kind <= storeDataFork
check bootstrap.kind > LightClientDataFork.None
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
forkyBootstrap.header.beacon.slot.inc()
let upgradedBootstrap = bootstrap.migratingToDataFork(storeDataFork)
template forkyBootstrap: untyped = upgradedBootstrap.forky(storeDataFork)
setTimeToSlot(forkyBootstrap.header.beacon.slot)
setTimeToSlot(forkyBootstrap.header.beacon.slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), bootstrap)
check:
@ -288,12 +329,10 @@ suite "Light client processor" & preset():
test "Duplicate bootstrap" & testNameSuffix:
let bootstrap = dag.getLightClientBootstrap(trustedBlockRoot)
check:
bootstrap.kind > LightClientDataFork.None
bootstrap.kind <= storeDataFork
let upgradedBootstrap = bootstrap.migratingToDataFork(storeDataFork)
template forkyBootstrap: untyped = upgradedBootstrap.forky(storeDataFork)
setTimeToSlot(forkyBootstrap.header.beacon.slot)
check bootstrap.kind > LightClientDataFork.None
withForkyBootstrap(bootstrap):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyBootstrap.header.beacon.slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), bootstrap)
check:
@ -308,12 +347,10 @@ suite "Light client processor" & preset():
test "Missing bootstrap (update)" & testNameSuffix:
let update = dag.getLightClientUpdateForPeriod(lowPeriod)
check:
update.kind > LightClientDataFork.None
update.kind <= storeDataFork
let upgradedUpdate = update.migratingToDataFork(storeDataFork)
template forkyUpdate: untyped = upgradedUpdate.forky(storeDataFork)
setTimeToSlot(forkyUpdate.signature_slot)
check update.kind > LightClientDataFork.None
withForkyUpdate(update):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyUpdate.signature_slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), update)
check:
@ -323,14 +360,10 @@ suite "Light client processor" & preset():
test "Missing bootstrap (finality update)" & testNameSuffix:
let finalityUpdate = dag.getLightClientFinalityUpdate()
check:
finalityUpdate.kind > LightClientDataFork.None
finalityUpdate.kind <= storeDataFork
let upgradedFinalityUpdate =
finalityUpdate.migratingToDataFork(storeDataFork)
template forkyFinalityUpdate: untyped =
upgradedFinalityUpdate.forky(storeDataFork)
setTimeToSlot(forkyFinalityUpdate.signature_slot)
check finalityUpdate.kind > LightClientDataFork.None
withForkyFinalityUpdate(finalityUpdate):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyFinalityUpdate.signature_slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), finalityUpdate)
check:
@ -340,14 +373,10 @@ suite "Light client processor" & preset():
test "Missing bootstrap (optimistic update)" & testNameSuffix:
let optimisticUpdate = dag.getLightClientOptimisticUpdate()
check:
optimisticUpdate.kind > LightClientDataFork.None
optimisticUpdate.kind <= storeDataFork
let upgradedOptimisticUpdate =
optimisticUpdate.migratingToDataFork(storeDataFork)
template forkyOptimisticUpdate: untyped =
upgradedOptimisticUpdate.forky(storeDataFork)
setTimeToSlot(forkyOptimisticUpdate.signature_slot)
check optimisticUpdate.kind > LightClientDataFork.None
withForkyOptimisticUpdate(optimisticUpdate):
when lcDataFork > LightClientDataFork.None:
setTimeToSlot(forkyOptimisticUpdate.signature_slot)
res = processor[].storeObject(
MsgSource.gossip, getBeaconTime(), optimisticUpdate)
check: