Mirror of https://github.com/status-im/nimbus-eth2.git
cache LC headers and sync committees for bootstrap (#4499)
To allow LC data retention longer than the retention for historic states, introduce persistent DB caches for `current_sync_committee` and `LightClientHeader` of finalized epoch boundary blocks. This way, historic `LightClientBootstrap` requests can still be honored even after pruning. Historic `LightClientUpdate` requests are already answered from fully persisted objects, so they need no changes.

Sync committees and headers are cached when new data is finalized. For existing data, the info is lazily cached on first access.

Co-authored-by: Jacek Sieka <jacek@status.im>
parent daa99f43c3
commit 9f279e2933
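As a rough illustration of the read path this change enables (a hedged sketch, not the actual nimbus-eth2 implementation), the snippet below models the two new persistent caches with plain in-memory tables and made-up placeholder types (`Root`, `Period`, `Header`, `SyncCommittee`, `Bootstrap`, `LcDataCache` are all illustrative). It shows the cache-first lookup with lazy backfill described in the commit message: serve `LightClientBootstrap` data from the caches when present, otherwise derive it from a fallback source and cache it for the next request.

# Illustrative sketch only: simplified in-memory stand-ins, not the nimbus-eth2 API.
import std/[options, tables]

type
  Root = string     # stand-in for Eth2Digest
  Period = uint64   # stand-in for SyncCommitteePeriod

  Header = object           # stand-in for a LightClientHeader
    slot: uint64
    root: Root

  SyncCommittee = object    # stand-in for altair.SyncCommittee
    id: Period

  Bootstrap = object        # stand-in for LightClientBootstrap
    header: Header
    currentSyncCommittee: SyncCommittee

  LcDataCache = object      # stand-in for the SQLite-backed stores
    headers: Table[Root, Header]                  # like `lc_xxxxx_headers`
    syncCommittees: Table[Period, SyncCommittee]  # like `lc_altair_sync_committees`

func period(h: Header): Period =
  # 32 slots per epoch * 256 epochs per sync committee period
  Period(h.slot div 8192)

proc getBootstrap(cache: var LcDataCache, blockRoot: Root,
                  loadFromDag: proc (root: Root): Option[Header]): Option[Bootstrap] =
  # Cache-first: if the header was stored at finalization (or on an earlier
  # request), the bootstrap is served without any historic block or state.
  var header: Option[Header]
  if blockRoot in cache.headers:
    header = some(cache.headers[blockRoot])
  else:
    # Fallback for pre-existing data: derive it once, then cache it lazily,
    # mirroring "info is lazily cached on first access".
    header = loadFromDag(blockRoot)
    if header.isSome:
      cache.headers[blockRoot] = header.get
  if header.isNone:
    return none(Bootstrap)
  let p = period(header.get)
  if p notin cache.syncCommittees:
    # The real code would recover the committee from a historic state here;
    # the sketch simply reports the bootstrap as unavailable.
    return none(Bootstrap)
  some(Bootstrap(
    header: header.get,
    currentSyncCommittee: cache.syncCommittees[p]))

when isMainModule:
  var cache = LcDataCache(
    headers: initTable[Root, Header](),
    syncCommittees: initTable[Period, SyncCommittee]())
  # What finalization does for new data: persist the finalized epoch boundary
  # block's header and the period's current sync committee.
  let hdr = Header(slot: 16384, root: "0xabc")
  cache.headers[hdr.root] = hdr
  cache.syncCommittees[period(hdr)] = SyncCommittee(id: period(hdr))
  # A later bootstrap request is honored from the caches alone, even if the
  # corresponding historic state has been pruned (the DAG loader returns none).
  let bs = cache.getBootstrap("0xabc",
    proc (root: Root): Option[Header] = none(Header))
  doAssert bs.isSome and bs.get.header.slot == 16384

In the actual change the stores are SQLite tables (`lc_xxxxx_headers`, `lc_altair_sync_committees`), the fallback derives data from the DAG and historic states, and rows below the retention target are pruned per period via `keepPeriodsFrom`.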
@@ -520,10 +520,23 @@ proc new*(T: type BeaconChainDB,
     finalizedBlocks = FinalizedBlocks.init(db, "finalized_blocks").expectDb()

     lcData = db.initLightClientDataDB(LightClientDataDBNames(
+      altairHeaders: "lc_altair_headers",
+      capellaHeaders:
+        if cfg.CAPELLA_FORK_EPOCH != FAR_FUTURE_EPOCH:
+          "lc_capella_headers"
+        else:
+          "",
+      eip4844Headers:
+        if cfg.EIP4844_FORK_EPOCH != FAR_FUTURE_EPOCH:
+          "lc_eip4844_headers"
+        else:
+          "",
       altairCurrentBranches: "lc_altair_current_branches",
+      altairSyncCommittees: "lc_altair_sync_committees",
       legacyAltairBestUpdates: "lc_altair_best_updates",
       bestUpdates: "lc_best_updates",
       sealedPeriods: "lc_sealed_periods")).expectDb()
+  static: doAssert LightClientDataFork.high == LightClientDataFork.EIP4844

   var blobs : KvStoreRef
   if cfg.EIP4844_FORK_EPOCH != FAR_FUTURE_EPOCH:
@@ -19,10 +19,30 @@ import

 logScope: topics = "lcdata"

+# `lc_xxxxx_headers` contains a copy of historic `LightClientHeader`.
+# Data is only kept for blocks that are used in `LightClientBootstrap` objects.
+# Caching is necessary to support longer retention for LC data than state data.
+# SSZ because this data does not compress well, and because this data
+# needs to be bundled together with other data to fulfill requests.
+# Mainnet data size (all columns):
+# - Altair: ~38 KB per `SyncCommitteePeriod` (~1.0 MB per month)
+# - Capella: ~222 KB per `SyncCommitteePeriod` (~6.1 MB per month)
+# - EIP4844: ~230 KB per `SyncCommitteePeriod` (~6.3 MB per month)
+#
 # `lc_altair_current_branches` holds merkle proofs needed to
 # construct `LightClientBootstrap` objects.
 # SSZ because this data does not compress well, and because this data
 # needs to be bundled together with other data to fulfill requests.
+# Mainnet data size (all columns):
+# - Altair ... EIP4844: ~42 KB per `SyncCommitteePeriod` (~1.1 MB per month)
+#
+# `lc_altair_sync_committees` contains a copy of finalized sync committees.
+# They are initially populated from the main DAG (usually a fast state access).
+# Caching is necessary to support longer retention for LC data than state data.
+# SSZ because this data does not compress well, and because this data
+# needs to be bundled together with other data to fulfill requests.
+# Mainnet data size (all columns):
+# - Altair ... EIP4844: ~32 KB per `SyncCommitteePeriod` (~0.9 MB per month)
 #
 # `lc_best_updates` holds full `LightClientUpdate` objects in SSZ form.
 # These objects are frequently queried in bulk, but there is only one per
@@ -35,19 +55,36 @@ logScope: topics = "lcdata"
 # deriving the fork digest; the `kind` column is not sufficient to derive
 # the fork digest, because the same storage format may be used across forks.
 # SSZ storage selected due to the small size and reduced logic complexity.
+# Mainnet data size (all columns):
+# - Altair: ~33 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - Capella: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month)
+# - EIP4844: ~34 KB per `SyncCommitteePeriod` (~0.9 MB per month)
 #
 # `lc_sealed_periods` contains the sync committee periods for which
 # full light client data was imported. Data for these periods may no longer
 # improve regardless of further block processing. The listed periods are skipped
 # when restarting the program.
+# Mainnet data size (all columns):
+# - All forks: 8 bytes per `SyncCommitteePeriod` (~0.0 MB per month)

 type
+  LightClientHeaderStore = object
+    getStmt: SqliteStmt[array[32, byte], seq[byte]]
+    putStmt: SqliteStmt[(array[32, byte], int64, seq[byte]), void]
+    keepFromStmt: SqliteStmt[int64, void]
+
   CurrentSyncCommitteeBranchStore = object
     containsStmt: SqliteStmt[int64, int64]
     getStmt: SqliteStmt[int64, seq[byte]]
     putStmt: SqliteStmt[(int64, seq[byte]), void]
     keepFromStmt: SqliteStmt[int64, void]

+  SyncCommitteeStore = object
+    containsStmt: SqliteStmt[int64, int64]
+    getStmt: SqliteStmt[int64, seq[byte]]
+    putStmt: SqliteStmt[(int64, seq[byte]), void]
+    keepFromStmt: SqliteStmt[int64, void]
+
   LegacyBestLightClientUpdateStore = object
     getStmt: SqliteStmt[int64, (int64, seq[byte])]
     putStmt: SqliteStmt[(int64, seq[byte]), void]
@@ -72,12 +109,20 @@ type
     backend: SqStoreRef
       ## SQLite backend

+    headers: array[LightClientDataFork, LightClientHeaderStore]
+      ## Eth2Digest -> (Slot, LightClientHeader)
+      ## Cached block headers to support longer retention than block storage.
+
     currentBranches: CurrentSyncCommitteeBranchStore
       ## Slot -> altair.CurrentSyncCommitteeBranch
       ## Cached data for creating future `LightClientBootstrap` instances.
       ## Key is the block slot of which the post state was used to get the data.
       ## Data stored for all finalized epoch boundary blocks.

+    syncCommittees: SyncCommitteeStore
+      ## SyncCommitteePeriod -> altair.SyncCommittee
+      ## Cached sync committees to support longer retention than state storage.
+
     legacyBestUpdates: LegacyBestLightClientUpdateStore
       ## SyncCommitteePeriod -> altair.LightClientUpdate
       ## Used through Bellatrix.
@@ -97,6 +142,74 @@ template disposeSafe(s: untyped): untyped =
     s.dispose()
     s = nil

+proc initHeadersStore(
+    backend: SqStoreRef,
+    name, typeName: string): KvResult[LightClientHeaderStore] =
+  if name == "":
+    return ok LightClientHeaderStore()
+  if not backend.readOnly:
+    ? backend.exec("""
+      CREATE TABLE IF NOT EXISTS `""" & name & """` (
+        `block_root` BLOB PRIMARY KEY,  -- `Eth2Digest`
+        `slot` INTEGER,                 -- `Slot`
+        `header` BLOB                   -- `""" & typeName & """` (SSZ)
+      );
+    """)
+  if not ? backend.hasTable(name):
+    return ok LightClientHeaderStore()
+
+  let
+    getStmt = backend.prepareStmt("""
+      SELECT `header`
+      FROM `""" & name & """`
+      WHERE `block_root` = ?;
+    """, array[32, byte], seq[byte], managed = false).expect("SQL query OK")
+    putStmt = backend.prepareStmt("""
+      REPLACE INTO `""" & name & """` (
+        `block_root`, `slot`, `header`
+      ) VALUES (?, ?, ?);
+    """, (array[32, byte], int64, seq[byte]), void, managed = false)
+      .expect("SQL query OK")
+    keepFromStmt = backend.prepareStmt("""
+      DELETE FROM `""" & name & """`
+      WHERE `slot` < ?;
+    """, int64, void, managed = false).expect("SQL query OK")
+
+  ok LightClientHeaderStore(
+    getStmt: getStmt,
+    putStmt: putStmt,
+    keepFromStmt: keepFromStmt)
+
+func close(store: var LightClientHeaderStore) =
+  store.getStmt.disposeSafe()
+  store.putStmt.disposeSafe()
+  store.keepFromStmt.disposeSafe()
+
+proc getHeader*[T: ForkyLightClientHeader](
+    db: LightClientDataDB, blockRoot: Eth2Digest): Opt[T] =
+  if distinctBase(db.headers[T.kind].getStmt) == nil:
+    return Opt.none(T)
+  var header: seq[byte]
+  for res in db.headers[T.kind].getStmt.exec(blockRoot.data, header):
+    res.expect("SQL query OK")
+    try:
+      return ok SSZ.decode(header, T)
+    except SszError as exc:
+      error "LC data store corrupted", store = "headers", kind = T.kind,
+        blockRoot, exc = exc.msg
+      return Opt.none(T)
+
+func putHeader*[T: ForkyLightClientHeader](
+    db: LightClientDataDB, header: T) =
+  doAssert not db.backend.readOnly and
+    distinctBase(db.headers[T.kind].putStmt) != nil
+  let
+    blockRoot = hash_tree_root(header.beacon)
+    slot = header.beacon.slot
+    res = db.headers[T.kind].putStmt.exec(
+      (blockRoot.data, slot.int64, SSZ.encode(header)))
+  res.expect("SQL query OK")
+
 proc initCurrentBranchesStore(
     backend: SqStoreRef,
     name: string): KvResult[CurrentSyncCommitteeBranchStore] =
@@ -122,7 +235,7 @@ proc initCurrentBranchesStore(
       WHERE `slot` = ?;
     """, int64, seq[byte], managed = false).expect("SQL query OK")
     putStmt = backend.prepareStmt("""
-      INSERT INTO `""" & name & """` (
+      REPLACE INTO `""" & name & """` (
         `slot`, `branch`
       ) VALUES (?, ?);
     """, (int64, seq[byte]), void, managed = false).expect("SQL query OK")
@@ -156,19 +269,19 @@ func hasCurrentSyncCommitteeBranch*(
   false

 proc getCurrentSyncCommitteeBranch*(
-    db: LightClientDataDB, slot: Slot): altair.CurrentSyncCommitteeBranch =
+    db: LightClientDataDB, slot: Slot): Opt[altair.CurrentSyncCommitteeBranch] =
   if not slot.isSupportedBySQLite or
       distinctBase(db.currentBranches.getStmt) == nil:
-    return default(altair.CurrentSyncCommitteeBranch)
+    return Opt.none(altair.CurrentSyncCommitteeBranch)
   var branch: seq[byte]
   for res in db.currentBranches.getStmt.exec(slot.int64, branch):
     res.expect("SQL query OK")
     try:
-      return SSZ.decode(branch, altair.CurrentSyncCommitteeBranch)
+      return ok SSZ.decode(branch, altair.CurrentSyncCommitteeBranch)
     except SszError as exc:
       error "LC data store corrupted", store = "currentBranches",
         slot, exc = exc.msg
-      return default(altair.CurrentSyncCommitteeBranch)
+      return Opt.none(altair.CurrentSyncCommitteeBranch)

 func putCurrentSyncCommitteeBranch*(
     db: LightClientDataDB, slot: Slot,
@@ -179,6 +292,89 @@ func putCurrentSyncCommitteeBranch*(
   let res = db.currentBranches.putStmt.exec((slot.int64, SSZ.encode(branch)))
   res.expect("SQL query OK")

+proc initSyncCommitteesStore(
+    backend: SqStoreRef,
+    name: string): KvResult[SyncCommitteeStore] =
+  if not backend.readOnly:
+    ? backend.exec("""
+      CREATE TABLE IF NOT EXISTS `""" & name & """` (
+        `period` INTEGER PRIMARY KEY,  -- `SyncCommitteePeriod`
+        `sync_committee` BLOB          -- `altair.SyncCommittee` (SSZ)
+      );
+    """)
+  if not ? backend.hasTable(name):
+    return ok SyncCommitteeStore()
+
+  let
+    containsStmt = backend.prepareStmt("""
+      SELECT 1 AS `exists`
+      FROM `""" & name & """`
+      WHERE `period` = ?;
+    """, int64, int64, managed = false).expect("SQL query OK")
+    getStmt = backend.prepareStmt("""
+      SELECT `sync_committee`
+      FROM `""" & name & """`
+      WHERE `period` = ?;
+    """, int64, seq[byte], managed = false).expect("SQL query OK")
+    putStmt = backend.prepareStmt("""
+      REPLACE INTO `""" & name & """` (
+        `period`, `sync_committee`
+      ) VALUES (?, ?);
+    """, (int64, seq[byte]), void, managed = false).expect("SQL query OK")
+    keepFromStmt = backend.prepareStmt("""
+      DELETE FROM `""" & name & """`
+      WHERE `period` < ?;
+    """, int64, void, managed = false).expect("SQL query OK")
+
+  ok SyncCommitteeStore(
+    containsStmt: containsStmt,
+    getStmt: getStmt,
+    putStmt: putStmt,
+    keepFromStmt: keepFromStmt)
+
+func close(store: var SyncCommitteeStore) =
+  store.containsStmt.disposeSafe()
+  store.getStmt.disposeSafe()
+  store.putStmt.disposeSafe()
+  store.keepFromStmt.disposeSafe()
+
+func hasSyncCommittee*(
+    db: LightClientDataDB, period: SyncCommitteePeriod): bool =
+  doAssert period.isSupportedBySQLite
+  if distinctBase(db.syncCommittees.containsStmt) == nil:
+    return false
+  var exists: int64
+  for res in db.syncCommittees.containsStmt.exec(period.int64, exists):
+    res.expect("SQL query OK")
+    doAssert exists == 1
+    return true
+  false
+
+proc getSyncCommittee*(
+    db: LightClientDataDB, period: SyncCommitteePeriod
+): Opt[altair.SyncCommittee] =
+  doAssert period.isSupportedBySQLite
+  if distinctBase(db.syncCommittees.getStmt) == nil:
+    return Opt.none(altair.SyncCommittee)
+  var branch: seq[byte]
+  for res in db.syncCommittees.getStmt.exec(period.int64, branch):
+    res.expect("SQL query OK")
+    try:
+      return ok SSZ.decode(branch, altair.SyncCommittee)
+    except SszError as exc:
+      error "LC data store corrupted", store = "syncCommittees",
+        period, exc = exc.msg
+      return Opt.none(altair.SyncCommittee)
+
+func putSyncCommittee*(
+    db: LightClientDataDB, period: SyncCommitteePeriod,
+    syncCommittee: altair.SyncCommittee) =
+  doAssert not db.backend.readOnly  # All `stmt` are non-nil
+  doAssert period.isSupportedBySQLite
+  let res = db.syncCommittees.putStmt.exec(
+    (period.int64, SSZ.encode(syncCommittee)))
+  res.expect("SQL query OK")
+
 proc initLegacyBestUpdatesStore(
     backend: SqStoreRef,
     name: string,
@@ -391,7 +587,7 @@ proc initSealedPeriodsStore(
       WHERE `period` = ?;
     """, int64, int64, managed = false).expect("SQL query OK")
     putStmt = backend.prepareStmt("""
-      INSERT INTO `""" & name & """` (
+      REPLACE INTO `""" & name & """` (
        `period`
       ) VALUES (?);
     """, int64, void, managed = false).expect("SQL query OK")
@@ -448,7 +644,7 @@ func delNonFinalizedPeriodsFrom*(
   block:
     let res = db.legacyBestUpdates.delFromStmt.exec(minPeriod.int64)
     res.expect("SQL query OK")
-  # `currentBranches` only has finalized data
+  # `syncCommittees`, `currentBranches` and `headers` only have finalized data

 func keepPeriodsFrom*(
     db: LightClientDataDB, minPeriod: SyncCommitteePeriod) =
@@ -463,13 +659,25 @@ func keepPeriodsFrom*(
   block:
     let res = db.legacyBestUpdates.keepFromStmt.exec(minPeriod.int64)
     res.expect("SQL query OK")
+  block:
+    let res = db.syncCommittees.keepFromStmt.exec(minPeriod.int64)
+    res.expect("SQL query OK")
   let minSlot = min(minPeriod.start_slot, int64.high.Slot)
   block:
     let res = db.currentBranches.keepFromStmt.exec(minSlot.int64)
     res.expect("SQL query OK")
+  for lcDataFork, store in db.headers:
+    if lcDataFork > LightClientDataFork.None and
+        distinctBase(store.keepFromStmt) != nil:
+      let res = store.keepFromStmt.exec(minSlot.int64)
+      res.expect("SQL query OK")

 type LightClientDataDBNames* = object
+  altairHeaders*: string
+  capellaHeaders*: string
+  eip4844Headers*: string
   altairCurrentBranches*: string
+  altairSyncCommittees*: string
   legacyAltairBestUpdates*: string
   bestUpdates*: string
   sealedPeriods*: string
@@ -477,9 +685,25 @@ type LightClientDataDBNames* = object
 proc initLightClientDataDB*(
     backend: SqStoreRef,
     names: LightClientDataDBNames): KvResult[LightClientDataDB] =
+  static: doAssert LightClientDataFork.high == LightClientDataFork.EIP4844
   let
+    headers = [
+      # LightClientDataFork.None
+      LightClientHeaderStore(),
+      # LightClientDataFork.Altair
+      ? backend.initHeadersStore(
+        names.altairHeaders, "altair.LightClientHeader"),
+      # LightClientDataFork.Capella
+      ? backend.initHeadersStore(
+        names.capellaHeaders, "capella.LightClientHeader"),
+      # LightClientDataFork.EIP4844
+      ? backend.initHeadersStore(
+        names.eip4844Headers, "eip4844.LightClientHeader")
+    ]
     currentBranches =
       ? backend.initCurrentBranchesStore(names.altairCurrentBranches)
+    syncCommittees =
+      ? backend.initSyncCommitteesStore(names.altairSyncCommittees)
     legacyBestUpdates =
       ? backend.initLegacyBestUpdatesStore(names.legacyAltairBestUpdates)
     bestUpdates =
@@ -489,15 +713,21 @@ proc initLightClientDataDB*(
       ? backend.initSealedPeriodsStore(names.sealedPeriods)

   ok LightClientDataDB(
+    headers: headers,
     backend: backend,
     currentBranches: currentBranches,
+    syncCommittees: syncCommittees,
     legacyBestUpdates: legacyBestUpdates,
     bestUpdates: bestUpdates,
     sealedPeriods: sealedPeriods)

 proc close*(db: LightClientDataDB) =
   if db.backend != nil:
+    for lcDataFork in LightClientDataFork:
+      if lcDataFork > LightClientDataFork.None:
+        db.headers[lcDataFork].close()
     db.currentBranches.close()
+    db.syncCommittees.close()
     db.legacyBestUpdates.close()
     db.bestUpdates.close()
     db.sealedPeriods.close()
@@ -9,7 +9,7 @@

 import
   # Status libraries
-  stew/[bitops2, objects],
+  stew/bitops2,
   # Beacon chain internals
   ../spec/datatypes/[phase0, altair, bellatrix, capella, eip4844],
   ../beacon_chain_db_light_client,
@@ -202,16 +202,26 @@ proc initLightClientBootstrapForPeriod(
       boundarySlot = bid.slot.nextEpochBoundarySlot
     if boundarySlot == nextBoundarySlot and bid.slot >= lowSlot and
         not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(bid.slot):
+      let bdata = dag.getExistingForkedBlock(bid).valueOr:
+        dag.handleUnexpectedLightClientError(bid.slot)
+        res.err()
+        continue
       if not dag.updateExistingState(
           tmpState[], bid.atSlot, save = false, tmpCache):
         dag.handleUnexpectedLightClientError(bid.slot)
         res.err()
         continue
-      let branch = withState(tmpState[]):
+      withStateAndBlck(tmpState[], bdata):
         when stateFork >= BeaconStateFork.Altair:
-          forkyState.data.build_proof(altair.CURRENT_SYNC_COMMITTEE_INDEX).get
+          const lcDataFork = lcDataForkAtStateFork(stateFork)
+          if not dag.lcDataStore.db.hasSyncCommittee(period):
+            dag.lcDataStore.db.putSyncCommittee(
+              period, forkyState.data.current_sync_committee)
+          dag.lcDataStore.db.putHeader(blck.toLightClientHeader(lcDataFork))
+          dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
+            bid.slot, forkyState.data.build_proof(
+              altair.CURRENT_SYNC_COMMITTEE_INDEX).get)
         else: raiseAssert "Unreachable"
-      dag.lcDataStore.db.putCurrentSyncCommitteeBranch(bid.slot, branch)
   res

 proc initLightClientUpdateForPeriod(
@@ -843,6 +853,35 @@ proc processFinalizationForLightClient*(
         break
       bid = bsi.bid
     if bid.slot >= lowSlot:
+      let
+        bdata = dag.getExistingForkedBlock(bid).valueOr:
+          dag.handleUnexpectedLightClientError(bid.slot)
+          break
+        period = bid.slot.sync_committee_period
+      if not dag.lcDataStore.db.hasSyncCommittee(period):
+        let didPutSyncCommittee = withState(dag.headState):
+          when stateFork >= BeaconStateFork.Altair:
+            if period == forkyState.data.slot.sync_committee_period:
+              dag.lcDataStore.db.putSyncCommittee(
+                period, forkyState.data.current_sync_committee)
+              true
+            else:
+              false
+          else:
+            false
+        if not didPutSyncCommittee:
+          let
+            tmpState = assignClone(dag.headState)
+            syncCommittee = dag.existingCurrentSyncCommitteeForPeriod(
+                tmpState[], period).valueOr:
+              dag.handleUnexpectedLightClientError(bid.slot)
+              break
+          dag.lcDataStore.db.putSyncCommittee(period, syncCommittee)
+      withBlck(bdata):
+        when stateFork >= BeaconStateFork.Altair:
+          const lcDataFork = lcDataForkAtStateFork(stateFork)
+          dag.lcDataStore.db.putHeader(blck.toLightClientHeader(lcDataFork))
+        else: raiseAssert "Unreachable"
       dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
         bid.slot, dag.getLightClientData(bid).current_sync_committee_branch)
     boundarySlot = bid.slot.nextEpochBoundarySlot
@@ -885,59 +924,97 @@ proc processFinalizationForLightClient*(
   for key in keysToDelete:
     dag.lcDataStore.cache.pendingBest.del key

+proc getLightClientBootstrap(
+    dag: ChainDAGRef,
+    header: ForkyLightClientHeader): ForkedLightClientBootstrap =
+  let
+    slot = header.beacon.slot
+    period = slot.sync_committee_period
+    blockRoot = hash_tree_root(header)
+  if slot < dag.targetLightClientTailSlot:
+    debug "LC bootstrap unavailable: Block too old", slot
+    return default(ForkedLightClientBootstrap)
+  if slot > dag.finalizedHead.blck.slot:
+    debug "LC bootstrap unavailable: Not finalized", blockRoot
+    return default(ForkedLightClientBootstrap)
+
+  # Ensure `current_sync_committee_branch` is known
+  if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand and
+      not dag.lcDataStore.db.hasCurrentSyncCommitteeBranch(slot):
+    let
+      bsi = dag.getExistingBlockIdAtSlot(slot).valueOr:
+        return default(ForkedLightClientBootstrap)
+      tmpState = assignClone(dag.headState)
+    dag.withUpdatedExistingState(tmpState[], bsi) do:
+      withState(updatedState):
+        when stateFork >= BeaconStateFork.Altair:
+          if not dag.lcDataStore.db.hasSyncCommittee(period):
+            dag.lcDataStore.db.putSyncCommittee(
+              period, forkyState.data.current_sync_committee)
+          dag.lcDataStore.db.putHeader(header)
+          dag.lcDataStore.db.putCurrentSyncCommitteeBranch(
+            slot, forkyState.data.build_proof(
+              altair.CURRENT_SYNC_COMMITTEE_INDEX).get)
+        else: raiseAssert "Unreachable"
+    do: return default(ForkedLightClientBootstrap)
+
+  # Ensure `current_sync_committee` is known
+  if not dag.lcDataStore.db.hasSyncCommittee(period):
+    let
+      tmpState = assignClone(dag.headState)
+      syncCommittee = dag.existingCurrentSyncCommitteeForPeriod(
+          tmpState[], period).valueOr:
+        return default(ForkedLightClientBootstrap)
+    dag.lcDataStore.db.putSyncCommittee(period, syncCommittee)
+
+  # Construct `LightClientBootstrap` from cached data
+  const lcDataFork = typeof(header).kind
+  var bootstrap = ForkedLightClientBootstrap(kind: lcDataFork)
+  template forkyBootstrap: untyped = bootstrap.forky(lcDataFork)
+  forkyBootstrap.header = header
+  forkyBootstrap.current_sync_committee =
+    dag.lcDataStore.db.getSyncCommittee(period).valueOr:
+      debug "LC bootstrap unavailable: Sync committee not cached", period
+      return default(ForkedLightClientBootstrap)
+  forkyBootstrap.current_sync_committee_branch =
+    dag.lcDataStore.db.getCurrentSyncCommitteeBranch(slot).valueOr:
+      debug "LC bootstrap unavailable: Sync committee branch not cached", slot
+      return default(ForkedLightClientBootstrap)
+  bootstrap
+
 proc getLightClientBootstrap*(
     dag: ChainDAGRef,
     blockRoot: Eth2Digest): ForkedLightClientBootstrap =
   if not dag.lcDataStore.serve:
     return default(ForkedLightClientBootstrap)

+  # Try to load from cache
+  template tryFromCache(lcDataFork: static LightClientDataFork): untyped =
+    block:
+      let header = getHeader[lcDataFork.LightClientHeader](
+        dag.lcDataStore.db, blockRoot)
+      if header.isOk:
+        return dag.getLightClientBootstrap(header.get)
+  static: doAssert LightClientDataFork.high == LightClientDataFork.EIP4844
+  tryFromCache(LightClientDataFork.EIP4844)
+  tryFromCache(LightClientDataFork.Capella)
+  tryFromCache(LightClientDataFork.Altair)
+
+  # Fallback to DAG
   let bdata = dag.getForkedBlock(blockRoot).valueOr:
     debug "LC bootstrap unavailable: Block not found", blockRoot
     return default(ForkedLightClientBootstrap)

   withBlck(bdata):
-    let slot = blck.message.slot
     when stateFork >= BeaconStateFork.Altair:
-      if slot < dag.targetLightClientTailSlot:
-        debug "LC bootstrap unavailable: Block too old", slot
-        return default(ForkedLightClientBootstrap)
-      if slot > dag.finalizedHead.blck.slot:
-        debug "LC bootstrap unavailable: Not finalized", blockRoot
-        return default(ForkedLightClientBootstrap)
-
-      var branch = dag.lcDataStore.db.getCurrentSyncCommitteeBranch(slot)
-      if branch.isZeroMemory:
-        if dag.lcDataStore.importMode == LightClientDataImportMode.OnDemand:
-          let
-            bsi = dag.getExistingBlockIdAtSlot(slot).valueOr:
-              return default(ForkedLightClientBootstrap)
-            tmpState = assignClone(dag.headState)
-          dag.withUpdatedExistingState(tmpState[], bsi) do:
-            branch = withState(updatedState):
-              when stateFork >= BeaconStateFork.Altair:
-                forkyState.data.build_proof(
-                  altair.CURRENT_SYNC_COMMITTEE_INDEX).get
-              else: raiseAssert "Unreachable"
-          do: return default(ForkedLightClientBootstrap)
-          dag.lcDataStore.db.putCurrentSyncCommitteeBranch(slot, branch)
-        else:
-          debug "LC bootstrap unavailable: Data not cached", slot
-          return default(ForkedLightClientBootstrap)
-
       const lcDataFork = lcDataForkAtStateFork(stateFork)
-      var bootstrap = ForkedLightClientBootstrap(kind: lcDataFork)
-      template forkyBootstrap: untyped = bootstrap.forky(lcDataFork)
       let
-        period = slot.sync_committee_period
-        tmpState = assignClone(dag.headState)
-      forkyBootstrap.current_sync_committee =
-        dag.existingCurrentSyncCommitteeForPeriod(tmpState[], period).valueOr:
-          return default(ForkedLightClientBootstrap)
-      forkyBootstrap.header = blck.toLightClientHeader(lcDataFork)
-      forkyBootstrap.current_sync_committee_branch = branch
+        header = blck.toLightClientHeader(lcDataFork)
+        bootstrap = dag.getLightClientBootstrap(header)
+      if bootstrap.kind > LightClientDataFork.None:
+        dag.lcDataStore.db.putHeader(header)
       return bootstrap
     else:
-      debug "LC bootstrap unavailable: Block before Altair", slot
+      debug "LC bootstrap unavailable: Block before Altair", blockRoot
       return default(ForkedLightClientBootstrap)

 proc getLightClientUpdateForPeriod*(
@@ -103,9 +103,9 @@ proc initLightClientHeadersStore(
   if not backend.readOnly:
     ? backend.exec("""
       CREATE TABLE IF NOT EXISTS `""" & name & """` (
        `key` INTEGER PRIMARY KEY,  -- `LightClientHeaderKey`
        `kind` INTEGER,             -- `LightClientDataFork`
        `header` BLOB               -- `LightClientHeader` (SSZ)
      );
    """)
    if ? backend.hasTable(legacyAltairName):