2024-01-25 10:04:09 +00:00
|
|
|
# fluffy
|
|
|
|
# Copyright (c) 2022-2024 Status Research & Development GmbH
|
2022-11-18 09:00:06 +00:00
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
2023-01-31 12:38:08 +00:00
|
|
|
{.push raises: [].}
|
2022-11-18 09:00:06 +00:00
|
|
|
|
|
|
|
import
|
|
|
|
chronicles,
|
|
|
|
metrics,
|
|
|
|
eth/db/kvstore,
|
|
|
|
eth/db/kvstore_sqlite3,
|
|
|
|
stint,
|
2024-05-30 12:54:03 +00:00
|
|
|
results,
|
2022-11-18 09:00:06 +00:00
|
|
|
ssz_serialization,
|
2023-10-18 14:59:44 +00:00
|
|
|
beacon_chain/db_limits,
|
|
|
|
beacon_chain/spec/datatypes/[phase0, altair, bellatrix],
|
|
|
|
beacon_chain/spec/forks,
|
|
|
|
beacon_chain/spec/forks_light_client,
|
2023-10-20 10:06:25 +00:00
|
|
|
./beacon_content,
|
2024-01-26 22:38:12 +00:00
|
|
|
./beacon_chain_historical_summaries,
|
2023-10-20 10:06:25 +00:00
|
|
|
./beacon_init_loader,
|
2024-09-05 16:31:55 +00:00
|
|
|
../wire/[portal_protocol, portal_protocol_config]
|
2022-11-18 09:00:06 +00:00
|
|
|
|
2023-10-18 14:59:44 +00:00
|
|
|
from beacon_chain/spec/helpers import is_better_update, toMeta
|
2022-11-18 09:00:06 +00:00
|
|
|
|
2023-10-18 14:59:44 +00:00
|
|
|
export kvstore_sqlite3
|
2022-11-18 09:00:06 +00:00
|
|
|
|
|
|
|
type
  BestLightClientUpdateStore = ref object
    ## Prepared SQL statements over the table that stores the best
    ## `LightClientUpdate` (SSZ-encoded) per sync committee period.
    getStmt: SqliteStmt[int64, seq[byte]] # lookup for a single period
    getBulkStmt: SqliteStmt[(int64, int64), seq[byte]] # half-open period range
    putStmt: SqliteStmt[(int64, seq[byte]), void] # insert-or-overwrite
    delStmt: SqliteStmt[int64, void] # delete one period

  BeaconDb* = ref object
    ## Database for the Portal beacon network content: bootstraps and
    ## historical summaries in a kvstore, best updates in their own table,
    ## and the latest finality/optimistic updates in in-memory caches.
    backend: SqStoreRef # underlying sqlite handle
    kv: KvStoreRef # generic content-id keyed store
    dataRadius*: UInt256 # radius used to decide which content to accept
    bestUpdates: BestLightClientUpdateStore
    forkDigests: ForkDigests
    cfg*: RuntimeConfig
    finalityUpdateCache: Opt[LightClientFinalityUpdateCache]
    optimisticUpdateCache: Opt[LightClientOptimisticUpdateCache]

  # Storing the content encoded here. Could also store decoded and access the
  # slot directly. However, that would require us to have access to the
  # fork digests here to be able the re-encode the data.
  LightClientFinalityUpdateCache = object
    ## Latest encoded finality update together with its finalized slot.
    lastFinalityUpdate: seq[byte]
    lastFinalityUpdateSlot: uint64

  LightClientOptimisticUpdateCache = object
    ## Latest encoded optimistic update together with its signature slot.
    lastOptimisticUpdate: seq[byte]
    lastOptimisticUpdateSlot: uint64
|
2022-11-18 09:00:06 +00:00
|
|
|
|
|
|
|
template expectDb(x: auto): untyped =
  ## Unwrap a database operation result, treating any failure as fatal.
  # There's no meaningful error handling implemented for a corrupt database or
  # full disk - this requires manual intervention, so we'll panic for now
  x.expect("working database (disk broken/full?)")
|
|
|
|
|
2023-10-18 14:59:44 +00:00
|
|
|
template disposeSafe(s: untyped): untyped =
  ## Dispose a prepared statement only if it is initialized, then reset it
  ## to nil so that calling this template again is a no-op (idempotent).
  if distinctBase(s) != nil:
    s.dispose()
    s = typeof(s)(nil)
|
|
|
|
|
2022-11-18 09:00:06 +00:00
|
|
|
proc initBestUpdatesStore(
    backend: SqStoreRef, name: string
): KvResult[BestLightClientUpdateStore] =
  ## Create (if needed) the table `name` mapping a sync committee period to
  ## the best SSZ-encoded light client update for that period, and prepare
  ## the get/get-range/put/delete statements used to access it.
  ## Returns an error if table creation or statement preparation fails.
  ?backend.exec(
    """
    CREATE TABLE IF NOT EXISTS `""" & name &
      """` (
      `period` INTEGER PRIMARY KEY, -- `SyncCommitteePeriod`
      `update` BLOB                 -- `altair.LightClientUpdate` (SSZ)
    );
  """
  )

  let
    # Lookup of the stored update for a single period.
    getStmt = backend
      .prepareStmt(
        """
        SELECT `update`
        FROM `""" & name &
          """`
        WHERE `period` = ?;
      """,
        int64,
        seq[byte],
        managed = false,
      )
      .expect("SQL query OK")
    # Lookup over the half-open period range [start, to).
    getBulkStmt = backend
      .prepareStmt(
        """
        SELECT `update`
        FROM `""" & name &
          """`
        WHERE `period` >= ? AND `period` < ?;
      """,
        (int64, int64),
        seq[byte],
        managed = false,
      )
      .expect("SQL query OK")
    # Insert-or-overwrite of the update for a period (REPLACE INTO).
    putStmt = backend
      .prepareStmt(
        """
        REPLACE INTO `""" & name &
          """` (
          `period`, `update`
        ) VALUES (?, ?);
      """,
        (int64, seq[byte]),
        void,
        managed = false,
      )
      .expect("SQL query OK")
    # Removal of the update stored for a period.
    delStmt = backend
      .prepareStmt(
        """
        DELETE FROM `""" & name &
          """`
        WHERE `period` = ?;
      """,
        int64,
        void,
        managed = false,
      )
      .expect("SQL query OK")

  ok BestLightClientUpdateStore(
    getStmt: getStmt, getBulkStmt: getBulkStmt, putStmt: putStmt, delStmt: delStmt
  )
|
|
|
|
|
2024-01-25 10:04:09 +00:00
|
|
|
func close*(store: var BestLightClientUpdateStore) =
  ## Dispose all prepared statements of the store.
  ## Safe to call more than once: `disposeSafe` nil-checks each statement.
  store.getStmt.disposeSafe()
  store.getBulkStmt.disposeSafe()
  store.putStmt.disposeSafe()
  store.delStmt.disposeSafe()
|
|
|
|
|
2022-11-18 09:00:06 +00:00
|
|
|
proc new*(
    T: type BeaconDb, networkData: NetworkInitData, path: string, inMemory = false
): BeaconDb =
  ## Open (or create) the beacon network database at `path`, or an
  ## in-memory database when `inMemory` is true (used for testing).
  ## Initializes the kvstore and the best-updates table ("lcu").
  let
    db =
      if inMemory:
        SqStoreRef.init("", "lc-test", inMemory = true).expect(
          "working database (out of memory?)"
        )
      else:
        SqStoreRef.init(path, "lc").expectDb()

    kvStore = kvStore db.openKvStore().expectDb()
    bestUpdates = initBestUpdatesStore(db, "lcu").expectDb()

  BeaconDb(
    backend: db,
    kv: kvStore,
    dataRadius: UInt256.high(), # Radius to max to accept all data
    bestUpdates: bestUpdates,
    cfg: networkData.metadata.cfg,
    forkDigests: (newClone networkData.forks)[],
  )
|
|
|
|
|
|
|
|
## Private KvStoreRef Calls
proc get(kv: KvStoreRef, key: openArray[byte]): results.Opt[seq[byte]] =
  ## Fetch the value stored under `key`, or `Opt.none` when absent.
  var fetched = Opt.none(seq[byte])

  proc capture(data: openArray[byte]) =
    # Copy the bytes out: `data` is only valid for the callback's duration.
    fetched = ok(@data)

  discard kv.get(key, capture).expectDb()
  fetched
|
|
|
|
|
2023-10-20 10:06:25 +00:00
|
|
|
## Private BeaconDb calls
proc get(db: BeaconDb, key: openArray[byte]): results.Opt[seq[byte]] =
  ## Forward a raw-byte key lookup to the underlying kvstore.
  db.kv.get(key)
|
|
|
|
|
2023-10-20 10:06:25 +00:00
|
|
|
proc put(db: BeaconDb, key, value: openArray[byte]) =
  ## Store `value` under the raw-byte `key`; panics on database failure.
  db.kv.put(key, value).expectDb()
|
|
|
|
|
2023-10-18 14:59:44 +00:00
|
|
|
## Public ContentId based ContentDB calls
proc get*(db: BeaconDb, key: ContentId): results.Opt[seq[byte]] =
  ## Look up content by its content id (keyed by big-endian bytes).
  # TODO: Here it is unfortunate that ContentId is a uint256 instead of Digest256.
  db.get(key.toBytesBE())
|
2022-11-18 09:00:06 +00:00
|
|
|
|
2023-10-20 10:06:25 +00:00
|
|
|
proc put*(db: BeaconDb, key: ContentId, value: openArray[byte]) =
  ## Store content under its content id (keyed by big-endian bytes).
  db.put(key.toBytesBE(), value)
|
2022-11-18 09:00:06 +00:00
|
|
|
|
2023-10-18 14:59:44 +00:00
|
|
|
# TODO Add checks that uint64 can be safely casted to int64
proc getLightClientUpdates(
    db: BeaconDb, start: uint64, to: uint64
): ForkedLightClientUpdateBytesList =
  ## Load the stored best updates for the half-open period range
  ## `[start, to)` and return them as an SSZ list of byte lists.
  var row: seq[byte]
  for queryRes in db.bestUpdates.getBulkStmt.exec((start.int64, to.int64), row):
    queryRes.expect("SQL query OK")
    # A full result list silently drops the remaining rows (add returns false).
    discard result.add(List[byte, MAX_LIGHT_CLIENT_UPDATE_SIZE].init(row))
|
|
|
|
|
|
|
|
proc getBestUpdate*(
    db: BeaconDb, period: SyncCommitteePeriod
): Result[ForkedLightClientUpdate, string] =
  ## Get the best ForkedLightClientUpdate for given period
  ## Note: Only the best one for a given period is being stored.
  doAssert period.isSupportedBySQLite
  doAssert distinctBase(db.bestUpdates.getStmt) != nil

  var update: seq[byte]
  for res in db.bestUpdates.getStmt.exec(period.int64, update):
    res.expect("SQL query OK")
    # `period` is the primary key, so there is at most one row: decode and
    # return on the first hit.
    return decodeLightClientUpdateForked(db.forkDigests, update)
  # NOTE(review): when no row exists, the implicit default `Result` is
  # returned — presumably the error state with an empty message; callers
  # treat this as "no update stored for this period". Confirm against
  # nim-results default-init semantics.
|
|
|
|
|
|
|
|
proc putBootstrap*(
    db: BeaconDb, blockRoot: Digest, bootstrap: ForkedLightClientBootstrap
) =
  ## Put a ForkedLightClientBootstrap in the db, keyed by the content id
  ## derived from `blockRoot`. Pre-Altair ("None" fork) bootstraps are a
  ## no-op as no branch is generated for them.
  withForkyBootstrap(bootstrap):
    when lcDataFork > LightClientDataFork.None:
      let
        contentKey = bootstrapContentKey(blockRoot)
        contentId = toContentId(contentKey)
        # The encoding is prefixed with the fork digest of the epoch of
        # the bootstrap's header slot.
        forkDigest = forkDigestAtEpoch(
          db.forkDigests, epoch(forkyBootstrap.header.beacon.slot), db.cfg
        )
        encodedBootstrap = encodeBootstrapForked(forkDigest, bootstrap)

      db.put(contentId, encodedBootstrap)
|
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
func putLightClientUpdate*(db: BeaconDb, period: uint64, update: seq[byte]) =
  ## Store an already-encoded ForkedLightClientUpdate as the best update
  ## for `period`, overwriting any existing entry (REPLACE INTO).
  db.bestUpdates.putStmt.exec((period.int64, update)).expect("SQL query OK")
|
|
|
|
|
|
|
|
func putBestUpdate*(
    db: BeaconDb, period: SyncCommitteePeriod, update: ForkedLightClientUpdate
) =
  ## Store `update` as the best update for `period`. An update with too few
  ## sync committee participants — or from the pre-Altair "None" fork —
  ## instead deletes whatever is stored for that period.
  doAssert not db.backend.readOnly # All `stmt` are non-nil
  doAssert period.isSupportedBySQLite
  withForkyUpdate(update):
    when lcDataFork > LightClientDataFork.None:
      let numParticipants = forkyUpdate.sync_aggregate.num_active_participants
      if numParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
        # Not acceptable as "best": drop the stored entry for this period.
        let res = db.bestUpdates.delStmt.exec(period.int64)
        res.expect("SQL query OK")
      else:
        let
          # Fork digest derives from the attested header's slot epoch.
          forkDigest = forkDigestAtEpoch(
            db.forkDigests, epoch(forkyUpdate.attested_header.beacon.slot), db.cfg
          )
          encodedUpdate = encodeForkedLightClientObject(update, forkDigest)
          res = db.bestUpdates.putStmt.exec((period.int64, encodedUpdate))
        res.expect("SQL query OK")
    else:
      db.bestUpdates.delStmt.exec(period.int64).expect("SQL query OK")
|
|
|
|
|
|
|
|
proc putUpdateIfBetter*(
    db: BeaconDb, period: SyncCommitteePeriod, update: ForkedLightClientUpdate
) =
  ## Store `update` for `period` when nothing is stored yet, or when it
  ## beats the stored one as judged by `is_better_update`.
  let stored = db.getBestUpdate(period)
  # Short-circuit: `stored.value()` is only read when the lookup succeeded.
  if stored.isErr() or is_better_update(update, stored.value()):
    db.putBestUpdate(period, update)
|
|
|
|
|
2024-02-28 17:31:45 +00:00
|
|
|
proc putUpdateIfBetter*(db: BeaconDb, period: SyncCommitteePeriod, update: seq[byte]) =
  ## Decode the fork-encoded update bytes and store them if better than the
  ## currently stored update; undecodable input is silently dropped.
  # TODO:
  # Need to go over the usage in offer/accept vs findcontent/content
  # and in some (all?) decoding has already been verified.
  let decoded = decodeLightClientUpdateForked(db.forkDigests, update)
  if decoded.isOk():
    db.putUpdateIfBetter(period, decoded.value())
|
|
|
|
|
2024-01-26 22:38:12 +00:00
|
|
|
proc getLastFinalityUpdate*(db: BeaconDb): Opt[ForkedLightClientFinalityUpdate] =
  ## Return the most recently cached finality update, decoded, or `none`
  ## when nothing has been cached yet.
  db.finalityUpdateCache.map(
    proc(x: LightClientFinalityUpdateCache): ForkedLightClientFinalityUpdate =
      # Cached bytes were produced/validated by this node, so a decode
      # failure indicates a programming error, not bad input.
      decodeLightClientFinalityUpdateForked(db.forkDigests, x.lastFinalityUpdate).valueOr:
        raiseAssert "Stored finality update must be valid"
  )
|
2024-01-26 22:38:12 +00:00
|
|
|
|
2023-10-20 10:06:25 +00:00
|
|
|
proc createGetHandler*(db: BeaconDb): DbGetHandler =
  ## Build the content-lookup callback used by the portal protocol wire
  ## layer. Dispatches on the decoded content key's type: bootstraps and
  ## historical summaries come from the kvstore, updates from the SQL
  ## table, finality/optimistic updates from the in-memory caches.
  return (
    proc(contentKey: ContentKeyByteList, contentId: ContentId): results.Opt[seq[byte]] =
      let contentKey = contentKey.decode().valueOr:
        # TODO: as this should not fail, maybe it is better to raiseAssert ?
        return Opt.none(seq[byte])

      case contentKey.contentType
      of unused:
        raiseAssert "Should not be used and fail at decoding"
      of lightClientBootstrap:
        db.get(contentId)
      of lightClientUpdate:
        let
          # TODO: add validation that startPeriod is not from the future,
          # this requires db to be aware off the current beacon time
          startPeriod = contentKey.lightClientUpdateKey.startPeriod
          # get max 128 updates
          numOfUpdates = min(
            uint64(MAX_REQUEST_LIGHT_CLIENT_UPDATES),
            contentKey.lightClientUpdateKey.count,
          )
          toPeriod = startPeriod + numOfUpdates # Not inclusive
          updates = db.getLightClientUpdates(startPeriod, toPeriod)

        if len(updates) == 0:
          Opt.none(seq[byte])
        else:
          # Note that this might not return all of the requested updates.
          # This might seem faulty/tricky as it is also used in handleOffer to
          # check if an offer should be accepted.
          # But it is actually fine as this will occur only when the node is
          # synced and it would not be able to verify the older updates in the
          # range anyhow.
          Opt.some(SSZ.encode(updates))
      of lightClientFinalityUpdate:
        # TODO:
        # Return only when the update is better than what is requested by
        # contentKey. This is currently not possible as the contentKey does not
        # include best update information.
        if db.finalityUpdateCache.isSome():
          let slot = contentKey.lightClientFinalityUpdateKey.finalizedSlot
          let cache = db.finalityUpdateCache.get()
          # Serve the cached update only if it is at least as recent as the
          # requested finalized slot.
          if cache.lastFinalityUpdateSlot >= slot:
            Opt.some(cache.lastFinalityUpdate)
          else:
            Opt.none(seq[byte])
        else:
          Opt.none(seq[byte])
      of lightClientOptimisticUpdate:
        # TODO same as above applies here too.
        if db.optimisticUpdateCache.isSome():
          let slot = contentKey.lightClientOptimisticUpdateKey.optimisticSlot
          let cache = db.optimisticUpdateCache.get()
          if cache.lastOptimisticUpdateSlot >= slot:
            Opt.some(cache.lastOptimisticUpdate)
          else:
            Opt.none(seq[byte])
        else:
          Opt.none(seq[byte])
      of beacon_content.ContentType.historicalSummaries:
        db.get(contentId)
  )
|
|
|
|
|
2023-10-20 10:06:25 +00:00
|
|
|
proc createStoreHandler*(db: BeaconDb): DbStoreHandler =
  ## Build the content-store callback used by the portal protocol wire
  ## layer. Dispatches on the decoded content key's type; content that
  ## fails to decode is silently dropped (best effort).
  return (
    proc(
        contentKey: ContentKeyByteList, contentId: ContentId, content: seq[byte]
    ) {.raises: [], gcsafe.} =
      let contentKey = decode(contentKey).valueOr:
        # TODO: as this should not fail, maybe it is better to raiseAssert ?
        return

      case contentKey.contentType
      of unused:
        raiseAssert "Should not be used and fail at decoding"
      of lightClientBootstrap:
        db.put(contentId, content)
      of lightClientUpdate:
        let updates = decodeSsz(content, ForkedLightClientUpdateBytesList).valueOr:
          return

        # Lot of assumptions here:
        # - that updates are continuous i.e there is no period gaps
        # - that updates start from startPeriod of content key
        var period = contentKey.lightClientUpdateKey.startPeriod
        for update in updates.asSeq():
          # Only put the update if it is better, although currently a new offer
          # should not be accepted as it is based on only the period.
          db.putUpdateIfBetter(SyncCommitteePeriod(period), update.asSeq())
          inc period
      of lightClientFinalityUpdate:
        # Cache only; finality updates are ephemeral and not persisted.
        db.finalityUpdateCache = Opt.some(
          LightClientFinalityUpdateCache(
            lastFinalityUpdateSlot:
              contentKey.lightClientFinalityUpdateKey.finalizedSlot,
            lastFinalityUpdate: content,
          )
        )
      of lightClientOptimisticUpdate:
        # Cache only; optimistic updates are ephemeral and not persisted.
        db.optimisticUpdateCache = Opt.some(
          LightClientOptimisticUpdateCache(
            lastOptimisticUpdateSlot:
              contentKey.lightClientOptimisticUpdateKey.optimisticSlot,
            lastOptimisticUpdate: content,
          )
        )
      of beacon_content.ContentType.historicalSummaries:
        # TODO: Its probably better to not use the kvstore here and instead use a sql
        # table with slot as index and move the slot logic to the db store handler.
        let current = db.get(contentId)
        if current.isSome():
          # Only overwrite when the incoming summaries are for a newer epoch.
          # NOTE(review): a decode failure of the *stored* data asserts —
          # assumes locally persisted content is always valid; confirm this
          # holds across version upgrades.
          let summariesWithProof = decodeSsz(
            db.forkDigests, current.get(), HistoricalSummariesWithProof
          ).valueOr:
            raiseAssert error
          let newSummariesWithProof = decodeSsz(
            db.forkDigests, content, HistoricalSummariesWithProof
          ).valueOr:
            return
          if newSummariesWithProof.epoch > summariesWithProof.epoch:
            db.put(contentId, content)
        else:
          db.put(contentId, content)
  )
|
2024-09-05 16:31:55 +00:00
|
|
|
|
|
|
|
proc createRadiusHandler*(db: BeaconDb): DbRadiusHandler =
  ## Build the radius callback used by the portal protocol: it simply
  ## exposes the database's current `dataRadius`.
  result = proc(): UInt256 {.raises: [], gcsafe.} =
    db.dataRadius
|