Store block summary in database

This introduces a cache of block summaries, useful for instantiating the block dag on startup, bringing Medalla startup times down from minutes to seconds. This is something of a temporary band-aid that would be obsoleted by a finalized block store.

Parent: 7823c25ba8
Commit: fc7885b27e
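
A rough usage sketch of the new cache (not part of the commit; it assumes BeaconChainDB, Eth2Digest, Slot and the getAncestorSummaries iterator from the diff below are in scope, and summarySlots is a hypothetical helper): instead of loading and decompressing full SignedBeaconBlock records, startup code can walk just the (slot, parent_root) summaries.

# Hypothetical helper, for illustration only - assumes BeaconChainDB,
# Eth2Digest, Slot and getAncestorSummaries (added below) are in scope.
proc summarySlots(db: BeaconChainDB, headRoot: Eth2Digest): seq[Slot] =
  ## Walk the summary cache from head towards the oldest known ancestor,
  ## reading only (slot, parent_root) per block instead of full blocks.
  for ancestor in db.getAncestorSummaries(headRoot):
    result.add ancestor.summary.slot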

@@ -84,6 +84,15 @@ type
     kFinalizedEth2DepositsMarkleizer
       ## A merkleizer used for computing merkle proofs of deposits added
       ## to Eth2 blocks (it may lag behind the finalized deposits merkleizer).
+    kHashToBlockSummary
+      ## Cache of beacon block summaries - during startup when we construct the
+      ## chain dag, loading full blocks takes a lot of time - the block
+      ## summary contains a minimal snapshot of what's needed to instantiate
+      ## the BlockRef tree.
 
+  BeaconBlockSummary* = object
+    slot*: Slot
+    parent_root*: Eth2Digest
+
 const
   maxDecompressedDbRecordSize = 16*1024*1024

@@ -105,6 +114,9 @@ func subkey(kind: type BeaconState, key: Eth2Digest): auto =
 func subkey(kind: type SignedBeaconBlock, key: Eth2Digest): auto =
   subkey(kHashToBlock, key.data)
 
+func subkey(kind: type BeaconBlockSummary, key: Eth2Digest): auto =
+  subkey(kHashToBlockSummary, key.data)
+
 func subkey(root: Eth2Digest, slot: Slot): array[40, byte] =
   var ret: array[40, byte]
   # big endian to get a naturally ascending order on slots in sorted indices

@@ -324,10 +336,18 @@ proc get[T](db: BeaconChainDB, key: openArray[byte], output: var T): GetResult =
 proc close*(db: BeaconChainDB) =
   discard db.backend.close()
 
+func toBeaconBlockSummary(v: SomeBeaconBlock): BeaconBlockSummary =
+  BeaconBlockSummary(
+    slot: v.slot,
+    parent_root: v.parent_root,
+  )
+
 proc putBlock*(db: BeaconChainDB, value: SignedBeaconBlock) =
   db.put(subkey(type value, value.root), value)
+  db.put(subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
 proc putBlock*(db: BeaconChainDB, value: TrustedSignedBeaconBlock) =
   db.put(subkey(SignedBeaconBlock, value.root), value)
+  db.put(subkey(BeaconBlockSummary, value.root), value.message.toBeaconBlockSummary())
 
 proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
   # TODO prune old states - this is less easy than it seems as we never know

@@ -343,8 +363,8 @@ proc putStateRoot*(db: BeaconChainDB, root: Eth2Digest, slot: Slot,
   db.put(subkey(root, slot), value)
 
 proc delBlock*(db: BeaconChainDB, key: Eth2Digest) =
-  db.backend.del(subkey(SignedBeaconBlock, key)).expect(
-    "working database")
+  db.backend.del(subkey(SignedBeaconBlock, key)).expect("working database")
+  db.backend.del(subkey(BeaconBlockSummary, key)).expect("working database")
 
 proc delState*(db: BeaconChainDB, key: Eth2Digest) =
   db.backend.del(subkey(BeaconState, key)).expect("working database")

@@ -373,6 +393,12 @@ proc getBlock*(db: BeaconChainDB, key: Eth2Digest): Opt[TrustedSignedBeaconBlock
     # set root after deserializing (so it doesn't get zeroed)
     result.get().root = key
 
+proc getBlockSummary*(db: BeaconChainDB, key: Eth2Digest): Opt[BeaconBlockSummary] =
+  # We only store blocks that we trust in the database
+  result.ok(BeaconBlockSummary())
+  if db.get(subkey(BeaconBlockSummary, key), result.get) != GetResult.found:
+    result.err()
+
 proc getState*(
     db: BeaconChainDB, key: Eth2Digest, output: var BeaconState,
     rollback: RollbackProc): bool =

@@ -435,3 +461,29 @@ iterator getAncestors*(db: BeaconChainDB, root: Eth2Digest):
       res.root = root
       yield res
       root = res.message.parent_root
+
+iterator getAncestorSummaries*(db: BeaconChainDB, root: Eth2Digest):
+    tuple[root: Eth2Digest, summary: BeaconBlockSummary] =
+  ## Load a chain of ancestors for blck - returns a list of blocks with the
+  ## oldest block last (blck will be at result[0]).
+  ##
+  ## The search will go on until the ancestor cannot be found.
+
+  var
+    res: tuple[root: Eth2Digest, summary: BeaconBlockSummary]
+    tmp: TrustedSignedBeaconBlock
+    root = root
+
+  while true:
+    if db.get(subkey(BeaconBlockSummary, root), res.summary) == GetResult.found:
+      res.root = root
+      yield res
+    elif db.get(subkey(SignedBeaconBlock, root), tmp) == GetResult.found:
+      res.summary = tmp.message.toBeaconBlockSummary()
+      db.put(subkey(BeaconBlockSummary, root), res.summary)
+      res.root = root
+      yield res
+    else:
+      break
+
+    root = res.summary.parent_root

@@ -334,14 +334,14 @@ proc init*(T: type ChainDAGRef,
   if headRoot != tailRoot:
     var curRef: BlockRef
 
-    for blck in db.getAncestors(headRoot):
+    for blck in db.getAncestorSummaries(headRoot):
       if blck.root == tailRef.root:
         doAssert(not curRef.isNil)
         link(tailRef, curRef)
         curRef = curRef.parent
         break
 
-      let newRef = BlockRef.init(blck.root, blck.message)
+      let newRef = BlockRef.init(blck.root, blck.summary.slot)
       if curRef == nil:
         curRef = newRef
         headRef = newRef

@@ -99,21 +99,33 @@ suiteReport "Beacon chain DB" & preset():
     doAssert toSeq(db.getAncestors(a0.root)) == []
     doAssert toSeq(db.getAncestors(a2.root)) == []
 
+    doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
+    doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 0
+
     db.putBlock(a2)
 
     doAssert toSeq(db.getAncestors(a0.root)) == []
     doAssert toSeq(db.getAncestors(a2.root)) == [a2]
 
+    doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
+    doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 1
+
     db.putBlock(a1)
 
     doAssert toSeq(db.getAncestors(a0.root)) == []
     doAssert toSeq(db.getAncestors(a2.root)) == [a2, a1]
 
+    doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 0
+    doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 2
+
     db.putBlock(a0)
 
     doAssert toSeq(db.getAncestors(a0.root)) == [a0]
     doAssert toSeq(db.getAncestors(a2.root)) == [a2, a1, a0]
 
+    doAssert toSeq(db.getAncestorSummaries(a0.root)).len == 1
+    doAssert toSeq(db.getAncestorSummaries(a2.root)).len == 3
+
   wrappedTimedTest "sanity check genesis roundtrip" & preset():
     # This is a really dumb way of checking that we can roundtrip a genesis
     # state. We've been bit by this because we've had a bug in the BLS