Prune `BlockRef` on finalization (#3513)
Until now, the block dag has been using `BlockRef`, a structure adapted for a full DAG, to represent all of chain history. This is a correct and simple design, but it does not exploit the linearity of the chain once parts of it finalize.

By pruning the in-memory `BlockRef` structure at finalization, we save, at the time of writing, a cool ~250mb (or 25%:ish) chunk of memory, landing us at a steady state of ~750mb normal memory usage for a validating node. Above all though, we prevent memory usage from growing proportionally with the length of the chain, something that would not be sustainable over time - instead, the steady-state memory usage is roughly determined by the validator set size, which grows much more slowly. With these changes, the core should remain sustainable memory-wise post-merge all the way to withdrawals (when the validator set is expected to grow).

In-memory indices are still used for the "hot" unfinalized portion of the chain - this ensures that consensus performance remains unchanged. What changes is that for historical access, we use a db-based linear slot index which is cache-and-disk-friendly, keeping the cost of accessing historical data at a similar level as before and achieving the savings at no perceivable cost to functionality or performance. A nice collateral benefit is the almost-instant startup, since we no longer load any large indices at dag init. The cost of this functionality can instead be found in the complexity of having to deal with two ways of traversing the chain - by `BlockRef` and by slot (see the sketch below).

* use `BlockId` instead of `BlockRef` where finalized / historical data may be required
* simplify clearance pre-advancement
* remove dag.finalizedBlocks (~50:ish mb)
* remove `getBlockAtSlot` - use `getBlockIdAtSlot` instead
* `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef` instance, unlike `BlockRef` traversal
* prune `BlockRef` parents on finality (~200:ish mb)
* speed up ChainDAG init by not loading finalized history index
* mess up light client server error handling - this needs revisiting :)
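The hot/historical split described above can be pictured with a minimal sketch. This is not the actual implementation (that lives in the large `blockchain_dag` diff below, which is suppressed); the signatures and the db accessor are assumptions, but the dispatch is the idea:

```nim
# Minimal sketch, assumed signatures - not the real getBlockIdAtSlot.
proc getBlockIdAtSlot(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] =
  if slot > dag.finalizedHead.slot:
    # hot path: resolve via the in-memory BlockRef index kept for the
    # unfinalized portion of the chain - consensus performance is unchanged
    dag.head.atSlot(slot).toBlockSlotId()
  else:
    # cold path: linear slot -> root index kept by the database, covering
    # backfill..finalizedHead - cache- and disk-friendly
    let root = ? dag.db.finalizedBlocks.get(slot)
    ok BlockSlotId.init(BlockId(root: root, slot: slot), slot)
```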
parent 9a2b50d2c6, commit 05ffe7b2bf
@@ -64,12 +64,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 ```diff
 + Adding the same block twice returns a Duplicate error [Preset: mainnet] OK
 + Simple block add&get [Preset: mainnet] OK
-+ getBlockRef returns none for missing blocks OK
-+ loading tail block works [Preset: mainnet] OK
++ basic ops OK
 + updateHead updates head and headState [Preset: mainnet] OK
 + updateState sanity [Preset: mainnet] OK
 ```
-OK: 6/6 Fail: 0/6 Skip: 0/6
+OK: 5/5 Fail: 0/5 Skip: 0/5
 ## Block processor [Preset: mainnet]
 ```diff
 + Reverse order block add & get [Preset: mainnet] OK

@@ -98,11 +97,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 + parent sanity OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
-## ChainDAG helpers
-```diff
-+ epochAncestor sanity [Preset: mainnet] OK
-```
-OK: 1/1 Fail: 0/1 Skip: 0/1
 ## DeleteKeys requests [Preset: mainnet]
 ```diff
 + Deleting not existing key [Preset: mainnet] OK

@@ -525,4 +519,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
-OK: 289/295 Fail: 0/295 Skip: 6/295
+OK: 287/293 Fail: 0/293 Skip: 6/293
@@ -60,10 +60,6 @@ proc addResolvedHeadBlock(
   if not foundHead:
     dag.heads.add(blockRef)
 
-  # Up to here, state.data was referring to the new state after the block had
-  # been applied but the `blck` field was still set to the parent
-  dag.clearanceBlck = blockRef
-
   # Regardless of the chain we're on, the deposits come in the same order so
   # as soon as we import a block, we'll also update the shared public key
   # cache

@@ -71,7 +67,7 @@ proc addResolvedHeadBlock(
 
   # Getting epochRef with the state will potentially create a new EpochRef
   let
-    epochRef = dag.getEpochRef(state, blockRef, cache)
+    epochRef = dag.getEpochRef(state, cache)
     epochRefTick = Moment.now()
 
   debug "Block resolved",

@@ -122,19 +118,20 @@ proc advanceClearanceState*(dag: ChainDAGRef) =
   # epoch transition ahead of time.
   # Notably, we use the clearance state here because that's where the block will
   # first be seen - later, this state will be copied to the head state!
-  if dag.clearanceBlck.slot == getStateField(dag.clearanceState, slot):
-    let next = dag.clearanceBlck.atSlot(dag.clearanceBlck.slot + 1)
+  let advanced = withState(dag.clearanceState):
+    state.data.slot > state.data.latest_block_header.slot
+  if not advanced:
+    let next = getStateField(dag.clearanceState, slot) + 1
 
     let startTick = Moment.now()
-    var cache = StateCache()
-    if not updateState(dag, dag.clearanceState, next, true, cache):
-      # The next head update will likely fail - something is very wrong here
-      error "Cannot advance to next slot, database corrupt?",
-        clearance = shortLog(dag.clearanceBlck),
-        next = shortLog(next)
-    else:
-      debug "Prepared clearance state for next block",
-        next, updateStateDur = Moment.now() - startTick
+    var
+      cache = StateCache()
+      info = ForkedEpochInfo()
+
+    dag.advanceSlots(dag.clearanceState, next, true, cache, info)
+
+    debug "Prepared clearance state for next block",
+      next, updateStateDur = Moment.now() - startTick
 
 proc addHeadBlock*(
     dag: ChainDAGRef, verifier: var BatchVerifier,

@@ -216,17 +213,17 @@ proc addHeadBlock*(
   # by the time a new block reaches this point, the parent block will already
   # have "established" itself in the network to some degree at least.
   var cache = StateCache()
+  let clearanceBlock =
+    parent.atSlot(signedBlock.message.slot).toBlockslotId.expect("not nil")
   if not updateState(
-      dag, dag.clearanceState, parent.atSlot(signedBlock.message.slot), true,
-      cache):
+      dag, dag.clearanceState, clearanceBlock, true, cache):
     # We should never end up here - the parent must be a block no older than and
     # rooted in the finalized checkpoint, hence we should always be able to
     # load its corresponding state
     error "Unable to load clearance state for parent block, database corrupt?",
       parent = shortLog(parent.atSlot(signedBlock.message.slot)),
-      clearanceBlock = shortLog(dag.clearanceBlck)
+      clearanceBlock = shortLog(clearanceBlock)
     return err(BlockError.MissingParent)
-  dag.clearanceBlck = parent
 
   let stateDataTick = Moment.now()
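The simplified pre-advancement check above no longer consults the (now removed) `clearanceBlck`: whether the clearance state still sits on its latest block can be read off the state itself. A distillation of the new condition, with explanatory comments added:

```nim
# The state is "unadvanced" while its slot still equals the slot of the
# latest applied block header, i.e. no empty-slot processing has happened
# since the last block - only then is there a transition to prepare.
# This reads the condition off the clearance state directly, so no
# BlockRef bookkeeping is needed.
let advanced = withState(dag.clearanceState):
  state.data.slot > state.data.latest_block_header.slot
```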
@@ -75,10 +75,10 @@ type
     ## a snapshots and applies blocks until the desired point in history is
     ## reached.
     ##
-    ## Several indices are kept in memory to enable fast lookups - their shape
-    ## and contents somewhat depend on how the chain was instantiated: sync
-    ## from genesis or checkpoint, and therefore, what features we can offer in
-    ## terms of historical replay.
+    ## Several indices are kept in memory and database to enable fast lookups -
+    ## their shape and contents somewhat depend on how the chain was
+    ## instantiated: sync from genesis or checkpoint, and therefore, what
+    ## features we can offer in terms of historical replay.
     ##
     ## Beacuse the state transition is forwards-only, checkpoint sync generally
     ## allows replaying states from that point onwards - anything earlier

@@ -94,12 +94,12 @@ type
     ## pointers may overlap and some indices might be empty as a result.
     ##
     ##                                              / heads
     ##                               /-------*      |
     ##    | archive      | history  /-------*      |
     ##    *--------*---------*---------------*--------------*
     ##    |        |         |               |              |
     ##    genesis  backfill  tail            finalizedHead  head
     ##             |         |               |
-    ##             archive   finalizedBlocks forkBlocks
+    ##             db.finalizedBlocks        dag.forkBlocks
     ##
     ## The archive is the the part of finalized history for which we no longer
     ## recreate states quickly because we don't have a reasonable state to

@@ -107,9 +107,11 @@ type
     ## case - recreating history requires either replaying from genesis or
     ## providing an earlier checkpoint state.
     ##
-    ## We do not keep an in-memory index for the archive - instead, lookups are
-    ## made via `BeaconChainDB.finalizedBlocks` which covers the full range from
-    ## `backfill` to `finalizedHead`.
+    ## We do not keep an in-memory index for finalized blocks - instead, lookups
+    ## are made via `BeaconChainDB.finalizedBlocks` which covers the full range
+    ## from `backfill` to `finalizedHead`. Finalized blocks are generally not
+    ## needed for day-to-day validation work - rather, they're used for
+    ## auxiliary functionality such as historical state access and replays.
 
     db*: BeaconChainDB
       ## Database of recent chain history as well as the state and metadata

@@ -125,16 +127,10 @@ type
       ## `finalizedHead.slot..head.slot` (inclusive) - dag.heads keeps track
       ## of each potential head block in this table.
 
-    finalizedBlocks*: seq[BlockRef]
-      ## Slot -> BlockRef mapping for the finalized portion of the canonical
-      ## chain - use getBlockAtSlot to access
-      ## Covers the slots `tail.slot..finalizedHead.slot` (including the
-      ## finalized head block). Indices offset by `tail.slot`.
-
-    genesis*: BlockRef
+    genesis*: BlockId
       ## The genesis block of the network
 
-    tail*: BlockRef
+    tail*: BlockId
       ## The earliest finalized block for which we have a corresponding state -
       ## when making a replay of chain history, this is as far back as we can
      ## go - the tail block is unique in that its parent is set to `nil`, even

@@ -158,7 +154,7 @@ type
     # -----------------------------------
     # Pruning metadata
 
-    lastPrunePoint*: BlockSlot
+    lastPrunePoint*: BlockSlotId
       ## The last prune point
       ## We can prune up to finalizedHead

@@ -176,8 +172,6 @@ type
     clearanceState*: ForkedHashedBeaconState
       ## Cached state used during block clearance - must only be used in
       ## clearance module
-    clearanceBlck*: BlockRef
-      ## The latest block that was applied to the clearance state
 
     updateFlags*: UpdateFlags

@@ -233,7 +227,7 @@ type
     ## the epoch start - we call this block the "epoch ancestor" in other parts
     ## of the code.
     epoch*: Epoch
-    blck*: BlockRef
+    bid*: BlockId
 
   EpochRef* = ref object
     dag*: ChainDAGRef

@@ -304,7 +298,7 @@ template epoch*(e: EpochRef): Epoch = e.key.epoch
 
 func shortLog*(v: EpochKey): string =
   # epoch:root when logging epoch, root:slot when logging slot!
-  $v.epoch & ":" & shortLog(v.blck)
+  $v.epoch & ":" & shortLog(v.bid)
 
 template setFinalizationCb*(dag: ChainDAGRef, cb: OnFinalizedCallback) =
   dag.onFinHappened = cb
File diff suppressed because it is too large
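The parent-pruning itself happens in the suppressed diff above. As a rough, hypothetical sketch of the idea (not the actual code), it amounts to cutting the in-memory `parent` link at the finalized head:

```nim
# Hypothetical sketch only - the real pruning logic is in the suppressed
# diff. Once a checkpoint finalizes, the linear history below it no longer
# needs in-memory parent links: severing the link makes the old BlockRef
# chain unreachable so the GC can reclaim it (~200:ish mb at the time of
# writing), while lookups fall back to db.finalizedBlocks.
proc pruneBlockRefs(dag: ChainDAGRef) =
  let finalized = dag.finalizedHead.blck
  if finalized.parent != nil:
    finalized.parent = nil
```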
@@ -72,8 +72,8 @@ proc currentSyncCommitteeForPeriod(
     syncCommitteeSlot = max(periodStartSlot, earliestSlot)
     # TODO introduce error handling in the case that we don't have historical
     # data for the period
-    bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
-  dag.withUpdatedState(tmpState, bs) do:
+    bsi = dag.getBlockIdAtSlot(syncCommitteeSlot).expect("TODO")
+  dag.withUpdatedState(tmpState, bsi) do:
     withState(state):
       when stateFork >= BeaconStateFork.Altair:
         state.data.current_sync_committee

@@ -100,8 +100,8 @@ proc syncCommitteeRootForPeriod(
   let
     periodStartSlot = period.start_slot
     syncCommitteeSlot = max(periodStartSlot, earliestSlot)
-    bs = dag.getBlockAtSlot(syncCommitteeSlot).expect("TODO")
-  dag.withUpdatedState(tmpState, bs) do:
+    bsi = dag.getBlockIdAtSlot(syncCommitteeSlot).expect("TODO")
+  dag.withUpdatedState(tmpState, bsi) do:
     withState(state):
       when stateFork >= BeaconStateFork.Altair:
         state.syncCommitteeRoot

@@ -199,17 +199,13 @@ proc createLightClientUpdates(
     dag: ChainDAGRef,
     state: HashedBeaconStateWithSyncCommittee,
     blck: TrustedSignedBeaconBlockWithSyncAggregate,
-    parent: BlockRef) =
+    parent: BlockId) =
   ## Create `LightClientUpdate` and `OptimisticLightClientUpdate` instances for
   ## a given block and its post-state, and keep track of best / latest ones.
   ## Data about the parent block's post-state and its `finalized_checkpoint`'s
   ## block's post-state needs to be cached (`cacheLightClientData`) before
   ## calling this function.
 
-  # Parent needs to be known to continue
-  if parent == nil:
-    return
-
   # Verify sync committee has sufficient participants
   template sync_aggregate(): auto = blck.message.body.sync_aggregate
   template sync_committee_bits(): auto = sync_aggregate.sync_committee_bits

@@ -402,10 +398,10 @@ proc processNewBlockForLightClient*(
 
   when signedBlock is bellatrix.TrustedSignedBeaconBlock:
     dag.cacheLightClientData(state.bellatrixData, signedBlock)
-    dag.createLightClientUpdates(state.bellatrixData, signedBlock, parent)
+    dag.createLightClientUpdates(state.bellatrixData, signedBlock, parent.bid)
   elif signedBlock is altair.TrustedSignedBeaconBlock:
     dag.cacheLightClientData(state.altairData, signedBlock)
-    dag.createLightClientUpdates(state.altairData, signedBlock, parent)
+    dag.createLightClientUpdates(state.altairData, signedBlock, parent.bid)
   elif signedBlock is phase0.TrustedSignedBeaconBlock:
     discard
   else:

@@ -467,13 +463,13 @@ proc processFinalizationForLightClient*(dag: ChainDAGRef) =
   let lowSlot = max(lastCheckpoint.epoch.start_slot, earliestSlot)
   var boundarySlot = dag.finalizedHead.slot
   while boundarySlot >= lowSlot:
-    let blck = dag.getBlockAtSlot(boundarySlot).expect("historical data").blck
-    if blck.slot >= lowSlot:
-      dag.lightClientCache.bootstrap[blck.slot] =
+    let bid = dag.getBlockIdAtSlot(boundarySlot).expect("historical data").bid
+    if bid.slot >= lowSlot:
+      dag.lightClientCache.bootstrap[bid.slot] =
         CachedLightClientBootstrap(
           current_sync_committee_branch:
-            dag.getLightClientData(blck.bid).current_sync_committee_branch)
-    boundarySlot = blck.slot.nextEpochBoundarySlot
+            dag.getLightClientData(bid).current_sync_committee_branch)
+    boundarySlot = bid.slot.nextEpochBoundarySlot
     if boundarySlot < SLOTS_PER_EPOCH:
       break
     boundarySlot -= SLOTS_PER_EPOCH

@@ -540,17 +536,23 @@ proc initBestLightClientUpdateForPeriod(
     period, update = dag.lightClientCache.bestUpdates.getOrDefault(period),
     computeDur = endTick - startTick
 
-  proc maxParticipantsBlock(highBlck: BlockRef, lowSlot: Slot): BlockRef =
+  proc maxParticipantsBlock(
+      dag: ChainDAGRef, highBlck: BlockId, lowSlot: Slot): Opt[BlockId] =
     ## Determine the earliest block with most sync committee signatures among
     ## ancestors of `highBlck` with at least `lowSlot` as parent block slot.
     ## Return `nil` if no block with `MIN_SYNC_COMMITTEE_PARTICIPANTS` is found.
     var
       maxParticipants = 0
-      maxBlockRef: BlockRef
+      maxBlockRef: Opt[BlockId]
       blockRef = highBlck
-    while blockRef.parent != nil and blockRef.parent.slot >= lowSlot:
+    while true:
+      let parent = dag.parent(blockRef).valueOr:
+        break
+      if parent.slot < lowSlot:
+        break
+
       let
-        bdata = dag.getForkedBlock(blockRef.bid).get
+        bdata = dag.getForkedBlock(blockRef).get
         numParticipants =
           withBlck(bdata):
             when stateFork >= BeaconStateFork.Altair:

@@ -558,19 +560,19 @@ proc initBestLightClientUpdateForPeriod(
             else: raiseAssert "Unreachable"
       if numParticipants >= maxParticipants:
         maxParticipants = numParticipants
-        maxBlockRef = blockRef
-      blockRef = blockRef.parent
+        maxBlockRef = ok blockRef
+      blockRef = parent
     if maxParticipants < MIN_SYNC_COMMITTEE_PARTICIPANTS:
-      maxBlockRef = nil
+      maxBlockRef.reset()
     maxBlockRef
 
   # Determine the block in the period with highest sync committee participation
   let
     lowSlot = max(periodStartSlot, earliestSlot)
     highSlot = min(periodEndSlot, dag.finalizedHead.blck.slot)
-    highBlck = dag.getBlockAtSlot(highSlot).expect("TODO").blck
-    bestNonFinalizedRef = maxParticipantsBlock(highBlck, lowSlot)
-  if bestNonFinalizedRef == nil:
+    highBlck = dag.getBlockIdAtSlot(highSlot).expect("TODO").bid
+    bestNonFinalizedRef = dag.maxParticipantsBlock(highBlck, lowSlot)
+  if bestNonFinalizedRef.isNone:
     dag.lightClientCache.bestUpdates[period] = default(altair.LightClientUpdate)
     return

@@ -581,11 +583,11 @@ proc initBestLightClientUpdateForPeriod(
   var
     tmpState = assignClone(dag.headState)
     bestFinalizedRef = bestNonFinalizedRef
-    finalizedBlck {.noinit.}: BlockRef
-  while bestFinalizedRef != nil:
+    finalizedBlck: Opt[BlockId]
+  while bestFinalizedRef.isSome:
     let
       finalizedEpoch = block:
-        dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
+        dag.withUpdatedState(tmpState[], dag.parent(bestFinalizedRef.get()).expect("TODO").atSlot) do:
           withState(state):
             when stateFork >= BeaconStateFork.Altair:
               state.data.finalized_checkpoint.epoch

@@ -593,20 +595,20 @@ proc initBestLightClientUpdateForPeriod(
         do: raiseAssert "Unreachable"
       finalizedEpochStartSlot = finalizedEpoch.start_slot
     if finalizedEpochStartSlot >= lowSlot:
-      finalizedBlck = dag.getBlockAtSlot(finalizedEpochStartSlot).expect(
-        "TODO").blck
-      if finalizedBlck.slot >= lowSlot:
+      finalizedBlck = Opt[BlockId].ok(dag.getBlockIdAtSlot(finalizedEpochStartSlot).expect(
+        "TODO").bid)
+      if finalizedBlck.get.slot >= lowSlot:
         break
-    bestFinalizedRef = maxParticipantsBlock(highBlck, bestFinalizedRef.slot + 1)
+    bestFinalizedRef = dag.maxParticipantsBlock(highBlck, bestFinalizedRef.get().slot + 1)
 
   # If a finalized block has been found within the sync commitee period,
   # create a `LightClientUpdate` for that one. Otherwise, create a non-finalized
   # `LightClientUpdate`
   var update {.noinit.}: LightClientUpdate
-  if bestFinalizedRef != nil:
+  if bestFinalizedRef.isSome:
     # Fill data from attested block
-    dag.withUpdatedState(tmpState[], bestFinalizedRef.parent.atSlot) do:
-      let bdata = dag.getForkedBlock(blck.bid).get
+    dag.withUpdatedState(tmpState[], dag.parent(bestFinalizedRef.get()).expect("TODO").atSlot) do:
+      let bdata = dag.getForkedBlock(bid).get
       withStateAndBlck(state, bdata):
         when stateFork >= BeaconStateFork.Altair:
           update.attested_header =

@@ -617,18 +619,18 @@ proc initBestLightClientUpdateForPeriod(
     do: raiseAssert "Unreachable"
 
     # Fill data from signature block
-    let bdata = dag.getForkedBlock(bestFinalizedRef.bid).get
+    let bdata = dag.getForkedBlock(bestFinalizedRef.get()).get
     withBlck(bdata):
       when stateFork >= BeaconStateFork.Altair:
         update.sync_aggregate =
           isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
       else: raiseAssert "Unreachable"
     update.fork_version =
-      dag.cfg.forkAtEpoch(bestFinalizedRef.slot.epoch).current_version
+      dag.cfg.forkAtEpoch(bestFinalizedRef.get().slot.epoch).current_version
 
     # Fill data from finalized block
-    dag.withUpdatedState(tmpState[], finalizedBlck.atSlot) do:
-      let bdata = dag.getForkedBlock(blck.bid).get
+    dag.withUpdatedState(tmpState[], finalizedBlck.get().atSlot) do:
+      let bdata = dag.getForkedBlock(bid).get
       withStateAndBlck(state, bdata):
         when stateFork >= BeaconStateFork.Altair:
           update.next_sync_committee =

@@ -641,8 +643,8 @@ proc initBestLightClientUpdateForPeriod(
     do: raiseAssert "Unreachable"
   else:
     # Fill data from attested block
-    dag.withUpdatedState(tmpState[], bestNonFinalizedRef.parent.atSlot) do:
-      let bdata = dag.getForkedBlock(blck.bid).get
+    dag.withUpdatedState(tmpState[], dag.parent(bestNonFinalizedRef.get()).expect("TODO").atSlot) do:
+      let bdata = dag.getForkedBlock(bid).get
       withStateAndBlck(state, bdata):
         when stateFork >= BeaconStateFork.Altair:
           update.attested_header =

@@ -657,14 +659,14 @@ proc initBestLightClientUpdateForPeriod(
     do: raiseAssert "Unreachable"
 
     # Fill data from signature block
-    let bdata = dag.getForkedBlock(bestNonFinalizedRef.bid).get
+    let bdata = dag.getForkedBlock(bestNonFinalizedRef.get()).get
     withBlck(bdata):
       when stateFork >= BeaconStateFork.Altair:
         update.sync_aggregate =
           isomorphicCast[SyncAggregate](blck.message.body.sync_aggregate)
       else: raiseAssert "Unreachable"
     update.fork_version =
-      dag.cfg.forkAtEpoch(bestNonFinalizedRef.slot.epoch).current_version
+      dag.cfg.forkAtEpoch(bestNonFinalizedRef.get.slot.epoch).current_version
   dag.lightClientCache.bestUpdates[period] = update
 
 proc initLightClientBootstrapForPeriod(

@@ -699,7 +701,7 @@ proc initLightClientBootstrapForPeriod(
     nextBoundarySlot = lowBoundarySlot
   while nextBoundarySlot <= highBoundarySlot:
     let
-      blck = dag.getBlockAtSlot(nextBoundarySlot).expect("TODO").blck
+      blck = dag.getBlockIdAtSlot(nextBoundarySlot).expect("TODO").bid
       boundarySlot = blck.slot.nextEpochBoundarySlot
     if boundarySlot == nextBoundarySlot and
         blck.slot >= lowSlot and blck.slot <= highSlot and

@@ -741,11 +743,11 @@ proc initLightClientCache*(dag: ChainDAGRef) =
   # first build a todo list, then process them in ascending order
   let lowSlot = max(finalizedSlot, dag.computeEarliestLightClientSlot)
   var
-    blocksBetween = newSeqOfCap[BlockRef](dag.head.slot - lowSlot + 1)
-    blockRef = dag.head
+    blocksBetween = newSeqOfCap[BlockId](dag.head.slot - lowSlot + 1)
+    blockRef = dag.head.bid
   while blockRef.slot > lowSlot:
     blocksBetween.add blockRef
-    blockRef = blockRef.parent
+    blockRef = dag.parent(blockRef).expect("TODO")
   blocksBetween.add blockRef
 
   # Process blocks (reuses `dag.headState`, but restores it to the current head)

@@ -759,7 +761,7 @@ proc initLightClientCache*(dag: ChainDAGRef) =
     doAssert dag.updateState(
       dag.headState, blockRef.atSlot(), save = false, cache)
     withStateVars(dag.headState):
-      let bdata = dag.getForkedBlock(blockRef.bid).get
+      let bdata = dag.getForkedBlock(blockRef).get
       withStateAndBlck(state, bdata):
         when stateFork >= BeaconStateFork.Altair:
           # Cache data for `LightClientUpdate` of descendant blocks

@@ -788,13 +790,13 @@ proc initLightClientCache*(dag: ChainDAGRef) =
       # This is because light clients are unable to advance slots.
       if checkpoint.root != dag.finalizedHead.blck.root:
         let cpRef =
-          dag.getBlockAtSlot(checkpoint.epoch.start_slot).expect("TODO").blck
-        if cpRef != nil and cpRef.slot >= earliestSlot:
-          assert cpRef.bid.root == checkpoint.root
+          dag.getBlockIdAtSlot(checkpoint.epoch.start_slot).expect("TODO").bid
+        if cpRef.slot >= earliestSlot:
+          assert cpRef.root == checkpoint.root
           doAssert dag.updateState(
             tmpState[], cpRef.atSlot, save = false, tmpCache)
           withStateVars(tmpState[]):
-            let bdata = dag.getForkedBlock(cpRef.bid).get
+            let bdata = dag.getForkedBlock(cpRef).get
             withStateAndBlck(state, bdata):
               when stateFork >= BeaconStateFork.Altair:
                 dag.cacheLightClientData(state, blck, isNew = false)

@@ -802,7 +804,7 @@ proc initLightClientCache*(dag: ChainDAGRef) =
 
           # Create `LightClientUpdate` for non-finalized blocks.
           if blockRef.slot > finalizedSlot:
-            dag.createLightClientUpdates(state, blck, blockRef.parent)
+            dag.createLightClientUpdates(state, blck, dag.parent(blockRef).expect("TODO"))
         else: raiseAssert "Unreachable"
 
   let lightClientEndTick = Moment.now()

@@ -879,7 +881,7 @@ proc getLightClientBootstrap*(
       if cachedBootstrap.current_sync_committee_branch.isZeroMemory:
         if dag.importLightClientData == ImportLightClientData.OnDemand:
           var tmpState = assignClone(dag.headState)
-          dag.withUpdatedState(tmpState[], dag.getBlockAtSlot(slot).expect("TODO")) do:
+          dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("TODO")) do:
             withState(state):
               when stateFork >= BeaconStateFork.Altair:
                 state.data.build_proof(
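Across the light client changes, the same replacement pattern recurs: nil-checked `BlockRef.parent` walks become fallible `Opt[BlockId]` walks through the dag. Distilled, using the helper names from the hunks above:

```nim
# The recurring traversal shape after this change: parent lookups can fail
# once finalized BlockRef parents are pruned, so `valueOr` breaks replace
# the old nil checks.
var bid = highBlck
while true:
  let parent = dag.parent(bid).valueOr:
    break                    # genesis reached, or history pruned/unknown
  if parent.slot < lowSlot:
    break                    # walked past the range of interest
  bid = parent
```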
@@ -299,11 +299,11 @@ proc validateBeaconBlock*(
     return errIgnore("BeaconBlock: already seen")
 
   let
-    slotBlock = getBlockAtSlot(dag, signed_beacon_block.message.slot)
+    slotBlock = getBlockIdAtSlot(dag, signed_beacon_block.message.slot)
 
   if slotBlock.isSome() and slotBlock.get().isProposed() and
-      slotBlock.get().blck.slot == signed_beacon_block.message.slot:
-    let curBlock = dag.getForkedBlock(slotBlock.get().blck.bid)
+      slotBlock.get().bid.slot == signed_beacon_block.message.slot:
+    let curBlock = dag.getForkedBlock(slotBlock.get().bid)
     if curBlock.isOk():
       let data = curBlock.get()
       if getForkedBlockField(data, proposer_index) ==
@@ -119,7 +119,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
       sid = state_id.valueOr:
         return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                          $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -127,7 +127,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                          $error)
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       return RestApiResponse.jsonResponse((root: stateRoot))
 
     return RestApiResponse.jsonError(Http404, StateNotFoundError)

@@ -139,7 +139,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -147,7 +147,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                          $error)
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       return RestApiResponse.jsonResponse(
         (
           previous_version: getStateField(state, fork).previous_version,

@@ -165,7 +165,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -173,13 +173,11 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                          $error)
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       return RestApiResponse.jsonResponse(
         (
-          previous_justified:
-            getStateField(state, previous_justified_checkpoint),
-          current_justified:
-            getStateField(state, current_justified_checkpoint),
+          previous_justified: getStateField(state, previous_justified_checkpoint),
+          current_justified: getStateField(state, current_justified_checkpoint),
           finalized: getStateField(state, finalized_checkpoint)
         )
       )

@@ -193,7 +191,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -223,7 +221,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
             $res.error())
         res.get()
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       let
         current_epoch = getStateField(state, slot).epoch()
         validatorsCount = lenu64(getStateField(state, validators))

@@ -320,7 +318,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      vid = validator_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidValidatorIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -328,7 +326,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                          $error)
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       let
         current_epoch = getStateField(state, slot).epoch()
         validatorsCount = lenu64(getStateField(state, validators))

@@ -338,8 +336,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         let vid = validator_id.get()
         case vid.kind
         of ValidatorQueryKind.Key:
-          let optIndices = keysToIndices(node.restKeysCache, state,
-                                         [vid.key])
+          let optIndices = keysToIndices(node.restKeysCache, state, [vid.key])
           if optIndices[0].isNone():
             return RestApiResponse.jsonError(Http404, ValidatorNotFoundError)
           optIndices[0].get()

@@ -382,7 +379,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -401,7 +398,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
               MaximumNumberOfValidatorIdsError)
         ires
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       let validatorsCount = lenu64(getStateField(state, validators))
 
       let indices =

@@ -450,7 +447,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         if len(validatorIds) == 0:
           # There is no indices, so we going to return balances of all
           # known validators.
-          for index, balance in getStateField(state, balances).asSeq.pairs():
+          for index, balance in getStateField(state, balances).pairs():
             res.add(RestValidatorBalance.init(ValidatorIndex(index),
                                               balance))
         else:

@@ -471,7 +468,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -537,7 +534,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
             some(res)
           else:
             none[Slot]()
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       proc getCommittee(slot: Slot,
                         index: CommitteeIndex): RestBeaconStatesCommittees =
         let validators = get_beacon_committee(state, slot, index, cache)

@@ -583,7 +580,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
      sid = state_id.valueOr:
        return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                         $error)
-      bslot = node.getBlockSlot(sid).valueOr:
+      bslot = node.getBlockSlotId(sid).valueOr:
        if sid.kind == StateQueryKind.Root:
          # TODO (cheatfate): Its impossible to retrieve state by `state_root`
          # in current version of database.

@@ -609,7 +606,7 @@ proc installBeaconApiHandlers*(router: var RestRouter, node: BeaconNode) =
         # the state will be obtained.
         bslot.slot.epoch()
 
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       let keys =
         block:
           let res = syncCommitteeParticipants(state, qepoch)
@@ -25,7 +25,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
     if state_id.isErr():
       return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                        $state_id.error())
-    let bres = node.getBlockSlot(state_id.get())
+    let bres = node.getBlockSlotId(state_id.get())
     if bres.isErr():
       return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                        $bres.error())

@@ -37,7 +37,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
       if res.isErr():
         return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
       res.get()
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
       return
        case state.kind
        of BeaconStateFork.Phase0:

@@ -60,7 +60,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
     if state_id.isErr():
       return RestApiResponse.jsonError(Http400, InvalidStateIdValueError,
                                        $state_id.error())
-    let bres = node.getBlockSlot(state_id.get())
+    let bres = node.getBlockSlotId(state_id.get())
     if bres.isErr():
       return RestApiResponse.jsonError(Http404, StateNotFoundError,
                                        $bres.error())

@@ -72,7 +72,7 @@ proc installDebugApiHandlers*(router: var RestRouter, node: BeaconNode) =
      if res.isErr():
        return RestApiResponse.jsonError(Http406, ContentNotAcceptableError)
      res.get()
-    node.withStateForBlockSlot(bslot):
+    node.withStateForBlockSlotId(bslot):
      return
        if contentType == jsonMediaType:
          RestApiResponse.jsonResponsePlain(state)
@@ -230,7 +230,7 @@ proc installNimbusApiHandlers*(router: var RestRouter, node: BeaconNode) =
         return RestApiResponse.jsonError(Http503, BeaconNodeInSyncError)
       res.get()
     let proposalState = assignClone(node.dag.headState)
-    node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)) do:
+    node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot).toBlockSlotId().expect("not nil")) do:
       return RestApiResponse.jsonResponse(
         node.getBlockProposalEth1Data(state))
     do:
@@ -60,32 +60,32 @@ proc getCurrentHead*(node: BeaconNode,
     return err("Requesting epoch for which slot would overflow")
   node.getCurrentHead(epoch.start_slot())
 
-proc getBlockSlot*(node: BeaconNode,
-                   stateIdent: StateIdent): Result[BlockSlot, cstring] =
+proc getBlockSlotId*(node: BeaconNode,
+                     stateIdent: StateIdent): Result[BlockSlotId, cstring] =
   case stateIdent.kind
   of StateQueryKind.Slot:
-    let bs = node.dag.getBlockAtSlot(? node.getCurrentSlot(stateIdent.slot))
-    if bs.isSome:
-      ok(bs.get())
-    else:
-      err("State for given slot not found, history not available?")
+    let bsi = node.dag.getBlockIdAtSlot(? node.getCurrentSlot(stateIdent.slot)).valueOr:
+      return err("State for given slot not found, history not available?")
+
+    ok(bsi)
+
   of StateQueryKind.Root:
     if stateIdent.root == getStateRoot(node.dag.headState):
-      ok(node.dag.head.atSlot())
+      ok(node.dag.head.bid.atSlot())
     else:
       # We don't have a state root -> BlockSlot mapping
       err("State for given root not found")
   of StateQueryKind.Named:
     case stateIdent.value
     of StateIdentType.Head:
-      ok(node.dag.head.atSlot())
+      ok(node.dag.head.bid.atSlot())
     of StateIdentType.Genesis:
       ok(node.dag.genesis.atSlot())
     of StateIdentType.Finalized:
-      ok(node.dag.finalizedHead)
+      ok(node.dag.finalizedHead.toBlockSlotId().expect("not nil"))
     of StateIdentType.Justified:
       ok(node.dag.head.atEpochStart(getStateField(
-        node.dag.headState, current_justified_checkpoint).epoch))
+        node.dag.headState, current_justified_checkpoint).epoch).toBlockSlotId().expect("not nil"))
 
 proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
   case id.kind

@@ -94,7 +94,7 @@ proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
   of BlockIdentType.Head:
     ok(node.dag.head.bid)
   of BlockIdentType.Genesis:
-    ok(node.dag.genesis.bid)
+    ok(node.dag.genesis)
   of BlockIdentType.Finalized:
     ok(node.dag.finalizedHead.blck.bid)
   of BlockQueryKind.Root:

@@ -131,17 +131,17 @@ proc disallowInterruptionsAux(body: NimNode) =
 macro disallowInterruptions(body: untyped) =
   disallowInterruptionsAux(body)
 
-template withStateForBlockSlot*(nodeParam: BeaconNode,
-                                blockSlotParam: BlockSlot,
-                                body: untyped): untyped =
+template withStateForBlockSlotId*(nodeParam: BeaconNode,
+                                  blockSlotIdParam: BlockSlotId,
+                                  body: untyped): untyped =
 
   block:
     let
       node = nodeParam
-      blockSlot = blockSlotParam
+      blockSlotId = blockSlotIdParam
 
     template isState(state: ForkedHashedBeaconState): bool =
-      state.matches_block_slot(blockSlot.blck.root, blockSlot.slot)
+      state.matches_block_slot(blockSlotId.bid.root, blockSlotId.slot)
 
     var cache {.inject, used.}: StateCache

@@ -162,11 +162,13 @@ template withStateForBlockSlotId*(nodeParam: BeaconNode,
     # TODO view-types
    # Avoid the code bloat produced by the double `body` reference through a lent var
     if isState(node.dag.headState):
-      withStateVars(node.dag.headState):
-        body
+      template state: untyped {.inject, used.} = node.dag.headState
+      template stateRoot: untyped {.inject, used.} =
+        getStateRoot(node.dag.headState)
+      body
     else:
       let cachedState = if node.stateTtlCache != nil:
-        node.stateTtlCache.getClosestState(blockSlot)
+        node.stateTtlCache.getClosestState(node.dag, blockSlotId)
       else:
         nil

@@ -175,13 +177,15 @@ template withStateForBlockSlotId*(nodeParam: BeaconNode,
       else:
         assignClone(node.dag.headState)
 
-      if node.dag.updateState(stateToAdvance[], blockSlot, false, cache):
+      if node.dag.updateState(stateToAdvance[], blockSlotId, false, cache):
         if cachedState == nil and node.stateTtlCache != nil:
           # This was not a cached state, we can cache it now
           node.stateTtlCache.add(stateToAdvance)
 
-        withStateVars(stateToAdvance[]):
-          body
+        template state: untyped {.inject, used.} = stateToAdvance[]
+        template stateRoot: untyped {.inject, used.} = getStateRoot(stateToAdvance[])
+
+        body
 
 template strData*(body: ContentBody): string =
   bind fromBytes
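Taken together, the REST-side pattern after the rename is sketched below (identifiers as in the hunks above; the exact error handling varies per endpoint):

```nim
# Sketch of the typical handler flow: resolve the request to a BlockSlotId
# first, then materialize a matching state for the body - which now sees
# the injected `state`/`stateRoot` templates instead of withStateVars.
let bslot = node.getBlockSlotId(sid).valueOr:
  return RestApiResponse.jsonError(Http404, StateNotFoundError, $error)
node.withStateForBlockSlotId(bslot):
  return RestApiResponse.jsonResponse((root: stateRoot))
```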
@@ -260,10 +260,10 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
     # in order to compute the sync committee for the epoch. See the following
     # discussion for more details:
     # https://github.com/status-im/nimbus-eth2/pull/3133#pullrequestreview-817184693
-    let bs = node.dag.getBlockAtSlot(earliestSlotInQSyncPeriod).valueOr:
+    let bsi = node.dag.getBlockIdAtSlot(earliestSlotInQSyncPeriod).valueOr:
       return RestApiResponse.jsonError(Http404, StateNotFoundError)
 
-    node.withStateForBlockSlot(bs):
+    node.withStateForBlockSlotId(bsi):
       let res = withState(state):
         when stateFork >= BeaconStateFork.Altair:
           produceResponse(indexList,
@@ -155,7 +155,7 @@ proc getForkedBlockFromBlockId(
       node.dag.getForkedBlock(node.dag.head.bid).valueOr:
         raise newException(CatchableError, "Block not found")
     of "genesis":
-      node.dag.getForkedBlock(node.dag.genesis.bid).valueOr:
+      node.dag.getForkedBlock(node.dag.genesis).valueOr:
         raise newException(CatchableError, "Block not found")
     of "finalized":
       node.dag.getForkedBlock(node.dag.finalizedHead.blck.bid).valueOr:
@@ -108,7 +108,9 @@ proc installNimbusApiHandlers*(rpcServer: RpcServer, node: BeaconNode) {.
     head = node.doChecksAndGetCurrentHead(wallSlot)
 
     let proposalState = assignClone(node.dag.headState)
-    node.dag.withUpdatedState(proposalState[], head.atSlot(wallSlot)):
+    node.dag.withUpdatedState(
+        proposalState[],
+        head.atSlot(wallSlot).toBlockSlotId().expect("not nil")):
       return node.getBlockProposalEth1Data(state)
     do:
       raise (ref CatchableError)(msg: "Trying to access pruned state")
@@ -24,10 +24,10 @@ template raiseNoAltairSupport*() =
 
 template withStateForStateId*(stateId: string, body: untyped): untyped =
   let
-    bs = node.stateIdToBlockSlot(stateId)
+    bsi = node.stateIdToBlockSlotId(stateId)
 
   template isState(state: ForkedHashedBeaconState): bool =
-    state.matches_block_slot(bs.blck.root, bs.slot)
+    state.matches_block_slot(bsi.bid.root, bsi.slot)
 
   if isState(node.dag.headState):
     withStateVars(node.dag.headState):

@@ -35,7 +35,7 @@ template withStateForStateId*(stateId: string, body: untyped): untyped =
       body
   else:
     let rpcState = assignClone(node.dag.headState)
-    node.dag.withUpdatedState(rpcState[], bs) do:
+    node.dag.withUpdatedState(rpcState[], bsi) do:
       body
     do:
       raise (ref CatchableError)(msg: "Trying to access pruned state")

@@ -69,10 +69,10 @@ proc parseSlot(slot: string): Slot {.raises: [Defect, CatchableError].} =
     raise newException(ValueError, "Not a valid slot number")
   Slot parsed
 
-proc getBlockSlotFromString*(node: BeaconNode, slot: string): BlockSlot {.raises: [Defect, CatchableError].} =
+proc getBlockSlotIdFromString*(node: BeaconNode, slot: string): BlockSlotId {.raises: [Defect, CatchableError].} =
   let parsed = parseSlot(slot)
   discard node.doChecksAndGetCurrentHead(parsed)
-  node.dag.getBlockAtSlot(parsed).valueOr:
+  node.dag.getBlockIdAtSlot(parsed).valueOr:
     raise newException(ValueError, "Block not found")
 
 proc getBlockIdFromString*(node: BeaconNode, slot: string): BlockId {.raises: [Defect, CatchableError].} =

@@ -84,25 +84,28 @@ proc getBlockIdFromString*(node: BeaconNode, slot: string): BlockId {.raises: [D
   else:
     raise (ref ValueError)(msg: "Block not found")
 
-proc stateIdToBlockSlot*(node: BeaconNode, stateId: string): BlockSlot {.raises: [Defect, CatchableError].} =
+proc stateIdToBlockSlotId*(node: BeaconNode, stateId: string): BlockSlotId {.raises: [Defect, CatchableError].} =
   case stateId:
   of "head":
-    node.dag.head.atSlot()
+    node.dag.head.bid.atSlot()
   of "genesis":
     node.dag.genesis.atSlot()
   of "finalized":
-    node.dag.finalizedHead
+    node.dag.finalizedHead.toBlockSlotId().expect("not nil")
   of "justified":
     node.dag.head.atEpochStart(
-      getStateField(node.dag.headState, current_justified_checkpoint).epoch)
+      getStateField(
+        node.dag.headState, current_justified_checkpoint).epoch).
+      toBlockSlotId().valueOr:
+        raise (ref ValueError)(msg: "State not found")
   else:
     if stateId.startsWith("0x"):
       let stateRoot = parseRoot(stateId)
       if stateRoot == getStateRoot(node.dag.headState):
-        node.dag.head.atSlot()
+        node.dag.head.bid.atSlot()
       else:
         # We don't have a state root -> BlockSlot mapping
         raise (ref ValueError)(msg: "State not found")
 
     else: # Parse as slot number
-      node.getBlockSlotFromString(stateId)
+      node.getBlockSlotIdFromString(stateId)
@@ -9,7 +9,7 @@ import
   chronos,
   chronicles,
   ../spec/beaconstate,
-  ../consensus_object_pools/block_pools_types
+  ../consensus_object_pools/blockchain_dag
 
 type
   CacheEntry = ref object

@@ -71,7 +71,8 @@ proc add*(cache: StateTtlCache, state: ref ForkedHashedBeaconState) =
   cache.scheduleEntryExpiration(index)
 
 proc getClosestState*(
-    cache: StateTtlCache, bs: BlockSlot): ref ForkedHashedBeaconState =
+    cache: StateTtlCache, dag: ChainDAGRef,
+    bsi: BlockSlotId): ref ForkedHashedBeaconState =
   var
     bestSlotDifference = Slot.high
     index = -1

@@ -81,20 +82,21 @@ proc getClosestState*(
       continue
 
     let stateSlot = getStateField(cache.entries[i][].state[], slot)
-    if stateSlot > bs.slot:
+    if stateSlot > bsi.slot:
       # We can use only states that can be advanced forward in time.
       continue
 
-    let slotDifference = bs.slot - stateSlot
+    let slotDifference = bsi.slot - stateSlot
     if slotDifference > slotDifferenceForCacheHit:
       # The state is too old to be useful as a rewind starting point.
       continue
 
-    var cur = bs
+    var cur = bsi
     for j in 0 ..< slotDifference:
-      cur = cur.parentOrSlot
+      cur = dag.parentOrSlot(cur).valueOr:
+        break
 
-    if not cache.entries[i].state[].matches_block(cur.blck.root):
+    if not cache.entries[i].state[].matches_block(cur.bid.root):
       # The cached state and the requested BlockSlot are at different branches
       # of history.
       continue
@@ -12,7 +12,7 @@ import
   eth/p2p/discoveryv5/random2,
   ../spec/datatypes/base,
   ../spec/[helpers, network],
-  ../consensus_object_pools/[block_pools_types, spec_cache]
+  ../consensus_object_pools/[blockchain_dag, spec_cache]
 
 export base, helpers, network, sets, tables
@@ -442,7 +442,8 @@ proc makeBeaconBlockForHeadAndSlot*(node: BeaconNode,
   let
     proposalState = assignClone(node.dag.headState)
 
-  node.dag.withUpdatedState(proposalState[], head.atSlot(slot - 1)) do:
+  # TODO fails at checkpoint synced head
+  node.dag.withUpdatedState(proposalState[], head.atSlot(slot - 1).toBlockSlotId().expect("not nil")) do:
     # Advance to the given slot without calculating state root - we'll only
     # need a state root _with_ the block applied
     var info: ForkedEpochInfo
@@ -17,7 +17,7 @@ import
     state_transition_epoch,
     state_transition_block,
     signatures],
-  ../beacon_chain/consensus_object_pools/block_pools_types
+  ../beacon_chain/consensus_object_pools/blockchain_dag
 
 type
   RewardsAndPenalties* = object

@@ -110,18 +110,21 @@ func getFilePathForEpochs*(startEpoch, endEpoch: Epoch, dir: string): string =
     epochAsString(endEpoch) & epochFileNameExtension
   dir / fileName
 
-func getBlockRange*(dag: ChainDAGRef, start, ends: Slot): seq[BlockRef] =
+func getBlockRange*(dag: ChainDAGRef, start, ends: Slot): seq[BlockId] =
   # Range of block in reverse order
   doAssert start < ends
-  result = newSeqOfCap[BlockRef](ends - start)
-  var current = dag.head
-  while current != nil:
-    if current.slot < ends:
-      if current.slot < start or current.slot == 0: # skip genesis
-        break
-      else:
-        result.add current
-    current = current.parent
+  result = newSeqOfCap[BlockId](ends - start)
+  var current = ends
+  while current > start:
+    current -= 1
+    let bsid = dag.getBlockIdAtSlot(current).valueOr:
+      continue
+
+    if bsid.bid.slot < start: # current might be empty
+      break
+
+    result.add(bsid.bid)
+    current = bsid.bid.slot # skip empty slots
 
 func getOutcome(delta: RewardDelta): int64 =
   delta.rewards.int64 - delta.penalties.int64
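The rewritten `getBlockRange` no longer follows parent pointers; it walks slot numbers downwards and relies on `getBlockIdAtSlot` resolving an empty slot to the last block at or before it, which lets it jump over gaps. An illustrative usage, under assumed chain contents:

```nim
# Illustrative only - assumes a dag whose canonical chain has a block in
# every slot except 5 and 6. Querying slot 6 resolves to the block at
# slot 4, so `current` jumps straight past both empty slots.
let blocks = dag.getBlockRange(Slot(2), Slot(7)) # reverse order, `ends` exclusive
doAssert blocks.len == 3
doAssert blocks[0].slot == Slot(4) # slots 6 and 5 were skipped in one step
doAssert blocks[1].slot == Slot(3)
doAssert blocks[2].slot == Slot(2)
```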
@@ -253,7 +253,9 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
 
   withTimer(timers[tLoadState]):
     doAssert dag.updateState(
-      stateData[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
+      stateData[],
+      dag.atSlot(blockRefs[^1], blockRefs[^1].slot - 1).expect("not nil"),
+      false, cache)
 
   template processBlocks(blocks: auto) =
     for b in blocks.mitems():

@@ -409,12 +411,13 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
     validatorMonitor = newClone(ValidatorMonitor.init())
     dag = init(ChainDAGRef, cfg, db, validatorMonitor, {})
 
-  let blckRef = dag.getBlockRef(fromHex(Eth2Digest, conf.blockRoot)).valueOr:
+  let bid = dag.getBlockId(fromHex(Eth2Digest, conf.blockRoot)).valueOr:
     echo "Block not found in database"
     return
 
   let tmpState = assignClone(dag.headState)
-  dag.withUpdatedState(tmpState[], blckRef.atSlot(Slot(conf.slot))) do:
+  dag.withUpdatedState(
+      tmpState[], dag.atSlot(bid, Slot(conf.slot)).expect("block found")) do:
     echo "Writing state..."
     withState(state):
       dump("./", state)

@@ -480,7 +483,7 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
       group.update(e2, blocks[i].slot, tmp).get()
 
     withTimer(timers[tState]):
-      dag.withUpdatedState(tmpState[], canonical) do:
+      dag.withUpdatedState(tmpState[], canonical.toBlockSlotId().expect("not nil")) do:
         withState(state):
           group.finish(e2, state.data).get()
       do: raiseAssert "withUpdatedState failed"

@@ -592,7 +595,9 @@ proc cmdValidatorPerf(conf: DbConf, cfg: RuntimeConfig) =
 
   let state = newClone(dag.headState)
   doAssert dag.updateState(
-    state[], blockRefs[^1].atSlot(blockRefs[^1].slot - 1), false, cache)
+    state[],
+    dag.atSlot(blockRefs[^1], blockRefs[^1].slot - 1).expect("block found"),
+    false, cache)
 
   proc processEpoch() =
     let

@@ -865,9 +870,11 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
   var cache = StateCache()
   let slot = if startSlot > 0: startSlot - 1 else: 0.Slot
   if blockRefs.len > 0:
-    discard dag.updateState(tmpState[], blockRefs[^1].atSlot(slot), false, cache)
+    discard dag.updateState(
+      tmpState[], dag.atSlot(blockRefs[^1], slot).expect("block"), false, cache)
   else:
-    discard dag.updateState(tmpState[], dag.head.atSlot(slot), false, cache)
+    discard dag.updateState(
+      tmpState[], dag.getBlockIdAtSlot(slot).expect("block"), false, cache)
 
   let savedValidatorsCount = outDb.getDbValidatorsCount
   var validatorsCount = getStateField(tmpState[], validators).len

@@ -956,7 +963,7 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
     clear cache
 
   for bi in 0 ..< blockRefs.len:
-    let forkedBlock = dag.getForkedBlock(blockRefs[blockRefs.len - bi - 1].bid).get()
+    let forkedBlock = dag.getForkedBlock(blockRefs[blockRefs.len - bi - 1]).get()
     withBlck(forkedBlock):
       processSlots(blck.message.slot, {skipLastStateRootCalculation})
@@ -112,7 +112,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     let
       attestationHead = dag.head.atSlot(slot)
 
-    dag.withUpdatedState(tmpState[], attestationHead) do:
+    dag.withUpdatedState(tmpState[], attestationHead.toBlockSlotId.expect("not nil")) do:
       let committees_per_slot =
         get_committee_count_per_slot(state, slot.epoch, cache)

@@ -124,7 +124,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
           if rand(r, 1.0) <= attesterRatio:
             let
               data = makeAttestationData(
-                state, slot, committee_index, blck.root)
+                state, slot, committee_index, bid.root)
               sig =
                 get_attestation_signature(getStateField(state, fork),
                   getStateField(state, genesis_validators_root),

@@ -303,7 +303,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     if rand(r, 1.0) > blockRatio:
       return
 
-    dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
+    dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
       let
         newBlock = getNewBlock[phase0.SignedBeaconBlock](state, slot, cache)
         added = dag.addHeadBlock(verifier, newBlock) do (

@@ -324,7 +324,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     if rand(r, 1.0) > blockRatio:
       return
 
-    dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
+    dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
      let
        newBlock = getNewBlock[altair.SignedBeaconBlock](state, slot, cache)
        added = dag.addHeadBlock(verifier, newBlock) do (

@@ -345,7 +345,7 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
     if rand(r, 1.0) > blockRatio:
       return
 
-    dag.withUpdatedState(tmpState[], dag.head.atSlot(slot)) do:
+    dag.withUpdatedState(tmpState[], dag.getBlockIdAtSlot(slot).expect("block")) do:
      let
        newBlock = getNewBlock[bellatrix.SignedBeaconBlock](state, slot, cache)
        added = dag.addHeadBlock(verifier, newBlock) do (

@@ -430,7 +430,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
   withTimer(timers[tReplay]):
     var cache = StateCache()
     doAssert dag.updateState(
-      replayState[], dag.head.atSlot(Slot(slots)), false, cache)
+      replayState[], dag.getBlockIdAtSlot(Slot(slots)).expect("block"),
+      false, cache)
 
   echo "Done!"
@@ -169,7 +169,7 @@ proc stepOnBlock(
   # 1. Move state to proper slot.
   doAssert dag.updateState(
     state,
-    dag.head.atSlot(time.slotOrZero),
+    dag.getBlockIdAtSlot(time.slotOrZero).expect("block exists"),
     save = false,
     stateCache
   )
```diff
@@ -29,32 +29,6 @@ proc pruneAtFinalization(dag: ChainDAGRef) =
   if dag.needStateCachesAndForkChoicePruning():
     dag.pruneStateCachesDAG()
 
-suite "ChainDAG helpers":
-  test "epochAncestor sanity" & preset():
-    let
-      s0 = BlockRef(bid: BlockId(slot: Slot(0)))
-    var cur = s0
-    for i in 1..SLOTS_PER_EPOCH * 2:
-      cur = BlockRef(bid: BlockId(slot: Slot(i)), parent: cur)
-
-    let ancestor = cur.epochAncestor(cur.slot.epoch)
-
-    check:
-      ancestor.epoch == cur.slot.epoch
-      ancestor.blck != cur # should have selected a parent
-
-      ancestor.blck.epochAncestor(cur.slot.epoch) == ancestor
-      ancestor.blck.epochAncestor(ancestor.blck.slot.epoch) != ancestor
-
-    let
-      farEpoch = Epoch(42)
-      farTail = BlockRef(
-        bid: BlockId(slot: farEpoch.start_slot() + 5))
-    check:
-      not isNil(epochAncestor(farTail, farEpoch).blck)
-      isNil(epochAncestor(farTail, farEpoch - 1).blck)
-
 suite "Block pool processing" & preset():
   setup:
     var
```
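The deleted suite built free-standing `BlockRef` chains and walked them with `epochAncestor`; with parents pruned at finalization, ancestry can no longer be answered from `BlockRef` pointers alone, which is why (per the commit notes) `parent` and `atSlot` for `BlockId` now require a `ChainDAGRef`. A sketch of the replacement traversal, using only names from this diff (the loop itself is illustrative):

```nim
# Sketch: walk back through possibly-finalized history by BlockId.
# dag.parent(bid) consults the dag and its database and returns
# Opt[BlockId]; BlockRef.parent was a plain pointer, nil once pruned.
var bid = dag.head.bid
while true:
  let parent = dag.parent(bid)
  if parent.isNone():
    break # reached the tail (or history unknown to this node)
  bid = parent.get()
```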
```diff
@@ -70,16 +44,22 @@ suite "Block pool processing" & preset():
       b1 = addTestBlock(state[], cache, attestations = att0).phase0Data
       b2 = addTestBlock(state[], cache).phase0Data
 
-  test "getBlockRef returns none for missing blocks":
+  test "basic ops":
     check:
       dag.getBlockRef(default Eth2Digest).isNone()
 
-  test "loading tail block works" & preset():
     let
       b0 = dag.getForkedBlock(dag.tail.root)
+      bh = dag.getForkedBlock(dag.head.root)
+      bh2 = dag.getForkedBlock(dag.head.bid)
     check:
       b0.isSome()
+      bh.isSome()
+      bh2.isSome()
+
+      dag.getBlockRef(dag.finalizedHead.blck.root).get() ==
+        dag.finalizedHead.blck
+      dag.getBlockRef(dag.head.root).get() == dag.head
 
   test "Simple block add&get" & preset():
     let
```
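The merged "basic ops" test exercises the two lookup families that coexist after this change: `getBlockRef`, which answers only for the unfinalized portion that still has `BlockRef`:s, and `getForkedBlock`, which loads block data by root or by `BlockId` regardless of finalization status. A rough sketch (hypothetical `root` variable; setup assumed as above):

```nim
# Sketch: post-pruning lookup semantics for some Eth2Digest `root`.
if (let blck = dag.getBlockRef(root); blck.isSome()):
  echo "unfinalized - full BlockRef available at slot ", blck.get().bid.slot
elif (let blk = dag.getForkedBlock(root); blk.isSome()):
  echo "known but finalized - block data loaded from the database"
else:
  echo "unknown block"
```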
```diff
@@ -96,7 +76,7 @@ suite "Block pool processing" & preset():
     let
       b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
       b2Get = dag.getForkedBlock(b2.root)
-      er = dag.findEpochRef(b1Add[], b1Add[].slot.epoch)
+      er = dag.findEpochRef(b1Add[].bid, b1Add[].slot.epoch)
       validators = getStateField(dag.headState, validators).lenu64()
 
     check:
@@ -106,12 +86,16 @@ suite "Block pool processing" & preset():
       dag.heads.len == 1
       dag.heads[0] == b2Add[]
       dag.containsForkBlock(b2.root)
+      dag.parent(b2Add[].bid).get() == b1Add[].bid
+      # head not updated yet - getBlockIdAtSlot won't give those blocks
+      dag.getBlockIdAtSlot(b2Add[].slot).get() ==
+        BlockSlotId.init(dag.genesis, b2Add[].slot)
 
       not er.isErr()
       # Same epoch - same epochRef
-      er[] == dag.findEpochRef(b2Add[], b2Add[].slot.epoch)[]
+      er[] == dag.findEpochRef(b2Add[].bid, b2Add[].slot.epoch)[]
       # Different epoch that was never processed
-      dag.findEpochRef(b1Add[], b1Add[].slot.epoch + 1).isErr()
+      dag.findEpochRef(b1Add[].bid, b1Add[].slot.epoch + 1).isErr()
 
       er[].validatorKey(0'u64).isSome()
       er[].validatorKey(validators - 1).isSome()
```
```diff
@@ -133,27 +117,35 @@ suite "Block pool processing" & preset():
     dag.updateHead(b4Add[], quarantine)
     dag.pruneAtFinalization()
 
+    check: # getBlockIdAtSlot operates on the head chain!
+      dag.getBlockIdAtSlot(b2Add[].slot).get() ==
+        BlockSlotId.init(b2Add[].bid, b2Add[].slot)
+      dag.parentOrSlot(dag.getBlockIdAtSlot(b2Add[].slot).get()).get() ==
+        BlockSlotId.init(b1Add[].bid, b2Add[].slot)
+      dag.parentOrSlot(dag.getBlockIdAtSlot(b2Add[].slot + 1).get()).get() ==
+        BlockSlotId.init(b2Add[].bid, b2Add[].slot)
+
     var blocks: array[3, BlockId]
 
     check:
       dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 0)) == 0
-      blocks[0..<1] == [dag.tail.bid]
+      blocks[0..<1] == [dag.tail]
 
       dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 1)) == 0
-      blocks[0..<2] == [dag.tail.bid, b1Add[].bid]
+      blocks[0..<2] == [dag.tail, b1Add[].bid]
 
       dag.getBlockRange(Slot(0), 2, blocks.toOpenArray(0, 1)) == 0
-      blocks[0..<2] == [dag.tail.bid, b2Add[].bid]
+      blocks[0..<2] == [dag.tail, b2Add[].bid]
 
       dag.getBlockRange(Slot(0), 3, blocks.toOpenArray(0, 1)) == 1
-      blocks[1..<2] == [dag.tail.bid] # block 3 is missing!
+      blocks[1..<2] == [dag.tail] # block 3 is missing!
 
       dag.getBlockRange(Slot(2), 2, blocks.toOpenArray(0, 1)) == 0
       blocks[0..<2] == [b2Add[].bid, b4Add[].bid] # block 3 is missing!
 
       # large skip step
       dag.getBlockRange(Slot(0), uint64.high, blocks.toOpenArray(0, 2)) == 2
-      blocks[2..2] == [dag.tail.bid]
+      blocks[2..2] == [dag.tail]
 
       # large skip step
       dag.getBlockRange(Slot(2), uint64.high, blocks.toOpenArray(0, 1)) == 1
```
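Two details are worth noting in this hunk: the new check block verifies that `getBlockIdAtSlot` answers for the canonical (head) chain, so its results change once `updateHead` runs, and `getBlockRange` fills `BlockId`:s, so it reads identically from hot and cold history. In sketch form, under the same assumptions:

```nim
# Sketch: range query over canonical history. The return value is the
# first filled index in `blocks`; entries before it had no canonical
# block (e.g. when the requested range starts before the tail).
var blocks: array[3, BlockId]
let startIdx = dag.getBlockRange(Slot(0), 1, blocks.toOpenArray(0, 2))
for i in startIdx ..< blocks.len:
  echo "slot ", blocks[i].slot, " -> block ", blocks[i].root
```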
```diff
@@ -176,13 +168,16 @@ suite "Block pool processing" & preset():
     let
       nextEpoch = dag.head.slot.epoch + 1
       nextEpochSlot = nextEpoch.start_slot()
-      stateCheckpoint = dag.head.parent.atSlot(nextEpochSlot).stateCheckpoint
+      parentBsi = dag.head.parent.atSlot(nextEpochSlot).toBlockSlotId().get()
+      stateCheckpoint = dag.stateCheckpoint(parentBsi)
 
     check:
+      parentBsi.bid == dag.head.parent.bid
+      parentBsi.slot == nextEpochSlot
       dag.getEpochRef(dag.head.parent, nextEpoch, true).isOk()
 
       # Getting an EpochRef should not result in states being stored
-      db.getStateRoot(stateCheckpoint.blck.root, stateCheckpoint.slot).isErr()
+      db.getStateRoot(stateCheckpoint.bid.root, stateCheckpoint.slot).isErr()
       # this is required for the test to work - it's not a "public"
       # post-condition of getEpochRef
       getStateField(dag.epochRefState, slot) == nextEpochSlot
```
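`stateCheckpoint` likewise moves from a `BlockSlot` accessor to a dag query: given a `BlockSlotId`, it names the position under which a state for that point would be stored, which the test uses to verify that `getEpochRef` does not persist states as a side effect. A sketch reusing `parentBsi` from the hunk above (`db` is the test's backing database):

```nim
# Sketch: locate the storage key for the state at a chain position.
let cp = dag.stateCheckpoint(parentBsi)
# A state stored for `parentBsi` would live under (cp.bid.root, cp.slot):
echo "state stored: ", db.getStateRoot(cp.bid.root, cp.slot).isOk()
```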
```diff
@@ -194,7 +189,7 @@ suite "Block pool processing" & preset():
 
     check:
       # Getting an EpochRef should not result in states being stored
-      db.getStateRoot(stateCheckpoint.blck.root, stateCheckpoint.slot).isOk()
+      db.getStateRoot(stateCheckpoint.bid.root, stateCheckpoint.slot).isOk()
 
   test "Adding the same block twice returns a Duplicate error" & preset():
     let
```
```diff
@@ -220,9 +215,9 @@ suite "Block pool processing" & preset():
     let
       b1Add = dag.addHeadBlock(verifier, b1, nilPhase0Callback)
       b2Add = dag.addHeadBlock(verifier, b2, nilPhase0Callback)
-      bs1 = BlockSlot(blck: b1Add[], slot: b1.message.slot)
-      bs1_3 = b1Add[].atSlot(3.Slot)
-      bs2_3 = b2Add[].atSlot(3.Slot)
+      bs1 = BlockSlotId.init(b1Add[].bid, b1.message.slot)
+      bs1_3 = BlockSlotId.init(b1Add[].bid, 3.Slot)
+      bs2_3 = BlockSlotId.init(b2Add[].bid, 3.Slot)
 
     let tmpState = assignClone(dag.headState)
```
```diff
@@ -241,9 +236,10 @@ suite "Block pool processing" & preset():
 
     # Move back slots, but not blocks
     check:
-      dag.updateState(tmpState[], bs1_3.parent(), false, cache)
-      tmpState[].latest_block_root == b1Add[].root
-      getStateField(tmpState[], slot) == bs1_3.parent().slot
+      dag.updateState(
+        tmpState[], dag.parent(bs1_3.bid).expect("block").atSlot(), false, cache)
+      tmpState[].latest_block_root == b1Add[].parent.root
+      getStateField(tmpState[], slot) == b1Add[].parent.slot
 
     # Move to different block and slot
     check:
```
```diff
@@ -259,9 +255,10 @@ suite "Block pool processing" & preset():
 
     # Move back to genesis
     check:
-      dag.updateState(tmpState[], bs1.parent(), false, cache)
+      dag.updateState(
+        tmpState[], dag.parent(bs1.bid).expect("block").atSlot(), false, cache)
       tmpState[].latest_block_root == b1Add[].parent.root
-      getStateField(tmpState[], slot) == bs1.parent.slot
+      getStateField(tmpState[], slot) == b1Add[].parent.slot
 
     when declared(GC_fullCollect): # i386 test machines seem to run low..
       GC_fullCollect()
```
```diff
@@ -374,7 +371,7 @@ suite "chain DAG finalization tests" & preset():
 
     assign(tmpState[], dag.headState)
 
-    # skip slots so we can test gappy getBlockAtSlot
+    # skip slots so we can test gappy getBlockIdAtSlot
     check process_slots(
       defaultRuntimeConfig, tmpState[],
       getStateField(tmpState[], slot) + 2.uint64,
```
```diff
@@ -397,18 +394,24 @@ suite "chain DAG finalization tests" & preset():
 
     check:
       dag.heads.len() == 1
-      dag.getBlockAtSlot(0.Slot).get() == BlockSlot(blck: dag.genesis, slot: 0.Slot)
-      dag.getBlockAtSlot(2.Slot).get() ==
-        BlockSlot(blck: dag.getBlockAtSlot(1.Slot).get().blck, slot: 2.Slot)
+      dag.getBlockIdAtSlot(0.Slot).get() == BlockSlotId.init(dag.genesis, 0.Slot)
+      dag.getBlockIdAtSlot(2.Slot).get() ==
+        BlockSlotId.init(dag.getBlockIdAtSlot(1.Slot).get().bid, 2.Slot)
 
-      dag.getBlockAtSlot(dag.head.slot).get() == BlockSlot(
-        blck: dag.head, slot: dag.head.slot.Slot)
-      dag.getBlockAtSlot(dag.head.slot + 1).get() == BlockSlot(
-        blck: dag.head, slot: dag.head.slot.Slot + 1)
+      dag.getBlockIdAtSlot(dag.head.slot).get() == BlockSlotId.init(
+        dag.head.bid, dag.head.slot)
+      dag.getBlockIdAtSlot(dag.head.slot + 1).get() == BlockSlotId.init(
+        dag.head.bid, dag.head.slot + 1)
 
-      not dag.containsForkBlock(dag.getBlockAtSlot(5.Slot).get().blck.root)
+      not dag.containsForkBlock(dag.getBlockIdAtSlot(5.Slot).get().bid.root)
       dag.containsForkBlock(dag.finalizedHead.blck.root)
 
+      dag.getBlockRef(dag.genesis.root).isNone() # Finalized - no BlockRef
+      dag.getBlockRef(dag.finalizedHead.blck.root).isSome()
+
+      isNil dag.finalizedHead.blck.parent
+
     check:
       dag.db.immutableValidators.len() == getStateField(dag.headState, validators).len()
```
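These checks state the commit's core invariant in test form: after finalization, the `BlockRef` for a finalized block is gone (`getBlockRef` on the genesis root returns none) and the finalized head's `parent` link is nil, while the slot index keeps all of history reachable. As a sketch, under the same assumptions as the earlier examples:

```nim
# Sketch: what pruning at finalization means for lookups.
doAssert dag.getBlockRef(dag.genesis.root).isNone() # BlockRef pruned
doAssert isNil(dag.finalizedHead.blck.parent)       # parent links cut here
# ...but finalized history stays reachable through the slot index:
let genesisBsi = dag.getBlockIdAtSlot(Slot(0)).expect("genesis is indexed")
doAssert genesisBsi.bid == dag.genesis
```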
```diff
@@ -433,7 +436,9 @@ suite "chain DAG finalization tests" & preset():
       # evicted yet
       cache = StateCache()
     check: updateState(
-      dag, tmpStateData[], dag.head.atSlot(dag.head.slot), false, cache)
+      dag, tmpStateData[],
+      dag.head.atSlot(dag.head.slot).toBlockSlotId().expect("not nil"),
+      false, cache)
 
     check:
       dag.head.slot.epoch in cache.shuffled_active_validator_indices
```
```diff
@@ -451,11 +456,11 @@ suite "chain DAG finalization tests" & preset():
 
     block:
       let
-        finalizedCheckpoint = dag.finalizedHead.stateCheckpoint
-        headCheckpoint = dag.head.atSlot(dag.head.slot).stateCheckpoint
+        finalizedCheckpoint = dag.stateCheckpoint(dag.finalizedHead.toBlockSlotId().get())
+        headCheckpoint = dag.stateCheckpoint(dag.head.bid.atSlot())
       check:
-        db.getStateRoot(headCheckpoint.blck.root, headCheckpoint.slot).isSome
-        db.getStateRoot(finalizedCheckpoint.blck.root, finalizedCheckpoint.slot).isSome
+        db.getStateRoot(headCheckpoint.bid.root, headCheckpoint.slot).isSome
+        db.getStateRoot(finalizedCheckpoint.bid.root, finalizedCheckpoint.slot).isSome
 
     let
       validatorMonitor2 = newClone(ValidatorMonitor.init())
```
```diff
@@ -537,10 +542,10 @@ suite "chain DAG finalization tests" & preset():
     var
       cur = dag.head
       tmpStateData = assignClone(dag.headState)
-    while cur.slot >= dag.finalizedHead.slot:
+    while cur != nil: # Go all the way to dag.finalizedHead
       assign(tmpStateData[], dag.headState)
       check:
-        dag.updateState(tmpStateData[], cur.atSlot(cur.slot), false, cache)
+        dag.updateState(tmpStateData[], cur.bid.atSlot(), false, cache)
         dag.getForkedBlock(cur.bid).get().phase0Data.message.state_root ==
           getStateRoot(tmpStateData[])
         getStateRoot(tmpStateData[]) == hash_tree_root(
```
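The rewritten loop condition reflects the new hot/cold boundary: `BlockRef.parent` links exist only above the finalized head, so walking parents from `dag.head` now terminates with nil exactly after visiting `dag.finalizedHead.blck`. Sketch:

```nim
# Sketch: BlockRef traversal covers exactly the unfinalized ("hot") chain.
var cur = dag.head
while cur != nil:
  # every hot block is also visible through the slot index
  doAssert dag.getBlockIdAtSlot(cur.slot).isSome()
  cur = cur.parent # nil once we step past dag.finalizedHead.blck
```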
```diff
@@ -709,15 +714,17 @@ suite "Backfill":
       dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
 
     check:
-      dag.getBlockRef(tailBlock.root).get() == dag.tail
+      dag.getBlockRef(tailBlock.root).get().bid == dag.tail
       dag.getBlockRef(blocks[^2].root).isNone()
 
-      dag.getBlockAtSlot(dag.tail.slot).get().blck == dag.tail
-      dag.getBlockAtSlot(dag.tail.slot - 1).isNone()
+      dag.getBlockId(tailBlock.root).get() == dag.tail
+      dag.getBlockId(blocks[^2].root).isNone()
 
-      dag.getBlockAtSlot(Slot(0)).get().blck == dag.genesis
-      dag.getBlockIdAtSlot(Slot(0)).get() == dag.genesis.bid.atSlot()
-      dag.getBlockIdAtSlot(Slot(1)).isNone
+      dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
+      dag.getBlockIdAtSlot(dag.tail.slot - 1).isNone()
+
+      dag.getBlockIdAtSlot(Slot(0)).get() == dag.genesis.atSlot()
+      dag.getBlockIdAtSlot(Slot(1)).isNone()
 
       # No epochref for pre-tail epochs
       dag.getEpochRef(dag.tail, dag.tail.slot.epoch - 1, true).isErr()
```
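The same split governs backfill: a backfilled block is finalized by construction, so it never gets a `BlockRef`, but `getBlockId` and `getBlockIdAtSlot` see it as soon as it is written. Sketch with hypothetical `someBlock`/`someRoot`/`someSlot` (a block just accepted by `addBackfillBlock`):

```nim
# Sketch: lookups after dag.addBackfillBlock(someBlock).isOk(),
# where someRoot/someSlot identify that (hypothetical) block.
doAssert dag.getBlockRef(someRoot).isNone()      # cold data: no BlockRef
doAssert dag.getBlockId(someRoot).isSome()       # but the root is indexed
doAssert dag.getBlockIdAtSlot(someSlot).isSome() # and so is its slot
```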
```diff
@@ -739,12 +746,13 @@ suite "Backfill":
     check:
       dag.addBackfillBlock(blocks[^2].phase0Data).isOk()
 
-      dag.getBlockRef(tailBlock.root).get() == dag.tail
+      dag.getBlockRef(tailBlock.root).get().bid == dag.tail
       dag.getBlockRef(blocks[^2].root).isNone()
 
-      dag.getBlockAtSlot(dag.tail.slot).get().blck == dag.tail
-      dag.getBlockAtSlot(dag.tail.slot - 1).isNone()
+      dag.getBlockId(tailBlock.root).get() == dag.tail
+      dag.getBlockId(blocks[^2].root).get().root == blocks[^2].root
 
+      dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
       dag.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
         blocks[^2].toBlockId().atSlot()
       dag.getBlockIdAtSlot(dag.tail.slot - 2).isNone
```
```diff
@@ -795,8 +803,7 @@ suite "Backfill":
       dag2.getBlockRef(tailBlock.root).get().root == dag.tail.root
       dag2.getBlockRef(blocks[^2].root).isNone()
 
-      dag2.getBlockAtSlot(dag.tail.slot).get().blck.root == dag.tail.root
-      dag2.getBlockAtSlot(dag.tail.slot - 1).isNone()
+      dag2.getBlockIdAtSlot(dag.tail.slot).get().bid.root == dag.tail.root
 
       dag2.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
         blocks[^2].toBlockId().atSlot()
```