Revert "fix checkpoint block potentially not getting backfilled into DB (#5863)" (#5871)

This reverts commit 65e6f892deb5d9ff4399a0840a90788726024008.
tersec 2024-02-09 12:49:07 +00:00 committed by GitHub
parent 65e6f892de
commit 1575478b72
9 changed files with 57 additions and 143 deletions
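
For context on the hunks that follow: the reverted fix (#5863) changed how `dag.backfill` is seeded in `ChainDAGRef.init` when the database was created from a checkpoint state without the checkpoint block itself. A minimal before/after sketch, paraphrased from the init hunk below (abbreviated, not a literal quote of either version):

# With #5863 (removed by this revert): if the checkpoint (tail) block is not
# in the database, start backfill one slot past the tail with
# parent_root = tail.root, so the checkpoint block itself is the first block
# to be backfilled and stored.
dag.backfill =
  if dag.containsBlock(dag.tail):
    db.getBeaconBlockSummary(dag.tail.root).expect(
      "Tail block must have a summary: " & $dag.tail.root)
  else:
    BeaconBlockSummary(
      slot: dag.tail.slot + 1,
      parent_root: dag.tail.root)

# After this revert (the restored original): always use the tail block's
# summary, and handle a missing checkpoint block as a special case in
# `addBackfillBlock` and `initLightClientDataCache` instead.
dag.backfill =
  db.getBeaconBlockSummary(dag.tail.root).expect(
    "Tail block must have a summary: " & $dag.tail.root)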

View File

@@ -23,12 +23,11 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 12/12 Fail: 0/12 Skip: 0/12
 ## Backfill
 ```diff
-+ Backfill to genesis OK
 + Init without genesis / block OK
-+ Reload backfill position OK
-+ Restart after each block OK
++ backfill to genesis OK
++ reload backfill position OK
 ```
-OK: 4/4 Fail: 0/4 Skip: 0/4
+OK: 3/3 Fail: 0/3 Skip: 0/3
 ## Beacon chain DB [Preset: mainnet]
 ```diff
 + empty database [Preset: mainnet] OK

View File

@@ -204,9 +204,6 @@ type
     slot*: Slot
     parent_root*: Eth2Digest
 
-func shortLog*(v: BeaconBlockSummary): auto =
-  (v.slot, shortLog(v.parent_root))
-
 # Subkeys essentially create "tables" within the key-value store by prefixing
 # each entry with a table id

View File

@ -349,7 +349,7 @@ proc addBackfillBlock*(
blockRoot = shortLog(signedBlock.root) blockRoot = shortLog(signedBlock.root)
blck = shortLog(signedBlock.message) blck = shortLog(signedBlock.message)
signature = shortLog(signedBlock.signature) signature = shortLog(signedBlock.signature)
backfill = shortLog(dag.backfill) backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root))
template blck(): untyped = signedBlock.message # shortcuts without copy template blck(): untyped = signedBlock.message # shortcuts without copy
template blockRoot(): untyped = signedBlock.root template blockRoot(): untyped = signedBlock.root
@@ -393,22 +393,18 @@ proc addBackfillBlock*(
     if existing.isSome:
       if existing.get().bid.slot == blck.slot and
           existing.get().bid.root == blockRoot:
-        let isDuplicate = dag.containsBlock(existing.get().bid)
-        if isDuplicate:
-          debug "Duplicate block"
-        else:
-          checkSignature()
-          debug "Block backfilled (known BlockId)"
-          dag.putBlock(signedBlock.asTrusted())
-
-        if blockRoot == dag.backfill.parent_root:
-          dag.backfill = blck.toBeaconBlockSummary()
-
-        return
-          if isDuplicate:
-            err(VerifierError.Duplicate)
-          else:
-            ok()
+        # Special case: when starting with only a checkpoint state, we will not
+        # have the head block data in the database
+        if dag.getForkedBlock(existing.get().bid).isNone():
+          checkSignature()
+
+          debug "Block backfilled (checkpoint)"
+          dag.putBlock(signedBlock.asTrusted())
+          return ok()
+
+        debug "Duplicate block"
+        return err(VerifierError.Duplicate)
 
       # Block is older than finalized, but different from the block in our
       # canonical history: it must be from an unviable branch

View File

@@ -156,12 +156,6 @@ type
       ## The backfill points to the oldest block with an unbroken ancestry from
       ## dag.tail - when backfilling, we'll move backwards in time starting
      ## with the parent of this block until we reach `frontfill`.
-      ##
-      ## - `backfill.slot` points to the earliest block that has been synced,
-      ##   or, if no blocks have been synced yet, to `checkpoint_state.slot + 1`
-      ##   which is the earliest slot that may have `parent_root` as ancestor.
-      ## - `backfill.parent_root` is the latest block that is not yet synced.
-      ## - Once backfill completes, `backfill.slot` refers to `GENESIS_SLOT`.
 
     frontfillBlocks*: seq[Eth2Digest]
       ## A temporary cache of blocks that we could load from era files, once
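
The doc comment kept above describes the restored semantics: `backfill` is the summary of the oldest block with unbroken ancestry from `dag.tail`, and its `parent_root` names the next (older) block to fetch. A hypothetical consumer loop, only to illustrate that invariant (`requestBlockByRoot` is an assumed placeholder, not an API in this codebase):

# Hypothetical sketch; `requestBlockByRoot` stands in for the actual sync machinery.
while dag.backfill.slot > GENESIS_SLOT:
  # The next block needed is the parent of the oldest block already synced.
  let signedBlock = requestBlockByRoot(dag.backfill.parent_root)
  # On success, `dag.backfill` moves backwards to this block's summary.
  discard dag.addBackfillBlock(signedBlock)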

View File

@@ -117,7 +117,7 @@ proc updateFrontfillBlocks*(dag: ChainDAGRef) =
   # era files match the chain
   if dag.db.db.readOnly: return # TODO abstraction leak - where to put this?
 
-  if dag.frontfillBlocks.len == 0 or dag.backfill.slot > GENESIS_SLOT:
+  if dag.frontfillBlocks.len == 0 or dag.backfill.slot > 0:
     return
 
   info "Writing frontfill index", slots = dag.frontfillBlocks.len
@@ -259,9 +259,6 @@ proc containsBlock(
     cfg: RuntimeConfig, db: BeaconChainDB, slot: Slot, root: Eth2Digest): bool =
   db.containsBlock(root, cfg.consensusForkAtEpoch(slot.epoch))
 
-proc containsBlock*(dag: ChainDAGRef, bid: BlockId): bool =
-  dag.cfg.containsBlock(dag.db, bid.slot, bid.root)
-
 proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
     Opt[ForkedTrustedSignedBeaconBlock] =
   # When we only have a digest, we don't know which fork it's from so we try
@@ -1230,14 +1227,9 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
       db.getBeaconBlockSummary(backfillRoot).expect(
         "Backfill block must have a summary: " & $backfillRoot)
-    elif dag.containsBlock(dag.tail):
+    else:
       db.getBeaconBlockSummary(dag.tail.root).expect(
         "Tail block must have a summary: " & $dag.tail.root)
-    else:
-      # Checkpoint sync, checkpoint block unavailable
-      BeaconBlockSummary(
-        slot: dag.tail.slot + 1,
-        parent_root: dag.tail.root)
 
   dag.forkDigests = newClone ForkDigests.init(
     cfg, getStateField(dag.headState, genesis_validators_root))
@@ -1249,9 +1241,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   let finalizedTick = Moment.now()
 
-  if dag.backfill.slot > GENESIS_SLOT: # Try frontfill from era files
-    let backfillSlot = dag.backfill.slot - 1
-    dag.frontfillBlocks = newSeqOfCap[Eth2Digest](backfillSlot.int)
+  if dag.backfill.slot > 0: # See if we can frontfill blocks from era files
+    dag.frontfillBlocks = newSeqOfCap[Eth2Digest](dag.backfill.slot.int)
 
     let
       historical_roots = getStateField(dag.headState, historical_roots).asSeq()
@@ -1264,7 +1255,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
       # blocks from genesis to backfill, if possible.
       for bid in dag.era.getBlockIds(
           historical_roots, historical_summaries, Slot(0), Eth2Digest()):
-        if bid.slot >= backfillSlot:
+        if bid.slot >= dag.backfill.slot:
           # If we end up in here, we failed the root comparison just below in
           # an earlier iteration
           fatal "Era summaries don't lead up to backfill, database or era files corrupt?",
@@ -1313,7 +1304,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     head = shortLog(dag.head),
     finalizedHead = shortLog(dag.finalizedHead),
     tail = shortLog(dag.tail),
-    backfill = shortLog(dag.backfill),
+    backfill = (dag.backfill.slot, shortLog(dag.backfill.parent_root)),
     loadDur = loadTick - startTick,
     summariesDur = summariesTick - loadTick,

View File

@@ -28,8 +28,7 @@ proc updateExistingState(
   ## Wrapper around `updateState` for states expected to exist.
   let ok = dag.updateState(state, bsi, save, cache)
   if not ok:
-    error "State failed to load unexpectedly",
-      bsi, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+    error "State failed to load unexpectedly", bsi, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
   ok
@@ -42,8 +41,7 @@ template withUpdatedExistingState(
   dag.withUpdatedState(stateParam, bsiParam) do:
     okBody
   do:
-    error "State failed to load unexpectedly",
-      bsi, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+    error "State failed to load unexpectedly", bsi, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
     failureBody
@@ -51,8 +49,7 @@ proc getExistingBlockIdAtSlot(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] =
   ## Wrapper around `getBlockIdAtSlot` for blocks expected to exist.
   let bsi = dag.getBlockIdAtSlot(slot)
   if bsi.isNone:
-    error "Block failed to load unexpectedly",
-      slot, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+    error "Block failed to load unexpectedly", slot, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
   bsi
@@ -60,8 +57,7 @@ proc existingParent(dag: ChainDAGRef, bid: BlockId): Opt[BlockId] =
   ## Wrapper around `parent` for parents known to exist.
   let parent = dag.parent(bid)
   if parent.isNone:
-    error "Parent failed to load unexpectedly",
-      bid, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+    error "Parent failed to load unexpectedly", bid, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
   parent
@@ -70,8 +66,7 @@ proc getExistingForkedBlock(
   ## Wrapper around `getForkedBlock` for blocks expected to exist.
   let bdata = dag.getForkedBlock(bid)
   if bdata.isNone:
-    error "Block failed to load unexpectedly",
-      bid, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+    error "Block failed to load unexpectedly", bid, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
   bdata
@@ -83,7 +78,7 @@ proc existingCurrentSyncCommitteeForPeriod(
   let syncCommittee = dag.currentSyncCommitteeForPeriod(tmpState, period)
   if syncCommittee.isNone:
     error "Current sync committee failed to load unexpectedly",
-      period, tail = dag.tail.slot, backfill = shortLog(dag.backfill)
+      period, tail = dag.tail.slot
     doAssert strictVerification notin dag.updateFlags
   syncCommittee
@@ -365,7 +360,7 @@ proc initLightClientUpdateForPeriod(
         continue
       finalizedSlot = finalizedEpoch.start_slot
       finalizedBsi =
-        if finalizedSlot >= max(dag.tail.slot, dag.backfill.slot):
+        if finalizedSlot >= dag.tail.slot:
           dag.getExistingBlockIdAtSlot(finalizedSlot).valueOr:
             dag.handleUnexpectedLightClientError(finalizedSlot)
             res.err()
@@ -547,7 +542,7 @@ proc assignLightClientData(
       when lcDataFork > LightClientDataFork.None:
         if finalized_slot == forkyObject.finalized_header.beacon.slot:
           forkyObject.finality_branch = attested_data.finality_branch
-        elif finalized_slot < max(dag.tail.slot, dag.backfill.slot):
+        elif finalized_slot < dag.tail.slot:
          forkyObject.finalized_header.reset()
          forkyObject.finality_branch.reset()
        else:
@@ -637,13 +632,13 @@ proc createLightClientUpdate(
   let
     finalized_slot = attested_data.finalized_slot
     finalized_bsi =
-      if finalized_slot >= max(dag.tail.slot, dag.backfill.slot):
+      if finalized_slot >= dag.tail.slot:
         dag.getExistingBlockIdAtSlot(finalized_slot)
       else:
         Opt.none(BlockSlotId)
     has_finality =
       finalized_bsi.isSome and
-        finalized_bsi.get.bid.slot >= max(dag.tail.slot, dag.backfill.slot)
+        finalized_bsi.get.bid.slot >= dag.tail.slot
     meta = LightClientUpdateMetadata(
       attested_slot: attested_slot,
       finalized_slot: finalized_slot,
@@ -727,6 +722,18 @@ proc initLightClientDataCache*(dag: ChainDAGRef) =
     # State availability, needed for `cacheLightClientData`
     dag.tail.slot,
     # Block availability, needed for `LightClientHeader.execution_branch`
-    dag.backfill.slot))
+    if dag.backfill.slot != GENESIS_SLOT:
+      let existing = dag.getBlockIdAtSlot(dag.backfill.slot)
+      if existing.isSome:
+        if dag.getForkedBlock(existing.get.bid).isNone:
+          # Special case: when starting with only a checkpoint state,
+          # we will not have the head block data in the database
+          dag.backfill.slot + 1
+        else:
+          dag.backfill.slot
+      else:
+        dag.backfill.slot
+    else:
+      dag.backfill.slot))
   dag.lcDataStore.cache.tailSlot = max(dag.head.slot, targetTailSlot)

View File

@@ -323,10 +323,7 @@ proc initFullNode(
       dag.finalizedHead.slot
 
     func getBackfillSlot(): Slot =
-      if dag.backfill.parent_root != dag.tail.root:
-        dag.backfill.slot
-      else:
-        dag.tail.slot
+      dag.backfill.slot
 
     func getFrontfillSlot(): Slot =
       max(dag.frontfill.get(BlockId()).slot, dag.horizon)

View File

@@ -407,7 +407,7 @@ proc doTrustedNodeSync*(
     let
       validatorMonitor = newClone(ValidatorMonitor.init(false, false))
       dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, eraPath = eraDir)
-      backfillSlot = max(dag.backfill.slot, 1.Slot) - 1
+      backfillSlot = dag.backfill.slot
       horizon = max(dag.horizon, dag.frontfill.valueOr(BlockId()).slot)
 
     let canReindex = if backfillSlot <= horizon:
@@ -418,7 +418,7 @@ proc doTrustedNodeSync*(
       # detection to kick in, in addBackfillBlock
       let missingSlots = dag.backfill.slot - horizon + 1
       notice "Downloading historical blocks - you can interrupt this process at any time and it will automatically be completed when you start the beacon node",
         backfillSlot, horizon, missingSlots
 
       var # Same averaging as SyncManager
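
Worked example with hypothetical numbers: if `dag.backfill.slot` is 9000 and `horizon` is 1000, the node reports `missingSlots = 9000 - 1000 + 1 = 8001` slots still to download.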

View File

@@ -817,7 +817,7 @@ suite "Backfill":
     let
       db = BeaconChainDB.new("", inMemory = true)
 
-  test "Backfill to genesis":
+  test "backfill to genesis":
     let
       tailBlock = blocks[^1]
       genBlock = get_initial_beacon_block(genState[])
@@ -861,17 +861,15 @@ suite "Backfill":
       dag.getFinalizedEpochRef() != nil
-
-      # Checkpoint block is unavailable, and should be backfilled first
-      not dag.containsBlock(dag.tail)
-      dag.backfill == BeaconBlockSummary(
-        slot: dag.tail.slot + 1,
-        parent_root: dag.tail.root)
+      dag.backfill == tailBlock.phase0Data.message.toBeaconBlockSummary()
 
       # Check that we can propose right from the checkpoint state
       dag.getProposalState(dag.head, dag.head.slot + 1, cache).isOk()
 
-    var badBlock = blocks[^1].phase0Data
-    badBlock.signature = blocks[^2].phase0Data.signature
+    var
+      badBlock = blocks[^2].phase0Data
+    badBlock.signature = blocks[^3].phase0Data.signature
 
     check:
       dag.addBackfillBlock(badBlock) == AddBackRes.err VerifierError.Invalid
@@ -881,8 +879,6 @@ suite "Backfill":
       dag.addBackfillBlock(genBlock.phase0Data.asSigned()) ==
         AddBackRes.err VerifierError.MissingParent
 
-      dag.addBackfillBlock(blocks[^2].phase0Data) ==
-        AddBackRes.err VerifierError.MissingParent
-
       dag.addBackfillBlock(tailBlock.phase0Data).isOk()
 
     check:
@@ -924,10 +920,7 @@ suite "Backfill":
     check:
       dag.getFinalizedEpochRef() != nil
 
-    for i in 0..<blocks.len:
-      check dag.containsBlock(blocks[i].toBlockId())
-
-  test "Reload backfill position":
+  test "reload backfill position":
     let
       tailBlock = blocks[^1]
@@ -939,9 +932,6 @@ suite "Backfill":
       dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
 
     check:
-      dag.addBackfillBlock(blocks[^1].phase0Data).isOk()
-      dag.backfill == blocks[^1].phase0Data.message.toBeaconBlockSummary()
-
       dag.addBackfillBlock(blocks[^2].phase0Data).isOk()
       dag.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
@@ -976,11 +966,6 @@ suite "Backfill":
     check:
       dag.getFinalizedEpochRef() != nil
 
-    # Try importing blocks too early
-    for i in 0..<blocks.len - 1:
-      check dag.addBackfillBlock(blocks[i].phase0Data) ==
-        AddBackRes.err VerifierError.MissingParent
-
     for i in 0..<blocks.len:
       check: dag.addBackfillBlock(
         blocks[blocks.len - i - 1].phase0Data).isOk()
@@ -1010,50 +995,6 @@ suite "Backfill":
     check:
       dag2.head.root == next.root
 
-  test "Restart after each block":
-    ChainDAGRef.preInit(db, tailState[])
-
-    for i in 1..blocks.len:
-      let
-        validatorMonitor = newClone(ValidatorMonitor.init())
-        dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
-
-      check dag.backfill == (
-        if i > 1:
-          blocks[^(i - 1)].phase0Data.message.toBeaconBlockSummary()
-        else:
-          BeaconBlockSummary(
-            slot: blocks[^1].phase0Data.message.slot + 1,
-            parent_root: blocks[^1].phase0Data.root))
-
-      for j in 1..blocks.len:
-        if j < i:
-          check dag.addBackfillBlock(blocks[^j].phase0Data) ==
-            AddBackRes.err VerifierError.Duplicate
-        elif j > i:
-          check dag.addBackfillBlock(blocks[^j].phase0Data) ==
-            AddBackRes.err VerifierError.MissingParent
-        else:
-          discard
-
-      check:
-        dag.addBackfillBlock(blocks[^i].phase0Data).isOk()
-        dag.backfill == blocks[^i].phase0Data.message.toBeaconBlockSummary()
-
-    block:
-      let
-        validatorMonitor = newClone(ValidatorMonitor.init())
-        dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
-        genBlock = get_initial_beacon_block(genState[])
-      check:
-        dag.addBackfillBlock(genBlock.phase0Data.asSigned()).isOk()
-        dag.backfill == default(BeaconBlockSummary)
-
-    let
-      validatorMonitor = newClone(ValidatorMonitor.init())
-      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
-    check dag.backfill == default(BeaconBlockSummary)
-
 suite "Starting states":
   setup:
     let
@@ -1118,17 +1059,14 @@ suite "Starting states":
       dag.getFinalizedEpochRef() != nil
-
-      # Checkpoint block is unavailable, and should be backfilled first
-      not dag.containsBlock(dag.tail)
-      dag.backfill == BeaconBlockSummary(
-        slot: dag.tail.slot + 1,
-        parent_root: dag.tail.root)
+      dag.backfill == tailBlock.phase0Data.message.toBeaconBlockSummary()
 
      # Check that we can propose right from the checkpoint state
      dag.getProposalState(dag.head, dag.head.slot + 1, cache).isOk()
 
-    var badBlock = blocks[^1].phase0Data
-    badBlock.signature = blocks[^2].phase0Data.signature
+    var
+      badBlock = blocks[^2].phase0Data
+    badBlock.signature = blocks[^3].phase0Data.signature
 
     check:
       dag.addBackfillBlock(badBlock) == AddBackRes.err VerifierError.Invalid
@@ -1138,9 +1076,7 @@ suite "Starting states":
       dag.addBackfillBlock(genBlock.phase0Data.asSigned()) ==
         AddBackRes.err VerifierError.MissingParent
 
-      dag.addBackfillBlock(blocks[^2].phase0Data) ==
-        AddBackRes.err VerifierError.MissingParent
-
-      dag.addBackfillBlock(tailBlock.phase0Data).isOk()
+      dag.addBackfillBlock(tailBlock.phase0Data) == AddBackRes.ok()
 
     check:
       dag.addBackfillBlock(blocks[^2].phase0Data).isOk()
@@ -1152,9 +1088,6 @@ suite "Starting states":
       dag.getBlockId(blocks[^2].root).get().root == blocks[^2].root
 
       dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
-      dag.getBlockIdAtSlot(dag.tail.slot - 1).get() ==
-        blocks[^2].toBlockId().atSlot()
-      dag.getBlockIdAtSlot(dag.tail.slot - 2).isNone
 
       dag.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()