diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md
index 68d79d950..2417c0f4d 100644
--- a/AllTests-mainnet.md
+++ b/AllTests-mainnet.md
@@ -17,10 +17,11 @@ AllTests-mainnet
 OK: 11/11 Fail: 0/11 Skip: 0/11
 ## Backfill
 ```diff
++ Init without genesis / block                                            OK
 + backfill to genesis                                                     OK
 + reload backfill position                                                OK
 ```
-OK: 2/2 Fail: 0/2 Skip: 0/2
+OK: 3/3 Fail: 0/3 Skip: 0/3
 ## Beacon chain DB [Preset: mainnet]
 ```diff
 + empty database [Preset: mainnet]                                        OK
@@ -597,4 +598,4 @@ OK: 1/1 Fail: 0/1 Skip: 0/1
 OK: 9/9 Fail: 0/9 Skip: 0/9
 
 ---TOTAL---
-OK: 330/335 Fail: 0/335 Skip: 5/335
+OK: 331/336 Fail: 0/336 Skip: 5/336
diff --git a/beacon_chain/consensus_object_pools/block_clearance.nim b/beacon_chain/consensus_object_pools/block_clearance.nim
index d8c2917fd..277d4c14d 100644
--- a/beacon_chain/consensus_object_pools/block_clearance.nim
+++ b/beacon_chain/consensus_object_pools/block_clearance.nim
@@ -316,6 +316,37 @@ proc addBackfillBlock*(
   template blck(): untyped = signedBlock.message # shortcuts without copy
   template blockRoot(): untyped = signedBlock.root
 
+  template checkSignature =
+    # If the hash is correct, the block itself must be correct, but the root does
+    # not cover the signature, which we check next
+    if blck.slot == GENESIS_SLOT:
+      # The genesis block must have an empty signature (since there's no proposer)
+      if signedBlock.signature != ValidatorSig():
+        info "Invalid genesis block signature"
+        return err(BlockError.Invalid)
+    else:
+      let proposerKey = dag.validatorKey(blck.proposer_index)
+      if proposerKey.isNone():
+        # We've verified that the block root matches our expectations by following
+        # the chain of parents all the way from checkpoint. If all those blocks
+        # were valid, the proposer_index in this block must also be valid, and we
+        # should have a key for it but we don't: this is either a bug on our part
+        # from which we cannot recover, or an invalid checkpoint state was given
+        # in which case we're in trouble.
+        fatal "Invalid proposer in backfill block - checkpoint state corrupt?",
+          head = shortLog(dag.head), tail = shortLog(dag.tail)
+
+        quit 1
+
+      if not verify_block_signature(
+          dag.forkAtEpoch(blck.slot.epoch),
+          getStateField(dag.headState, genesis_validators_root),
+          blck.slot,
+          signedBlock.root,
+          proposerKey.get(),
+          signedBlock.signature):
+        info "Block signature verification failed"
+        return err(BlockError.Invalid)
+
   let startTick = Moment.now()
 
@@ -324,9 +355,16 @@ proc addBackfillBlock*(
   if existing.isSome:
     if existing.get().bid.slot == blck.slot and
         existing.get().bid.root == blockRoot:
-      # We should not call the block added callback for blocks that already
-      # existed in the pool, as that may confuse consumers such as the fork
-      # choice.
+
+      # Special case: when starting with only a checkpoint state, we will not
+      # have the head block data in the database
+      if dag.getForkedBlock(existing.get().bid).isNone():
+        checkSignature()
+
+        debug "Block backfilled (checkpoint)"
+        dag.putBlock(signedBlock.asTrusted())
+        return ok()
+
       debug "Duplicate block"
       return err(BlockError.Duplicate)
 
@@ -338,60 +376,37 @@
 
     return err(BlockError.UnviableFork)
 
-  if blck.slot == dag.frontfill.slot and
-      dag.backfill.parent_root == dag.frontfill.root:
-    if blockRoot != dag.frontfill.root:
-      # We've matched the backfill blocks all the way back to frontfill via the
-      # `parent_root` chain and ended up at a different block - one way this
-      # can happen is when an invalid `--network` parameter is given during
-      # startup (though in theory, we check that - maybe the database was
-      # swapped or something?).
-      fatal "Checkpoint given during initial startup inconsistent with genesis block - wrong network used when starting the node?",
-        genesis = shortLog(dag.genesis), tail = shortLog(dag.tail),
-        head = shortLog(dag.head)
-      quit 1
+  if dag.frontfill.isSome():
+    let frontfill = dag.frontfill.get()
+    if blck.slot == frontfill.slot and
+        dag.backfill.parent_root == frontfill.root:
+      if blockRoot != frontfill.root:
+        # We've matched the backfill blocks all the way back to frontfill via the
+        # `parent_root` chain and ended up at a different block - one way this
+        # can happen is when an invalid `--network` parameter is given during
+        # startup (though in theory, we check that - maybe the database was
+        # swapped or something?).
+        fatal "Checkpoint given during initial startup inconsistent with genesis block - wrong network used when starting the node?",
+          tail = shortLog(dag.tail), head = shortLog(dag.head)
+        quit 1
 
-    # Signal that we're done by resetting backfill
-    reset(dag.backfill)
-    dag.db.finalizedBlocks.insert(blck.slot, blockRoot)
-    dag.updateFrontfillBlocks()
+      # Signal that we're done by resetting backfill
+      reset(dag.backfill)
+      dag.db.finalizedBlocks.insert(blck.slot, blockRoot)
+      dag.updateFrontfillBlocks()
 
-    notice "Received final block during backfill, backfill complete"
+      notice "Received final block during backfill, backfill complete"
 
-    # Backfill done - dag.backfill.slot now points to genesis block just like
-    # it would if we loaded a fully synced database - returning duplicate
-    # here is appropriate, though one could also call it ... ok?
-    return err(BlockError.Duplicate)
+      # Backfill done - dag.backfill.slot now points to genesis block just like
+      # it would if we loaded a fully synced database - returning duplicate
+      # here is appropriate, though one could also call it ... ok?
+      return err(BlockError.Duplicate)
 
   if dag.backfill.parent_root != blockRoot:
     debug "Block does not match expected backfill root"
     return err(BlockError.MissingParent) # MissingChild really, but ..
 
-  # If the hash is correct, the block itself must be correct, but the root does
-  # not cover the signature, which we check next
-  let proposerKey = dag.validatorKey(blck.proposer_index)
-  if proposerKey.isNone():
-    # We've verified that the block root matches our expectations by following
-    # the chain of parents all the way from checkpoint. If all those blocks
-    # were valid, the proposer_index in this block must also be valid, and we
-    # should have a key for it but we don't: this is either a bug on our from
-    # which we cannot recover, or an invalid checkpoint state was given in which
-    # case we're in trouble.
- fatal "Invalid proposer in backfill block - checkpoint state corrupt?", - head = shortLog(dag.head), tail = shortLog(dag.tail), - genesis = shortLog(dag.genesis) - - quit 1 - - if not verify_block_signature( - dag.forkAtEpoch(blck.slot.epoch), - getStateField(dag.headState, genesis_validators_root), - blck.slot, - signedBlock.root, - proposerKey.get(), - signedBlock.signature): - info "Block signature verification failed" - return err(BlockError.Invalid) + checkSignature() let sigVerifyTick = Moment.now diff --git a/beacon_chain/consensus_object_pools/block_pools_types.nim b/beacon_chain/consensus_object_pools/block_pools_types.nim index 13d4495d7..31e34410f 100644 --- a/beacon_chain/consensus_object_pools/block_pools_types.nim +++ b/beacon_chain/consensus_object_pools/block_pools_types.nim @@ -138,14 +138,15 @@ type ## `finalizedHead.slot..head.slot` (inclusive) - dag.heads keeps track ## of each potential head block in this table. - genesis*: BlockId - ## The genesis block of the network + genesis*: Opt[BlockId] + ## The root of the genesis block, iff it is known (ie if the database was + ## created with a genesis state available) tail*: BlockId - ## The earliest finalized block for which we have a corresponding state - - ## when making a replay of chain history, this is as far back as we can - ## go - the tail block is unique in that its parent is set to `nil`, even - ## in the case where an earlier genesis block exists. + ## The earliest block for which we can construct a state - we consider + ## the tail implicitly finalized no matter what fork choice and state + ## says - when starting from a trusted checkpoint, the tail is set to + ## the checkpoint block. head*: BlockRef ## The most recently known head, as chosen by fork choice; might be @@ -333,10 +334,14 @@ type template head*(dag: ChainDAGRef): BlockRef = dag.headState.blck -template frontfill*(dagParam: ChainDAGRef): BlockId = +template frontfill*(dagParam: ChainDAGRef): Opt[BlockId] = + ## When there's a gap in the block database, this is the most recent block + ## that we know of _before_ the gap - after a checkpoint sync, this will + ## typically be the genesis block (iff available) - if we have era files, + ## this is the most recent era file block. let dag = dagParam if dag.frontfillBlocks.lenu64 > 0: - BlockId( + Opt.some BlockId( slot: Slot(dag.frontfillBlocks.lenu64 - 1), root: dag.frontfillBlocks[^1]) else: dag.genesis diff --git a/beacon_chain/consensus_object_pools/blockchain_dag.nim b/beacon_chain/consensus_object_pools/blockchain_dag.nim index 686ce2be6..e0fcb108c 100644 --- a/beacon_chain/consensus_object_pools/blockchain_dag.nim +++ b/beacon_chain/consensus_object_pools/blockchain_dag.nim @@ -171,12 +171,13 @@ func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] = ## Retrieve the canonical block at the given slot, or the last block that ## comes before - similar to atSlot, but without the linear scan - may hit ## the database to look up early indices. 
-  if slot == dag.genesis.slot:
-    return ok BlockSlotId.init(dag.genesis, slot)
-
   if slot > dag.finalizedHead.slot:
     return dag.head.atSlot(slot).toBlockSlotId() # iterate to the given slot
 
+  if dag.finalizedHead.blck == nil:
+    # Not initialized yet (in init)
+    return Opt.none(BlockSlotId)
+
   if slot >= dag.finalizedHead.blck.slot:
     # finalized head is still in memory
     return dag.finalizedHead.blck.atSlot(slot).toBlockSlotId()
@@ -195,7 +196,35 @@ func getBlockIdAtSlot*(dag: ChainDAGRef, slot: Slot): Opt[BlockSlotId] =
 
       pos = pos - 1
 
-  err() # not backfilled yet, and not genesis
+  if slot == GENESIS_SLOT and dag.genesis.isSome():
+    return ok dag.genesis.get().atSlot()
+
+  err() # not backfilled yet
+
+proc getForkedBlock*(db: BeaconChainDB, root: Eth2Digest):
+    Opt[ForkedTrustedSignedBeaconBlock] {.gcsafe.}
+
+proc getBlockId(db: BeaconChainDB, root: Eth2Digest): Opt[BlockId] =
+  block: # We might have a summary in the database
+    let summary = db.getBeaconBlockSummary(root)
+    if summary.isOk():
+      return ok(BlockId(root: root, slot: summary.get().slot))
+
+  block:
+    # We might have a block without having written a summary - this can happen
+    # if there was a crash between writing the block and writing the summary,
+    # especially in databases written by older nimbus versions
+    let forked = db.getForkedBlock(root)
+    if forked.isSome():
+      # Shouldn't happen too often but..
+      let
+        blck = forked.get()
+        summary = withBlck(blck): blck.message.toBeaconBlockSummary()
+      debug "Writing summary", blck = shortLog(blck)
+      db.putBeaconBlockSummary(root, summary)
+      return ok(BlockId(root: root, slot: summary.slot))
+
+  err()
 
 proc getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] =
   ## Look up block id by root in history - useful for turning a root into a
@@ -207,12 +236,7 @@ proc getBlockId*(dag: ChainDAGRef, root: Eth2Digest): Opt[BlockId] =
     if blck.isOk():
       return ok(blck.get().bid)
 
-  block: # We might have a summary in the database
-    let summary = dag.db.getBeaconBlockSummary(root)
-    if summary.isOk():
-      return ok(BlockId(root: root, slot: summary.get().slot))
-
-  err()
+  dag.db.getBlockId(root)
 
 func isCanonical*(dag: ChainDAGRef, bid: BlockId): bool =
   ## Return true iff the given `bid` is part of the history selected by `dag.head`
@@ -701,20 +725,6 @@ export
   blockchain_dag_light_client.getLightClientFinalityUpdate,
   blockchain_dag_light_client.getLightClientOptimisticUpdate
 
-proc getViableHead(cfg: RuntimeConfig, db: BeaconChainDB): Opt[BlockId] =
-  # When the database has been written with a pre-fork version of the
-  # software, it may happen that blocks produced using an "unforked"
-  # chain get written to the database - we need to skip such blocks
-  # when loading the database with a fork-compatible version
-  let
-    headRoot = ? db.getHeadBlock()
-
-  for blck in db.getAncestorSummaries(headRoot):
-    if containsBlock(cfg, db, blck.summary.slot, blck.root):
-      return ok(BlockId(slot: blck.summary.slot, root: blck.root))
-
-  err()
-
 proc putState(dag: ChainDAGRef, state: ForkedHashedBeaconState, bid: BlockId) =
   # Store a state and its root
   logScope:
@@ -816,21 +826,22 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
   # genesis or a checkpoint
   let
     startTick = Moment.now()
-    genesisRoot = db.getGenesisBlock().expect(
-      "preInit should have initialized the database with a genesis block root")
+    genesisRoot = db.getGenesisBlock()
     tailRoot = db.getTailBlock().expect(
       "preInit should have initialized the database with a tail block root")
-    tailBlock = db.getForkedBlock(tailRoot).expect(
-      "Tail block in database, corrupt?")
-    head = getViableHead(cfg, db).expect("Head root in database, corrupt?")
+    tail = db.getBlockId(tailRoot).expect(
+      "tail block summary in database, database corrupt?")
+    headRoot = db.getHeadBlock().expect("head root, database corrupt?")
+    head = db.getBlockId(headRoot).expect("head block id, database corrupt?")
 
     # Have to be careful with this instance, it is not yet fully initialized so
    # as to avoid having to allocate a separate "init" state
    dag = ChainDAGRef(
      db: db,
      validatorMonitor: validatorMonitor,
-      genesis: BlockId(root: genesisRoot, slot: GENESIS_SLOT),
-      tail: tailBlock.toBlockId(),
+      genesis: genesisRoot.map(
+        proc(x: auto): auto = BlockId(root: x, slot: GENESIS_SLOT)),
+      tail: tail,
 
      # The only allowed flag right now is strictVerification, as the others all
      # allow skipping some validation.
@@ -857,7 +868,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
     # To know the finalized checkpoint of the head, we need to recreate its
     # state - the tail is implicitly finalized, and if we have a finalized block
     # table, that provides another hint
-    finalizedSlot = db.finalizedBlocks.high.get(dag.tail.slot)
+    finalizedSlot = db.finalizedBlocks.high.get(tail.slot)
     newFinalized: seq[BlockId]
     cache: StateCache
     foundHeadState = false
@@ -869,7 +880,6 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
      # The execution block root gets filled in as needed
      let newRef = BlockRef.init(blck.root, none Eth2Digest, blck.summary.slot)
      if headRef == nil:
-        doAssert blck.root == head.root
        headRef = newRef
 
      if curRef != nil:
@@ -908,7 +918,21 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
        slot += 1
 
    if not foundHeadState:
-      headBlocks.add curRef
+      # When the database has been written with a pre-fork version of the
+      # software, it may happen that blocks produced using an "unforked"
+      # chain get written to the database - we need to skip such blocks
+      # when loading the database with a fork-compatible version
+      if containsBlock(cfg, db, curRef.slot, curRef.root):
+        headBlocks.add curRef
+      else:
+        if headBlocks.len > 0:
+          fatal "Missing block needed to create head state, database corrupt?",
+            curRef = shortLog(curRef)
+          quit 1
+        # Without the block data we can't form a state for this root, so
+        # we'll need to move the head back
+        headRef = nil
+        dag.forkBlocks.excl(KeyedBlockRef.init(curRef))
 
    if curRef.slot <= finalizedSlot:
      # Only non-finalized slots get a `BlockRef`
@@ -918,8 +942,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
 
   if not foundHeadState:
     fatal "Could not load head state, database corrupt?",
-      head = shortLog(head), tail = shortLog(dag.tail),
-      genesis = shortLog(dag.genesis)
+      head = shortLog(head), tail = shortLog(dag.tail)
     quit 1
 
   let
@@ -931,7 +954,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
 
   if stateFork != configFork:
     error "State from database does not match network, check --network parameter",
-      genesis = dag.genesis, tail = dag.tail, headRef, stateFork, configFork
+      tail = dag.tail, headRef, stateFork, configFork
     quit 1
 
   # Need to load state to find genesis validators root, before loading era db
@@ -956,7 +979,7 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
       db.finalizedBlocks.high.get() < dag.finalizedHead.blck.slot:
     info "Loading finalized blocks",
       finHigh = db.finalizedBlocks.high,
-       finalizedHead = shortLog(dag.finalizedHead)
+      finalizedHead = shortLog(dag.finalizedHead)
 
     for blck in db.getAncestorSummaries(dag.finalizedHead.blck.root):
       if db.finalizedBlocks.high.isSome and
@@ -989,7 +1012,8 @@ proc init*(T: type ChainDAGRef, cfg: RuntimeConfig, db: BeaconChainDB,
       db.getBeaconBlockSummary(backfillRoot).expect(
         "Backfill block must have a summary: " & $backfillRoot)
     else:
-      withBlck(tailBlock): blck.message.toBeaconBlockSummary()
+      db.getBeaconBlockSummary(dag.tail.root).expect(
+        "Tail block must have a summary: " & $dag.tail.root)
 
   dag.forkDigests = newClone ForkDigests.init(
     cfg, getStateField(dag.headState, genesis_validators_root))
@@ -1937,6 +1961,42 @@ proc isInitialized*(T: type ChainDAGRef, db: BeaconChainDB): Result[void, cstrin
 
   ok()
 
+proc preInit*(
+    T: type ChainDAGRef, db: BeaconChainDB, state: ForkedHashedBeaconState) =
+  ## Initialize a database using the given state, which potentially may be a
+  ## non-genesis state.
+  ##
+  ## Databases created like this are incompatible with versions prior to
+  ## 22.11.0.
+
+  logScope:
+    stateRoot = getStateRoot(state)
+    stateSlot = getStateField(state, slot)
+
+  withState(state):
+    db.putState(forkyState)
+    if forkyState.data.slot == GENESIS_SLOT:
+      let blck = get_initial_beacon_block(forkyState)
+      db.putBlock(blck)
+      db.putGenesisBlock(blck.root)
+      db.putHeadBlock(blck.root)
+      db.putTailBlock(blck.root)
+
+      notice "New genesis database initialized",
+        genesisBlockRoot = $blck.root
+
+    else:
+      let blockRoot = forkyState.latest_block_root()
+      db.putBeaconBlockSummary(blockRoot, BeaconBlockSummary(
+        slot: forkyState.data.latest_block_header.slot,
+        parent_root: forkyState.data.latest_block_header.parent_root
+      ))
+      db.putHeadBlock(blockRoot)
+      db.putTailBlock(blockRoot)
+
+      notice "New snapshot database initialized",
+        blockRoot = $blockRoot
+
 proc preInit*(
     T: type ChainDAGRef, db: BeaconChainDB,
     genesisState, tailState: ForkedHashedBeaconState,
@@ -2026,7 +2086,7 @@ proc getProposer*(
   proposer
 
 proc getProposalState*(
-    dag: ChainDagRef, head: BlockRef, slot: Slot, cache: var StateCache):
+    dag: ChainDAGRef, head: BlockRef, slot: Slot, cache: var StateCache):
     Result[ref ForkedHashedBeaconState, cstring] =
   ## Return a state suitable for making proposals for the given head and slot -
   ## in particular, the state can be discarded after use and does not have a
@@ -2115,16 +2175,17 @@ proc aggregateAll*(
   ok(finish(aggregateKey))
 
 func needsBackfill*(dag: ChainDAGRef): bool =
-  dag.backfill.slot > dag.genesis.slot
+  dag.backfill.slot > GENESIS_SLOT
 
 proc rebuildIndex*(dag: ChainDAGRef) =
   ## After a checkpoint sync, we lack intermediate states to replay from - this
   ## function rebuilds them so that historical replay can take place again
+  ## TODO handle databases without genesis state
   if dag.backfill.slot > 0:
     debug "Backfill not complete, cannot rebuild archive"
     return
 
-  if dag.tail.slot == dag.genesis.slot:
+  if dag.tail.slot == GENESIS_SLOT:
     # The tail is the earliest slot for which we're supposed to have states -
     # if it's sufficiently recent, don't do anything
     debug "Archive does not need rebuilding"
     return
@@ -2226,9 +2287,12 @@ proc rebuildIndex*(dag: ChainDAGRef) =
   # Now that we have states all the way to genesis, we can adjust the tail
   # and readjust the in-memory indices to what they would look like if we had
   # started with an earlier tail
-  dag.db.putTailBlock(dag.genesis.root)
+  let
+    genesis =
+      dag.getBlockIdAtSlot(GENESIS_SLOT).expect("Genesis in database").bid
+  dag.db.putTailBlock(genesis.root)
 
-  dag.tail = dag.genesis
+  dag.tail = genesis
 
   if junk.len > 0:
     info "Dropping redundant states", states = junk.len
diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim
index d345e1b2c..3022a98c8 100644
--- a/beacon_chain/nimbus_beacon_node.nim
+++ b/beacon_chain/nimbus_beacon_node.nim
@@ -277,7 +277,10 @@ proc initFullNode(
       dag.backfill.slot
 
   func getFrontfillSlot(): Slot =
-    dag.frontfill.slot
+    if dag.frontfill.isSome():
+      dag.frontfill.get().slot
+    else:
+      GENESIS_SLOT
 
   let
     quarantine = newClone(
diff --git a/beacon_chain/rpc/rest_utils.nim b/beacon_chain/rpc/rest_utils.nim
index b7679a33d..a2d11df67 100644
--- a/beacon_chain/rpc/rest_utils.nim
+++ b/beacon_chain/rpc/rest_utils.nim
@@ -80,7 +80,9 @@ proc getBlockSlotId*(node: BeaconNode,
       of StateIdentType.Head:
         ok(node.dag.head.bid.atSlot())
       of StateIdentType.Genesis:
-        ok(node.dag.genesis.atSlot())
+        let bid = node.dag.getBlockIdAtSlot(GENESIS_SLOT).valueOr:
+          return err("Genesis state not available / pruned")
+        ok bid
       of StateIdentType.Finalized:
         ok(node.dag.finalizedHead.toBlockSlotId().expect("not nil"))
       of StateIdentType.Justified:
@@ -98,7 +100,7 @@ proc getBlockId*(node: BeaconNode, id: BlockIdent): Opt[BlockId] =
     of BlockIdentType.Head:
       ok(node.dag.head.bid)
     of BlockIdentType.Genesis:
-      ok(node.dag.genesis)
+      node.dag.getBlockIdAtSlot(GENESIS_SLOT).map(proc(x: auto): auto = x.bid)
     of BlockIdentType.Finalized:
       ok(node.dag.finalizedHead.blck.bid)
   of BlockQueryKind.Root:
diff --git a/ncli/ncli_db.nim b/ncli/ncli_db.nim
index 66b7f530a..d2b9cae9a 100644
--- a/ncli/ncli_db.nim
+++ b/ncli/ncli_db.nim
@@ -437,7 +437,7 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
 
 func atCanonicalSlot(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] =
   if slot == 0:
-    ok dag.genesis.atSlot()
+    dag.getBlockIdAtSlot(GENESIS_SLOT)
   else:
     ok BlockSlotId.init((? dag.atSlot(bid, slot - 1)).bid, slot)
 
diff --git a/tests/consensus_spec/test_fixture_fork_choice.nim b/tests/consensus_spec/test_fixture_fork_choice.nim
index 25db1280b..42d47756d 100644
--- a/tests/consensus_spec/test_fixture_fork_choice.nim
+++ b/tests/consensus_spec/test_fixture_fork_choice.nim
@@ -288,10 +288,8 @@ proc stepChecks(
       doAssert fkChoice.checkpoints.proposer_boost_root ==
         Eth2Digest.fromHex(val.getStr())
     elif check == "genesis_time":
-      # The fork choice is pruned regularly
-      # and does not store the genesis time,
-      # hence we check the DAG
-      doAssert dag.genesis.slot == Slot(val.getInt())
+      # We do not store genesis in fork choice..
+      discard
     else:
       doAssert false, "Unsupported check '" & $check & "'"
 
diff --git a/tests/test_blockchain_dag.nim b/tests/test_blockchain_dag.nim
index fd706695b..ec0d00397 100644
--- a/tests/test_blockchain_dag.nim
+++ b/tests/test_blockchain_dag.nim
@@ -89,7 +89,7 @@ suite "Block pool processing" & preset():
       dag.parent(b2Add[].bid).get() == b1Add[].bid
       # head not updated yet - getBlockIdAtSlot won't give those blocks
       dag.getBlockIdAtSlot(b2Add[].slot).get() ==
-        BlockSlotId.init(dag.genesis, b2Add[].slot)
+        BlockSlotId.init(dag.getBlockIdAtSlot(GENESIS_SLOT).get().bid, b2Add[].slot)
 
       sr.isSome()
       er.isSome()
@@ -466,7 +466,7 @@ suite "chain DAG finalization tests" & preset():
 
     check:
       dag.heads.len() == 1
-      dag.getBlockIdAtSlot(0.Slot).get() == BlockSlotId.init(dag.genesis, 0.Slot)
+      dag.getBlockIdAtSlot(0.Slot).get().bid.slot == 0.Slot
       dag.getBlockIdAtSlot(2.Slot).get() ==
         BlockSlotId.init(dag.getBlockIdAtSlot(1.Slot).get().bid, 2.Slot)
@@ -478,7 +478,7 @@
       not dag.containsForkBlock(dag.getBlockIdAtSlot(5.Slot).get().bid.root)
       dag.containsForkBlock(dag.finalizedHead.blck.root)
 
-      dag.getBlockRef(dag.genesis.root).isNone() # Finalized - no BlockRef
+      dag.getBlockRef(dag.getBlockIdAtSlot(0.Slot).get().bid.root).isNone() # Finalized - no BlockRef
 
       dag.getBlockRef(dag.finalizedHead.blck.root).isSome()
 
@@ -838,7 +838,7 @@ suite "Backfill":
       dag.getBlockIdAtSlot(dag.tail.slot).get().bid == dag.tail
       dag.getBlockIdAtSlot(dag.tail.slot - 1).isNone()
 
-      dag.getBlockIdAtSlot(Slot(0)).get() == dag.genesis.atSlot()
+      dag.getBlockIdAtSlot(Slot(0)).isSome() # genesis stored in db
       dag.getBlockIdAtSlot(Slot(1)).isNone()
 
       # No EpochRef for pre-tail epochs
@@ -943,6 +943,46 @@ suite "Backfill":
       dag2.getBlockIdAtSlot(dag.tail.slot - 2).isNone
       dag2.backfill == blocks[^2].phase0Data.message.toBeaconBlockSummary()
 
+  test "Init without genesis / block":
+    let
+      tailBlock = blocks[^1]
+      genBlock = get_initial_beacon_block(genState[])
+
+    ChainDAGRef.preInit(db, tailState[])
+
+    let
+      validatorMonitor = newClone(ValidatorMonitor.init())
+      dag = init(ChainDAGRef, defaultRuntimeConfig, db, validatorMonitor, {})
+
+    check:
+      dag.getFinalizedEpochRef() != nil
+
+    for i in 0..