# NOTE(review): this file arrived collapsed onto a few physical lines and is
# visibly truncated — below, the text jumps from the middle of updateSlots
# ("for i in slot.int + 1..") straight into a later "> headSlot" comparison,
# so the rest of updateSlots and the definitions between it and that check
# (presumably isKnown/update helpers and the doTrustedNodeSync entry point
# the later fragments belong to — TODO confirm against the full file) are
# missing here. The code is therefore kept byte-identical; comments only.
#
# Visible on this line:
#  * license header and the {.push raises: [Defect].} strict-exception pragma,
#  * imports (stew, chronicles, chronos plus project-local modules),
#  * DbCache — an in-memory view of the DB: block root -> BeaconBlockSummary,
#    plus slots, a per-slot Option[root] describing one linear block history
#    (emptyHash appears to mark a slot known to hold no block — see the
#    knownRoot == emptyHash skip further on),
#  * the start of updateSlots, which grows the slots seq and walks known
#    parents backwards via cache.summaries.withValue so that all recorded
#    slots belong to a single history,
#  * a fragment that aborts (quit 1) when the checkpoint is newer than the
#    head, because the ChainDAG backfiller cannot fill arbitrary gaps and
#    moving the backfill pointer would force a full re-download.
# Copyright (c) 2018-2022 Status Research & Development GmbH # Licensed and distributed under either of # * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). # at your option. This file may not be copied, modified, or distributed except according to those terms. {.push raises: [Defect].} import stew/[assign2, base10], chronicles, chronos, ./sync/sync_manager, ./consensus_object_pools/blockchain_dag, ./spec/eth2_apis/rest_beacon_client, ./spec/[beaconstate, eth2_merkleization, forks, presets, state_transition], "."/[beacon_clock, beacon_chain_db] type DbCache = object summaries: Table[Eth2Digest, BeaconBlockSummary] slots: seq[Option[Eth2Digest]] const emptyHash = Eth2Digest() proc updateSlots(cache: var DbCache, root: Eth2Digest, slot: Slot) = # The slots mapping stores one linear block history - we construct it by # starting from a given root/slot and walking the known parents as far back # as possible which ensures that all blocks belong to the same history if cache.slots.len() < slot.int + 1: cache.slots.setLen(slot.int + 1) var root = root lastSlot = slot while true: cache.summaries.withValue(root, v) do: let slot = v[].slot for i in slot.int + 1.. headSlot: # When the checkpoint is newer than the head, we run into trouble: the # current backfill in ChainDAG does not support filling in arbitrary gaps. # If we were to update the backfill pointer in this case, the ChainDAG # backfiller would re-download the entire backfill history. # For now, we'll abort and let the user choose what to do. 
# NOTE(review): fragment of the checkpoint-acquisition logic; the enclosing
# proc's header is not visible in this excerpt and the text is elided in
# places (e.g. the loop around "id = BlockIdent.init" lost its opening).
# Code kept byte-identical; comments only.
#  * First fragment: aborts when the checkpoint is newer than the head
#    (continues the explanation from the previous line).
#  * Epoch-boundary search: keeps stepping id back to the previous epoch's
#    start_slot until the downloaded checkpoint block falls on an epoch
#    boundary — the ChainDAG can only load a tail state at an epoch
#    boundary — and quits with guidance (--block-id) if none is found.
#  * If the checkpoint block root is already in dbCache.summaries, only the
#    slot cache is refreshed via updateSlots; otherwise the matching state
#    is fetched from the REST endpoint (client.getStateV2) with quit-on-error
#    handling for download failure and for a missing state.
#  * Validation: the state's latest_block_root must equal the checkpoint
#    block root, else the server returned inconsistent data and we quit.
#  * Finally the state root and full state are persisted
#    (db.putStateRoot / db.putState); the trailing "Writing " continues as
#    "checkpoint block" on the next line.
error "Checkpoint block is newer than head slot - start with a new database or use a checkpoint no more recent than the head", checkpointSlot, checkpointRoot = shortLog(checkpointBlock.root), headSlot quit 1 if checkpointSlot.is_epoch(): found = true break id = BlockIdent.init((checkpointSlot.epoch() - 1).start_slot) info "Downloaded checkpoint block does not fall on epoch boundary, trying an earlier epoch", checkpointSlot, id if not found: # The ChainDAG requires that the tail falls on an epoch boundary, or it # will be unable to load the corresponding state - this could be fixed, but # for now, we ask the user to fix it instead error "A checkpoint block from the first slot of an epoch could not be found with the given block id - pass an epoch slot with a block using the --block-id parameter", blockId quit 1 checkpointBlock let checkpointSlot = getForkedBlockField(checkpointBlock, slot) if checkpointBlock.root in dbCache.summaries: notice "Checkpoint block is already known, skipping checkpoint state download" withBlck(checkpointBlock): dbCache.updateSlots(blck.root, blck.message.slot) else: notice "Downloading checkpoint state", restUrl, checkpointSlot let state = try: await client.getStateV2(StateIdent.init(checkpointSlot), cfg) except CatchableError as exc: error "Unable to download checkpoint state", error = exc.msg, restUrl, checkpointSlot quit 1 if isNil(state): notice "No state found at given checkpoint", checkpointSlot quit 1 withState(state[]): let latest_block_root = state.latest_block_root if latest_block_root != checkpointBlock.root: error "Checkpoint state does not match checkpoint block, server error?", blockRoot = shortLog(checkpointBlock.root), blck = shortLog(checkpointBlock), stateBlockRoot = shortLog(latest_block_root) quit 1 info "Writing checkpoint state", stateRoot = shortLog(state.root) db.putStateRoot(state.latest_block_root(), state.data.slot, state.root) db.putState(state.root, state.data) withBlck(checkpointBlock): info "Writing 
# NOTE(review): fragment — the opening words complete the 'info "Writing '
# call started on the previous line, and the text is elided mid-way
# (note the impossible "for i in 0..= dbCache.slots.lenu64():", which fuses
# a missing-slots count loop with a later bounds check). Code kept
# byte-identical; comments only.
#  * Persists the checkpoint block as both head and tail
#    (putBlock/putHeadBlock/putTailBlock) and updates the cache — the
#    comment notes this mirrors what ChainDAGRef.preInit would do — then
#    sanity-checks ChainDAGRef.isInitialized and quits if the DB still looks
#    uninitialized (would indicate a bug).
#  * Backfill verification: each downloaded block is checked against the
#    known child in dbCache.slots — the child's recorded parent_root must
#    equal the downloaded block's root, otherwise the downloaded history
#    does not match the checkpoint history and we quit. Entries equal to
#    emptyHash (slots known to be empty) are skipped by advancing childSlot.
#  * Blocks not already in the summaries cache are stored and cached.
#  * Progress reporting: roughly every 12 seconds an exponentially smoothed
#    slots-per-second average is updated and used to log a time-left
#    estimate (InfiniteDuration when the speed estimate is ~0).
checkpoint block", blockRoot = shortLog(blck.root), blck = shortLog(blck.message) db.putBlock(blck.asTrusted()) db.putHeadBlock(blck.root) db.putTailBlock(blck.root) dbCache.update(blck) # Coming this far, we've done what ChainDAGRef.preInit would normally do - # Let's do a sanity check and start backfilling blocks from the trusted node if (let v = ChainDAGRef.isInitialized(db); v.isErr()): error "Database not initialized after checkpoint sync, report bug", err = v.error() quit 1 let missingSlots = block: var total = 0 for i in 0..= dbCache.slots.lenu64(): error "Downloaded block does not match checkpoint history" quit 1 if not dbCache.slots[childSlot.int].isSome(): # Should never happen - we download slots backwards error "Downloaded block does not match checkpoint history" quit 1 let knownRoot = dbCache.slots[childSlot.int].get() if knownRoot == emptyHash: childSlot += 1 continue dbCache.summaries.withValue(knownRoot, summary): if summary[].parent_root != blck.root: error "Downloaded block does not match checkpoint history", blockRoot = shortLog(blck.root), expectedRoot = shortLog(summary[].parent_root) quit 1 break # This shouldn't happen - we should have downloaded the child and # updated knownBlocks before here error "Expected child block not found in checkpoint history" quit 1 if blck.root notin dbCache.summaries: db.putBlock(blck.asTrusted()) dbCache.update(blck) let newStamp = SyncMoment.now(processed) if newStamp.stamp - stamp.stamp > 12.seconds: syncCount += 1 let remaining = blck.message.slot.int.float slotsPerSec = speed(stamp, newStamp) avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount) info "Backfilling", timeleft = toTimeLeftString( if avgSyncSpeed >= 0.001: Duration.fromFloatSeconds(remaining / avgSyncSpeed) else: InfiniteDuration), avgSyncSpeed, remaining stamp = newStamp # Download blocks backwards from the checkpoint slot, skipping the ones we # already have in the database. 
# NOTE(review): fragment — the opening words complete a comment started on
# the previous line ("Download blocks backwards ... We'll do a few downloads
# in parallel ..."); the enclosing proc header is not visible here.
#  * The loop pipelines up to gets.len() concurrent downloads: slot i maps
#    onto request slot gets[i mod gets.len]; slots already known in dbCache
#    are skipped. Iterating gets.lenu64() extra times past checkpointSlot
#    drains the requests still in flight once every slot has been scheduled.
#  * processBlock awaits a finished download and verifies/stores it; the
#    slot argument checkpointSlot + gets.lenu64() - uint64(i) reconstructs
#    the slot that request was issued for.
#  * When run as a program: the first three argv entries are forwarded to
#    doTrustedNodeSync after the runtime config (presumably network/URL/
#    block-id — TODO confirm against doTrustedNodeSync's signature), with an
#    optional 4th argument "true" enabling backfill.
# Fix in this revision: corrected the typo "canoncial" -> "canonical" in the
# user-facing completion notice; no other byte changed.
We'll do a few downloads in parallel which # risks having some redundant downloads going on, but speeds things up for i in 0'u64..<(checkpointSlot.uint64 + gets.lenu64()): if not isNil(gets[int(i mod gets.lenu64)]): await processBlock( gets[int(i mod gets.lenu64)], checkpointSlot + gets.lenu64() - uint64(i)) gets[int(i mod gets.lenu64)] = nil if i < checkpointSlot: let slot = checkpointSlot - i if dbCache.isKnown(slot): continue gets[int(i mod gets.lenu64)] = downloadBlock(slot) else: notice "Database initialized, historical blocks will be backfilled when starting the node", missingSlots notice "Done, your beacon node is ready to serve you! Don't forget to check that you're on the canonical chain by comparing the checkpoint root with other online sources. See https://nimbus.guide/trusted-node-sync.html for more information.", checkpointRoot = checkpointBlock.root when isMainModule: import std/[os] let backfill = os.paramCount() > 3 and os.paramStr(4) == "true" waitFor doTrustedNodeSync( defaultRuntimeConfig, os.paramStr(1), os.paramStr(2), os.paramStr(3), backfill)