Backfill only up to MIN_EPOCHS_FOR_BLOCK_REQUESTS blocks (#4421)
When backfilling, we only need to download blocks from the most recent MIN_EPOCHS_FOR_BLOCK_REQUESTS epochs - older blocks cannot reliably be fetched from the network and do not have to be served to others. This change affects only trusted-node-synced clients - genesis sync continues to work as before (because it must construct the state by building it forward from genesis). Those wishing to complete a full backfill should do so with era files instead.
This commit is contained in:
parent 2ac7609259, commit 75c7195bfd
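For orientation, a minimal standalone sketch of the horizon calculation this change introduces (the horizonSlot helper and the hard-coded constants below are illustrative only; the real implementation is the horizon func on ChainDAGRef added in the diff that follows):

const
  MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024'u64  # mainnet preset, roughly five months
  SLOTS_PER_EPOCH = 32'u64

func horizonSlot(headSlot: uint64): uint64 =
  ## Oldest slot a trusted-node-synced client will still backfill from the network.
  let headEpoch = headSlot div SLOTS_PER_EPOCH
  if headEpoch > MIN_EPOCHS_FOR_BLOCK_REQUESTS:
    (headEpoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS) * SLOTS_PER_EPOCH
  else:
    0'u64  # GENESIS_SLOT

when isMainModule:
  echo horizonSlot(5_000_000'u64)  # 3943232 - only the newest 1,056,768 slots are backfilled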
@@ -410,6 +410,13 @@ proc addBackfillBlock*(
     debug "Block does not match expected backfill root"
     return err(VerifierError.MissingParent) # MissingChild really, but ..

+  if blck.slot < dag.horizon:
+    # This can happen as the horizon keeps moving - we'll discard it as
+    # duplicate since it would have duplicated an existing block had we been
+    # interested
+    debug "Block past horizon, dropping", horizon = dag.horizon
+    return err(VerifierError.Duplicate)
+
   checkSignature()

   let sigVerifyTick = Moment.now
@@ -362,6 +362,14 @@ template frontfill*(dagParam: ChainDAGRef): Opt[BlockId] =
   else:
     dag.genesis

+func horizon*(dag: ChainDAGRef): Slot =
+  ## The sync horizon that we target during backfill - ie we will not backfill
+  ## blocks older than this from the network
+  if dag.head.slot.epoch > dag.cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS:
+    start_slot(dag.head.slot.epoch - dag.cfg.MIN_EPOCHS_FOR_BLOCK_REQUESTS)
+  else:
+    GENESIS_SLOT
+
 template epoch*(e: EpochRef): Epoch = e.key.epoch

 func shortLog*(v: EpochKey): string =
@@ -1295,8 +1295,13 @@ proc getBlockRange*(
     head = shortLog(dag.head.root), requestedCount, startSlot, skipStep, headSlot

   if startSlot < dag.backfill.slot:
-    notice "Got request for pre-backfill slot",
-      startSlot, backfillSlot = dag.backfill.slot
+    if startSlot < dag.horizon:
+      # We will not backfill these
+      debug "Got request for pre-horizon slot",
+        startSlot, backfillSlot = dag.backfill.slot
+    else:
+      notice "Got request for pre-backfill slot",
+        startSlot, backfillSlot = dag.backfill.slot
     return output.len

   if headSlot <= startSlot or requestedCount == 0:
@@ -244,7 +244,6 @@ proc newExecutionPayload*(
     executionPayload: bellatrix.ExecutionPayload | capella.ExecutionPayload):
     Future[Opt[PayloadExecutionStatus]] {.async.} =
   if eth1Monitor.isNil:
-    warn "newPayload: attempting to process execution payload without Eth1Monitor. Ensure --web3-url setting is correct and JWT is configured."
     return Opt.none PayloadExecutionStatus

   debug "newPayload: inserting block into execution engine",
@@ -320,6 +319,9 @@ proc getExecutionValidity(
   if not blck.message.is_execution_block:
     return NewPayloadStatus.valid # vacuously

+  if eth1Monitor.isNil:
+    return NewPayloadStatus.noResponse
+
   try:
     # Minimize window for Eth1 monitor to shut down connection
     await eth1Monitor.ensureDataProvider()
@@ -381,8 +383,10 @@ proc storeBlock*(
     # `processBlock` (indirectly). `validator_duties` does call `storeBlock`
     # directly, so is exposed to this, but only cares about whether there is
     # an error or not.
-    return err((
-      VerifierError.MissingParent, ProcessingStatus.notCompleted))
+    if self[].consensusManager.eth1Monitor.isNil:
+      warn "Attempting to process execution payload without execution client. Ensure --web3-url setting is correct and JWT is configured."
+
+    return err((VerifierError.MissingParent, ProcessingStatus.notCompleted))

   # Client software MUST validate blockHash value as being equivalent to
   # Keccak256(RLP(ExecutionBlockHeader))
@@ -277,10 +277,7 @@ proc initFullNode(
       dag.backfill.slot

   func getFrontfillSlot(): Slot =
-    if dag.frontfill.isSome():
-      dag.frontfill.get().slot
-    else:
-      GENESIS_SLOT
+    max(dag.frontfill.get(BlockId()).slot, dag.horizon)

   let
     quarantine = newClone(
@@ -451,10 +448,7 @@ proc init*(T: type BeaconNode,
   let optJwtSecret = rng[].loadJwtSecret(config, allowCreate = false)

   if config.web3Urls.len() == 0:
-    if cfg.BELLATRIX_FORK_EPOCH == FAR_FUTURE_EPOCH:
-      notice "Running without execution client - validator features partially disabled (see https://nimbus.guide/eth1.html)"
-    else:
-      notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)"
+    notice "Running without execution client - validator features disabled (see https://nimbus.guide/eth1.html)"

   var eth1Monitor: Eth1Monitor
@@ -1633,8 +1627,6 @@ proc start*(node: BeaconNode) {.raises: [Defect, CatchableError].} =

   if node.eth1Monitor != nil:
     node.eth1Monitor.start()
-  else:
-    notice "Running without execution chain monitor, block producation partially disabled"

   node.run()
@@ -183,10 +183,19 @@ proc getBlocks*[A, B](man: SyncManager[A, B], peer: A,
     return

 proc remainingSlots(man: SyncManager): uint64 =
+  let
+    first = man.getFirstSlot()
+    last = man.getLastSlot()
   if man.direction == SyncQueueKind.Forward:
-    man.getLastSlot() - man.getFirstSlot()
+    if last > first:
+      man.getLastSlot() - man.getFirstSlot()
+    else:
+      0'u64
   else:
-    man.getFirstSlot() - man.getLastSlot()
+    if first > last:
+      man.getFirstSlot() - man.getLastSlot()
+    else:
+      0'u64

 proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
   logScope:
@@ -297,9 +306,8 @@ proc syncStep[A, B](man: SyncManager[A, B], index: int, peer: A) {.async.} =
     peer.updateScore(PeerScoreUseless)
     return

-  if man.direction == SyncQueueKind.Forward:
-    # Wall clock keeps ticking, so we need to update the queue
-    man.queue.updateLastSlot(man.getLastSlot())
+  # Wall clock keeps ticking, so we need to update the queue
+  man.queue.updateLastSlot(man.getLastSlot())

   man.workers[index].status = SyncWorkerStatus.Requesting
   let req = man.queue.pop(peerSlot, peer)
@@ -575,15 +583,27 @@ proc syncLoop[A, B](man: SyncManager[A, B]) {.async.} =
       progress =
         case man.queue.kind
         of SyncQueueKind.Forward:
-          man.queue.outSlot - pivot
+          if man.queue.outSlot >= pivot:
+            man.queue.outSlot - pivot
+          else:
+            0'u64
         of SyncQueueKind.Backward:
-          pivot - man.queue.outSlot
+          if pivot >= man.queue.outSlot:
+            pivot - man.queue.outSlot
+          else:
+            0'u64
       total =
         case man.queue.kind
         of SyncQueueKind.Forward:
-          man.queue.finalSlot + 1'u64 - pivot
+          if man.queue.finalSlot >= pivot:
+            man.queue.finalSlot + 1'u64 - pivot
+          else:
+            0'u64
         of SyncQueueKind.Backward:
-          pivot + 1'u64 - man.queue.finalSlot
+          if pivot >= man.queue.finalSlot:
+            pivot + 1'u64 - man.queue.finalSlot
+          else:
+            0'u64
       remaining = total - progress
       done =
         if total > 0:
@@ -281,17 +281,7 @@ proc makePending*[T](sq: SyncQueue[T], req: var SyncRequest[T]) =

 proc updateLastSlot*[T](sq: SyncQueue[T], last: Slot) {.inline.} =
   ## Update last slot stored in queue ``sq`` with value ``last``.
-  case sq.kind
-  of SyncQueueKind.Forward:
-    doAssert(sq.finalSlot <= last,
-             "Last slot could not be lower then stored one " &
-             $sq.finalSlot & " <= " & $last)
-    sq.finalSlot = last
-  of SyncQueueKind.Backward:
-    doAssert(sq.finalSlot >= last,
-             "Last slot could not be higher then stored one " &
-             $sq.finalSlot & " >= " & $last)
-    sq.finalSlot = last
+  sq.finalSlot = last

 proc wakeupWaiters[T](sq: SyncQueue[T], reset = false) =
   ## Wakeup one or all blocked waiters.
@@ -793,7 +783,7 @@ proc push*[T](sq: SyncQueue[T], sr: SyncRequest[T],
       if safeSlot > failSlot:
         let rewindSlot = sq.getRewindPoint(failSlot, safeSlot)
         # It's quite common peers give us fewer blocks than we ask for
-        info "Gap in block range response, rewinding", request = req,
+        debug "Gap in block range response, rewinding", request = req,
           rewind_to_slot = rewindSlot, rewind_fail_slot = failSlot,
           finalized_slot = safeSlot, blocks_count = len(item.data),
           blocks_map = getShortMap(req, item.data)
@@ -974,22 +964,31 @@ proc len*[T](sq: SyncQueue[T]): uint64 {.inline.} =
   ## Returns number of slots left in queue ``sq``.
   case sq.kind
   of SyncQueueKind.Forward:
-    sq.finalSlot + 1'u64 - sq.outSlot
+    if sq.finalSlot >= sq.outSlot:
+      sq.finalSlot + 1'u64 - sq.outSlot
+    else:
+      0'u64
   of SyncQueueKind.Backward:
-    sq.outSlot + 1'u64 - sq.finalSlot
+    if sq.outSlot >= sq.finalSlot:
+      sq.outSlot + 1'u64 - sq.finalSlot
+    else:
+      0'u64

 proc total*[T](sq: SyncQueue[T]): uint64 {.inline.} =
   ## Returns total number of slots in queue ``sq``.
   case sq.kind
   of SyncQueueKind.Forward:
-    sq.finalSlot + 1'u64 - sq.startSlot
+    if sq.finalSlot >= sq.startSlot:
+      sq.finalSlot + 1'u64 - sq.startSlot
+    else:
+      0'u64
   of SyncQueueKind.Backward:
-    sq.startSlot + 1'u64 - sq.finalSlot
+    if sq.startSlot >= sq.finalSlot:
+      sq.startSlot + 1'u64 - sq.finalSlot
+    else:
+      0'u64

 proc progress*[T](sq: SyncQueue[T]): uint64 =
-  ## How many slots we've synced so far
-  case sq.kind
-  of SyncQueueKind.Forward:
-    sq.outSlot - sq.startSlot
-  of SyncQueueKind.Backward:
-    sq.startSlot - sq.outSlot
+  ## How many useful slots we've synced so far, adjusting for how much has
+  ## become obsolete by time movements
+  sq.total - sq.len
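The sync_manager and sync_queue hunks above repeatedly replace a bare uint64 subtraction with a clamped one: with the backward queue's final slot now allowed to move (updateLastSlot above no longer asserts monotonicity, since the horizon follows the wall clock), the subtrahend can briefly overtake the minuend, and unchecked uint64 subtraction would wrap around instead of reaching zero. A minimal sketch of that guard as a standalone helper - the saturatingSub name is hypothetical, the codebase writes the check out inline each time:

func saturatingSub(a, b: uint64): uint64 =
  ## uint64 subtraction wraps on underflow, so clamp the result to 0 instead.
  if a >= b:
    a - b
  else:
    0'u64

when isMainModule:
  doAssert saturatingSub(10'u64, 3'u64) == 7'u64
  doAssert saturatingSub(3'u64, 10'u64) == 0'u64  # would wrap to a huge value without the guard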
@@ -209,18 +209,18 @@ proc doTrustedNodeSync*(
     validatorMonitor = newClone(ValidatorMonitor.init(false, false))
     dag = ChainDAGRef.init(cfg, db, validatorMonitor, {}, eraPath = eraDir)
     backfillSlot = dag.backfill.slot
-    frontfill = dag.frontfill.valueOr(BlockId())
+    horizon = max(dag.horizon, dag.frontfill.valueOr(BlockId()).slot)

-  let canReindex = if backfillSlot <= frontfill.slot:
-    info "Database backfilled"
+  let canReindex = if backfillSlot <= horizon:
+    info "Database backfilled", backfill = dag.backfill, horizon
     true
   elif backfill:
     # +1 because we need to download the frontfill slot for the frontfill match
     # detection to kick in, in addBackfillBlock
-    let missingSlots = dag.backfill.slot - frontfill.slot + 1
+    let missingSlots = dag.backfill.slot - horizon + 1

     notice "Downloading historical blocks - you can interrupt this process at any time and it automatically be completed when you start the beacon node",
-      backfillSlot, frontfill, missingSlots
+      backfillSlot, horizon, missingSlots

     var # Same averaging as SyncManager
       syncCount = 0
@@ -260,7 +260,7 @@ proc doTrustedNodeSync*(
       syncCount += 1

       let
-        remaining = dag.backfill.slot - frontfill.slot
+        remaining = dag.backfill.slot - horizon
         slotsPerSec = speed(stamp, newStamp)
         avgSyncSpeed = avgSyncSpeed + (slotsPerSec - avgSyncSpeed) / float(syncCount)
@@ -314,9 +314,9 @@ proc doTrustedNodeSync*(
       notice "Backfilling incomplete - blocks will be downloaded when starting the node", msg = exc.msg
       false
   else:
-    let missingSlots = dag.backfill.slot - frontfill.slot
+    let missingSlots = dag.backfill.slot - horizon
     notice "Database initialized, historical blocks will be backfilled when starting the node",
-      missingSlots
+      missingSlots, backfill = dag.backfill, horizon

     false
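To make the trusted-node-sync progress math above concrete, a small worked example with hypothetical numbers (the real values come from dag.backfill.slot and the horizon computed at startup):

let
  backfillSlot = 4_500_000'u64  # where backfill currently stands (hypothetical)
  horizon      = 3_943_232'u64  # oldest slot still wanted from the network (hypothetical)
  # +1 so the frontfill-match detection in addBackfillBlock can trigger
  missingSlots = backfillSlot - horizon + 1

doAssert missingSlots == 556_769'u64

Once backfillSlot reaches the horizon (backfillSlot <= horizon), the database is reported as backfilled and nothing further is downloaded from the network.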