mirror of
https://github.com/status-im/nimbus-eth1.git
synced 2025-01-17 07:41:46 +00:00
ea268e81ff
* Clarifying/commenting FCU setup condition & small fixes, comments etc. * Update some logging * Reorg metrics updater and activation * Better `async` responsiveness why: Block import does not allow `async` task activation while executing. So allow potential switch after each imported block (rather than a group of 32 blocks.) * Handle resuming after previous sync followed by import why: In this case the ledger state is more recent than the saved sync state. So this is considered a pristine sync where any previous sync state is forgotten. This fixes some assert thrown because of inconsistent internal state at some point. * Provide option for clearing saved beacon sync state before starting syncer why: It would resume with the last state otherwise which might be undesired sometimes. Without RPC available, the syncer typically stops and terminates with the canonical head larger than the base/finalised head. The latter one will be saved as database/ledger state and the canonical head as syncer target. Resuming syncing here will repeat itself. So clearing the syncer state can prevent from starting the syncer unnecessarily avoiding useless actions. * Allow workers to request syncer shutdown from within why: In one-trick-pony mode (after resuming without RPC support) the syncer can be stopped from within soavoiding unnecessary polling. In that case, the syncer can (theoretically) be restarted externally with `startSync()`. * Terminate beacon sync after a single run target is reached why: Stops doing useless polling (typically when there is no RPC available) * Remove crufty comments * Tighten state reload condition when resuming why: Some pathological case might apply if the syncer is stopped while the distance between finalised block and head is very large and the FCU base becomes larger than the locked finalised state. * Verify that finalised number from CL is at least FCU base number why: The FCU base number is determined by the database, non zero if manually imported. 
The finalised number is passed via RPC by the CL node and will increase over time. Unless fully synced, this number will be pretty low. On the other hand, the FCU call `forkChoice()` will eventually fail if the `finalizedHash` argument refers to something outside the internal chain starting at the FCU base block. * Remove support for completing interrupted sync without RPC support why: Simplifies start/stop logic * Remove unused import
183 lines
6.2 KiB
Nim
183 lines
6.2 KiB
Nim
# Nimbus
|
|
# Copyright (c) 2023-2024 Status Research & Development GmbH
|
|
# Licensed and distributed under either of
|
|
# * MIT license (license terms in the root directory or at
|
|
# https://opensource.org/licenses/MIT).
|
|
# * Apache v2 license (license terms in the root directory or at
|
|
# https://www.apache.org/licenses/LICENSE-2.0).
|
|
# at your option. This file may not be copied, modified, or distributed
|
|
# except according to those terms.
|
|
|
|
{.push raises:[].}
|
|
|
|
import
|
|
pkg/[chronicles, chronos],
|
|
pkg/eth/[common, rlp],
|
|
pkg/stew/sorted_set,
|
|
../../../core/chain,
|
|
../worker_desc,
|
|
./update/metrics,
|
|
./headers_staged/staged_queue,
|
|
"."/[blocks_unproc, db, headers_unproc]
|
|
|
|
# ------------------------------------------------------------------------------
|
|
# Private functions
|
|
# ------------------------------------------------------------------------------
|
|
|
|
proc updateTargetChange(ctx: BeaconCtxRef; info: static[string]) =
  ## Adopt a new consensus-head target `T` announced by the CL, extending the
  ## sync layout with a fresh `(D',H']` range of unprocessed headers.
  ##
  ## Layout (see (3) in README):
  ## ::
  ##   0            C==D==H                  T
  ##   o----------------o---------------------o---->
  ##   | <-- linked --> |
  ##
  ## or
  ## ::
  ##   0==T           C==D==H
  ##   o----------------o-------------------------->
  ##   | <-- linked --> |
  ##
  ## with `T == target.consHead.number` or `T == 0`
  ##
  ## to be updated to
  ## ::
  ##   0               C==D                 D'==H'
  ##   o----------------o---------------------o---->
  ##   | <-- linked --> | <-- unprocessed --> |
  ##
  var target = ctx.target.consHead.number

  # Need: `H < T` and `C == D` -- otherwise the new target cannot extend the
  # current layout and the update is skipped (no state is modified).
  if target != 0 and target <= ctx.layout.head: # violates `H < T`
    trace info & ": update not applicable",
      H=ctx.layout.head.bnStr, T=target.bnStr
    return

  if ctx.layout.coupler != ctx.layout.dangling: # violates `C == D`
    trace info & ": update not applicable",
      C=ctx.layout.coupler.bnStr, D=ctx.layout.dangling.bnStr
    return

  # Check consistency: `C == D <= H` for maximal `C` => `D == H`
  doAssert ctx.layout.dangling == ctx.layout.head

  # Serialise the new consensus head once; the encoding is reused both for
  # deriving the head hash and for stashing the header on the database.
  let rlpHeader = rlp.encode(ctx.target.consHead)

  # Replace the layout wholesale: `C` is kept, while `D` and `H` both jump to
  # the new target so that `(C,D')` becomes the gap still to be downloaded.
  ctx.sst.layout = SyncStateLayout(
    coupler: ctx.layout.coupler,
    couplerHash: ctx.layout.couplerHash,
    dangling: target,
    danglingParent: ctx.target.consHead.parentHash,
    final: ctx.target.final,
    finalHash: ctx.target.finalHash,
    head: target,
    headHash: rlpHeader.keccak256,
    headLocked: true)

  # Save this header on the database so it needs not be fetched again from
  # somewhere else.
  ctx.dbStashHeaders(target, @[rlpHeader], info)

  # Save state
  ctx.dbStoreSyncStateLayout info

  # Update range -- the unprocessed set must be pristine before it is seeded
  # with the new `(C,D')` interval (open at both ends, hence the +1/-1).
  doAssert ctx.headersUnprocTotal() == 0
  doAssert ctx.headersUnprocBorrowed() == 0
  doAssert ctx.headersStagedQueueIsEmpty()
  ctx.headersUnprocSet(ctx.layout.coupler+1, ctx.layout.dangling-1)

  trace info & ": updated sync state", C=ctx.layout.coupler.bnStr,
    uTop=ctx.headersUnprocTop(),
    D=ctx.layout.dangling.bnStr, H=ctx.layout.head.bnStr, T=target.bnStr

  # Update, so it can be followed nicely
  ctx.updateMetrics()
|
|
|
|
|
|
proc mergeAdjacentChains(ctx: BeaconCtxRef; info: static[string]) =
  ## Merge if `C+1` == `D`
  ##
  ## When the linked chain ending at `C` has grown directly adjacent to the
  ## one starting at `D`, collapse the layout so that `C == D == H` describes
  ## a single linked chain. Persists the merged layout and updates metrics.
  ##
  if ctx.layout.coupler+1 < ctx.layout.dangling or # gap btw. `C` & `D`
     ctx.layout.coupler == ctx.layout.dangling:    # merged already
    return

  # No overlap allowed!
  doAssert ctx.layout.coupler+1 == ctx.layout.dangling

  # Verify adjacent chains: the parent hash of `D` must be the hash of `C`,
  # otherwise the two chains do not link up and the internal state is broken.
  if ctx.layout.couplerHash != ctx.layout.danglingParent:
    # FIXME: Oops -- any better idea than to defect?
    # Fix: dropped the redundant `$` in front of the second `bnStr` operand
    # so both block numbers are rendered consistently.
    raiseAssert info & ": hashes do not match" &
      " C=" & ctx.layout.coupler.bnStr & " D=" & ctx.layout.dangling.bnStr

  trace info & ": merging adjacent chains", C=ctx.layout.coupler.bnStr,
    D=ctx.layout.dangling.bnStr

  # Merge adjacent linked chains: everything collapses onto `H`; the new
  # `danglingParent` is read back from the stashed header database.
  ctx.sst.layout = SyncStateLayout(
    coupler: ctx.layout.head,                               # `C`
    couplerHash: ctx.layout.headHash,
    dangling: ctx.layout.head,                              # `D`
    danglingParent: ctx.dbPeekParentHash(ctx.layout.head).expect "Hash32",
    final: ctx.layout.final,                                # `F`
    finalHash: ctx.layout.finalHash,
    head: ctx.layout.head,                                  # `H`
    headHash: ctx.layout.headHash,
    headLocked: ctx.layout.headLocked)

  # Save state
  ctx.dbStoreSyncStateLayout info

  # Update, so it can be followed nicely
  ctx.updateMetrics()
|
|
|
|
# ------------------------------------------------------------------------------
|
|
# Public functions
|
|
# ------------------------------------------------------------------------------
|
|
|
|
proc updateSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]) =
  ## Update layout
  ##
  ## Runs three steps in order: release the head lock once the locked head
  ## has been reached, adopt a pending target change from the CL, and merge
  ## adjacent linked chains when header downloading has closed the gap.

  # Step 1: check whether the target has been reached. In that case, unlock
  # the consensus head `H` from the current layout so it can move again.
  if ctx.layout.headLocked:
    # So we have a session
    let imported = ctx.chain.latestNumber()
    if imported >= ctx.layout.head:
      doAssert ctx.layout.head == imported
      ctx.layout.headLocked = false

  # Step 2: check whether there is something to do regarding a beacon node
  # (i.e. CL) target change.
  let adoptNewTarget =
    not ctx.layout.headLocked and # there was an active import request
    ctx.target.changed and        # and there is a new target from CL
    ctx.target.final != 0         # .. ditto
  if adoptNewTarget:
    ctx.target.changed = false
    ctx.updateTargetChange info

  # Step 3: check whether header downloading is done
  ctx.mergeAdjacentChains info
|
|
|
|
|
|
proc updateBlockRequests*(ctx: BeaconCtxRef; info: static[string]) =
  ## Update block requests: extend the unprocessed block range below `C`
  ## when the half open interval `(L,C]` is non-empty and the top requested
  ## block leaves room for it.
  let
    imported = ctx.chain.latestNumber()
    coupler = ctx.layout.coupler

  if coupler <= imported:           # half open interval `(L,C]` is empty
    return
  if coupler <= ctx.blk.topRequest: # no space left below `C`
    return

  # One can fill/import/execute blocks by number from `(L,C]`
  trace info & ": updating block requests", L=imported.bnStr,
    topReq=ctx.blk.topRequest.bnStr, C=coupler.bnStr

  ctx.blocksUnprocCommit(
    0, max(imported, ctx.blk.topRequest) + 1, coupler)
  ctx.blk.topRequest = coupler
|
|
|
|
# ------------------------------------------------------------------------------
|
|
# End
|
|
# ------------------------------------------------------------------------------
|