Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-01-28 13:05:18 +00:00)
ea268e81ff
* Clarifying/commenting FCU setup condition & small fixes, comments etc.
* Update some logging
* Reorg metrics updater and activation
* Better `async` responsiveness
  why: Block import does not allow `async` task activation while executing. So allow a potential task switch after each imported block (rather than after a group of 32 blocks).
* Handle resuming after a previous sync followed by import
  why: In this case the ledger state is more recent than the saved sync state, so this is treated as a pristine sync where any previous sync state is forgotten. This fixes an assert thrown because of inconsistent internal state at some point.
* Provide option for clearing saved beacon sync state before starting the syncer
  why: It would otherwise resume with the last state, which is sometimes undesired. Without RPC available, the syncer typically stops and terminates with the canonical head larger than the base/finalised head. The latter will be saved as database/ledger state and the canonical head as syncer target. Resuming syncing here would just repeat itself, so clearing the syncer state prevents the syncer from starting unnecessarily and avoids useless actions.
* Allow workers to request syncer shutdown from within
  why: In one-trick-pony mode (after resuming without RPC support) the syncer can be stopped from within, avoiding unnecessary polling. In that case, the syncer can (theoretically) be restarted externally with `startSync()`.
* Terminate beacon sync after a single-run target is reached
  why: Stops useless polling (typically when there is no RPC available).
* Remove crufty comments
* Tighten state reload condition when resuming
  why: A pathological case might apply if the syncer is stopped while the distance between the finalised block and the head is very large and the FCU base becomes larger than the locked finalised state.
* Verify that the finalised number from the CL is at least the FCU base number (see the sketch after this list)
  why: The FCU base number is determined by the database, non-zero if manually imported. The finalised number is passed via RPC by the CL node and will increase over time; unless fully synced, this number will be pretty low. On the other hand, the FCU call `forkChoice()` will eventually fail if the `finalizedHash` argument refers to something outside the internal chain starting at the FCU base block.
* Remove support for completing an interrupted sync without RPC support
  why: Simplifies start/stop logic
* Remove unused import
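The finalised-number check from the list above can be pictured as a simple guard. A minimal Nim sketch, assuming hypothetical names (`fcuBaseNum` for the database-determined FCU base number, `clFinalizedNum` for the finalised number reported by the CL; not the actual nimbus-eth1 API):

proc acceptFinalised(fcuBaseNum, clFinalizedNum: uint64): bool =
  ## Sketch only: a finalised number below the FCU base is rejected, since
  ## `forkChoice()` would eventually fail for a `finalizedHash` lying outside
  ## the internal chain that starts at the FCU base block.
  fcuBaseNum <= clFinalizedNum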
148 lines
5.2 KiB
Nim
# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at
#     https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at
#     https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises:[].}

import
  pkg/eth/[common, p2p],
  ../../../core/chain,
  ../../protocol,
  ../worker_desc,
  ./blocks_staged/staged_queue,
  ./headers_staged/staged_queue,
  "."/[blocks_unproc, db, headers_unproc]

when enableTicker:
  import ./start_stop/ticker

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

when enableTicker:
  proc tickerUpdater(ctx: BeaconCtxRef): TickerStatsUpdater =
    ## Legacy stuff, will probably be superseded by `metrics`
    return proc: auto =
      TickerStats(
        stored: ctx.db.getSavedStateBlockNumber(),
        base: ctx.chain.baseNumber(),
        latest: ctx.chain.latestNumber(),
        coupler: ctx.layout.coupler,
        dangling: ctx.layout.dangling,
        final: ctx.layout.final,
        head: ctx.layout.head,
        headOk: ctx.layout.headLocked,
        target: ctx.target.consHead.number,
        targetOk: ctx.target.final != 0,

        nHdrStaged: ctx.headersStagedQueueLen(),
        hdrStagedTop: ctx.headersStagedQueueTopKey(),
        hdrUnprocTop: ctx.headersUnprocTop(),
        nHdrUnprocessed: ctx.headersUnprocTotal() + ctx.headersUnprocBorrowed(),
        nHdrUnprocFragm: ctx.headersUnprocChunks(),

        nBlkStaged: ctx.blocksStagedQueueLen(),
        blkStagedBottom: ctx.blocksStagedQueueBottomKey(),
        blkUnprocTop: ctx.blk.topRequest,
        nBlkUnprocessed: ctx.blocksUnprocTotal() + ctx.blocksUnprocBorrowed(),
        nBlkUnprocFragm: ctx.blocksUnprocChunks(),

        reorg: ctx.pool.nReorg,
        nBuddies: ctx.pool.nBuddies)

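# Note on the callback below: a new sync target is only accepted while no
# target update is locked, the finalised hash passed by the CL is non-zero,
# and the announced header lies strictly beyond both the current layout head
# and the currently recorded consensus head.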
proc updateBeaconHeaderCB(ctx: BeaconCtxRef): ReqBeaconSyncTargetCB =
  ## Update beacon header. This function is intended as a callback function
  ## for the RPC module.
  return proc(h: Header; f: Hash32) {.gcsafe, raises: [].} =
    # Check whether there is an update running (otherwise take the next update)
    if not ctx.target.locked:
      # The RPC module checks an empty header against a zero hash rather than `emptyRoot`
      if f != zeroHash32 and
         ctx.layout.head < h.number and
         ctx.target.consHead.number < h.number:
        ctx.target.consHead = h
        ctx.target.final = BlockNumber(0)
        ctx.target.finalHash = f
        ctx.target.changed = true

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

when enableTicker:
  proc setupTicker*(ctx: BeaconCtxRef) =
    ## Helper for `setup()`: Start ticker
    ctx.pool.ticker = TickerRef.init(ctx.tickerUpdater)

  proc destroyTicker*(ctx: BeaconCtxRef) =
    ## Helper for `release()`
    ctx.pool.ticker.destroy()
    ctx.pool.ticker = TickerRef(nil)

else:
  template setupTicker*(ctx: BeaconCtxRef) = discard
  template destroyTicker*(ctx: BeaconCtxRef) = discard

# ---------

proc setupDatabase*(ctx: BeaconCtxRef; info: static[string]) =
  ## Initialise database related stuff

  # Initialise queues and lists
  ctx.headersStagedQueueInit()
  ctx.blocksStagedQueueInit()
  ctx.headersUnprocInit()
  ctx.blocksUnprocInit()

  # Load initial state from database if there is any
  ctx.dbLoadSyncStateLayout info

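  # Note: a zero `nBodiesBatch` is interpreted as "use the default batch
  # size"; any other value below `nFetchBodiesRequest` is raised to that
  # minimum.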
  # Set blocks batch import value for block import
  if ctx.pool.nBodiesBatch < nFetchBodiesRequest:
    if ctx.pool.nBodiesBatch == 0:
      ctx.pool.nBodiesBatch = nFetchBodiesBatchDefault
    else:
      ctx.pool.nBodiesBatch = nFetchBodiesRequest

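  # Example for the ceiling division below (illustrative values only, not the
  # actual constants): with blocksStagedQueueLenMaxDefault = 8 and
  # nFetchBodiesBatchDefault = 128 the default budget is nBlocks = 1024
  # blocks; a reduced batch size of 100 then gives a queue length of
  # (1024 + 100 - 1) div 100 = 11, keeping the total number of staged blocks
  # roughly constant.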
  # Set length of `staged` queue
  if ctx.pool.nBodiesBatch < nFetchBodiesBatchDefault:
    const nBlocks = blocksStagedQueueLenMaxDefault * nFetchBodiesBatchDefault
    ctx.pool.blocksStagedQuLenMax =
      (nBlocks + ctx.pool.nBodiesBatch - 1) div ctx.pool.nBodiesBatch
  else:
    ctx.pool.blocksStagedQuLenMax = blocksStagedQueueLenMaxDefault

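# The two procs below hook the target-update callback into the common chain
# object so that the RPC module can push new consensus heads to the syncer,
# and unhook it again on release.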
proc setupRpcMagic*(ctx: BeaconCtxRef) =
  ## Helper for `setup()`: Enable external pivot update via RPC
  ctx.pool.chain.com.reqBeaconSyncTarget = ctx.updateBeaconHeaderCB

proc destroyRpcMagic*(ctx: BeaconCtxRef) =
  ## Helper for `release()`
  ctx.pool.chain.com.reqBeaconSyncTarget = ReqBeaconSyncTargetCB(nil)

# ---------

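# Worker (aka buddy) accounting: a peer is only accepted as a worker if it
# supports the `eth` protocol and its protocol state has been initialised;
# the `nBuddies` counter is maintained purely for metrics.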
proc startBuddy*(buddy: BeaconBuddyRef): bool =
  ## Convenience setting for starting a new worker
  let
    ctx = buddy.ctx
    peer = buddy.peer
  if peer.supports(protocol.eth) and peer.state(protocol.eth).initialized:
    ctx.pool.nBuddies.inc # for metrics
    return true

proc stopBuddy*(buddy: BeaconBuddyRef) =
  buddy.ctx.pool.nBuddies.dec # for metrics

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------