Jordan Hrycaj ea268e81ff
Beacon sync activation control update (#2782)
* Clarifying/commenting FCU setup condition & small fixes, comments etc.

* Update some logging

* Reorg metrics updater and activation

* Better `async` responsiveness

why:
  Block import does not allow `async` task activation while
  executing. So allow potential switch after each imported
  block (rather than a group of 32 blocks.)

* Handle resuming after previous sync followed by import

why:
  In this case the ledger state is more recent than the saved
  sync state. So this is considered a pristine sync where any
  previous sync state is forgotten.

  This fixes some assert thrown because of inconsistent internal
  state at some point.

* Provide option for clearing saved beacon sync state before starting syncer

why:
  It would resume with the last state otherwise which might be undesired
  sometimes.

  Without RPC available, the syncer typically stops and terminates with
  the canonical head larger than the base/finalised head. The latter one
  will be saved as database/ledger state and the canonical head as syncer
  target. Resuming syncing here will repeat itself.

  So clearing the syncer state can prevent the syncer from starting
  unnecessarily, avoiding useless actions.

* Allow workers to request syncer shutdown from within

why:
  In one-trick-pony mode (after resuming without RPC support) the
  syncer can be stopped from within, so avoiding unnecessary polling.
  In that case, the syncer can (theoretically) be restarted externally
  with `startSync()`.

* Terminate beacon sync after a single run target is reached

why:
  Stops doing useless polling (typically when there is no RPC available)

* Remove crufty comments

* Tighten state reload condition when resuming

why:
  Some pathological case might apply if the syncer is stopped while the
  distance between finalised block and head is very large and the FCU
  base becomes larger than the locked finalised state.

* Verify that finalised number from CL is at least FCU base number

why:
  The FCU base number is determined by the database, non zero if
  manually imported. The finalised number is passed via RPC by the CL
  node and will increase over time. Unless fully synced, this number
  will be pretty low.

  On the other hand, the FCU call `forkChoice()` will eventually fail
  if the `finalizedHash` argument refers to something outside the
  internal chain starting at the FCU base block.

* Remove support for completing interrupted sync without RPC support

why:
  Simplifies start/stop logic

* Remove unused import
2024-10-28 16:22:04 +00:00

172 lines
5.7 KiB
Nim

# Nimbus
# Copyright (c) 2023-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at
# https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at
# https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises:[].}
import
pkg/[chronicles, chronos],
pkg/eth/[common, rlp],
pkg/stew/[byteutils, interval_set, sorted_set],
pkg/results,
"../../.."/[common, core/chain, db/storage_types],
../worker_desc,
"."/[blocks_unproc, headers_unproc]
const
  # Database key under which the beacon sync state layout is stored and
  # looked up (see `fetchSyncStateLayout()` / `dbStoreSyncStateLayout()`.)
  LhcStateKey = 1.beaconStateKey
# ------------------------------------------------------------------------------
# Private debugging & logging helpers
# ------------------------------------------------------------------------------
# Render `Hash32` values as hex strings in `chronicles` log output.
formatIt(Hash32):
  it.data.toHex
# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------
proc fetchSyncStateLayout(ctx: BeaconCtxRef): Opt[SyncStateLayout] =
  ## Try to read back the sync state layout previously saved under
  ## `LhcStateKey`. Returns `err()` when the key is absent or the stored
  ## blob does not RLP-decode as a `SyncStateLayout`.
  let blob = ctx.db.ctx.getKvt().get(LhcStateKey.toOpenArray).valueOr:
    return err()
  try:
    result = ok(rlp.decode(blob, SyncStateLayout))
  except RlpError:
    result = err()
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc dbStoreSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]) =
  ## Save chain layout to persistent db
  ##
  ## Does nothing when the layout is unchanged since the last save. After
  ## writing the layout to the kvt, it is flushed to disk only when no
  ## transaction is pending (`ctx.db.level() == 0`).
  if ctx.layout == ctx.sst.lastLayout:
    # Unchanged since last save -- skip the write entirely
    return

  let data = rlp.encode(ctx.layout)
  ctx.db.ctx.getKvt().put(LhcStateKey.toOpenArray, data).isOkOr:
    raiseAssert info & " put() failed: " & $$error

  # While executing blocks there are frequent save cycles. Otherwise, an
  # extra save request might help to pick up an interrupted sync session.
  let txLevel = ctx.db.level()
  if txLevel == 0:
    # No transaction pending -- safe to flush the ledger state to disk
    let number = ctx.db.getSavedStateBlockNumber()
    ctx.db.persistent(number).isOkOr:
      debug info & ": failed to save sync state persistently", error=($$error)
      return
  else:
    # A transaction is open; the kvt entry is written but not flushed
    trace info & ": sync state not saved, tx pending", txLevel
    return

  trace info & ": saved sync state persistently"
proc dbLoadSyncStateLayout*(ctx: BeaconCtxRef; info: static[string]) =
  ## Restore chain layout from persistent db
  ##
  ## If a saved layout exists and is still consistent with the current
  ## ledger state (FCU base not beyond the saved finalised block, latest
  ## imported block still below the saved head), resume from it and
  ## re-register the unprocessed block/header ranges. Otherwise start with
  ## a pristine layout anchored at the latest imported block.
  let
    rc = ctx.fetchSyncStateLayout()
    latest = ctx.chain.latestNumber()

  # See `dbLoadSyncStateAvailable()` for comments
  if rc.isOk and
     ctx.chain.baseNumber() <= rc.value.final and
     latest < rc.value.head:
    ctx.sst.layout = rc.value

    # Add interval of unprocessed block range `(L,C]` from `README.md`
    ctx.blocksUnprocSet(latest+1, ctx.layout.coupler)
    ctx.blk.topRequest = ctx.layout.coupler

    # Add interval of unprocessed header range `(C,D)` from `README.md`
    ctx.headersUnprocSet(ctx.layout.coupler+1, ctx.layout.dangling-1)

    trace info & ": restored sync state", L=latest.bnStr,
      C=ctx.layout.coupler.bnStr, D=ctx.layout.dangling.bnStr,
      F=ctx.layout.final.bnStr, H=ctx.layout.head.bnStr

  else:
    # Pristine sync: collapse all layout markers onto the latest block
    let
      latestHash = ctx.chain.latestHash()
      latestParent = ctx.chain.latestHeader.parentHash

    ctx.sst.layout = SyncStateLayout(
      coupler:        latest,
      couplerHash:    latestHash,
      dangling:       latest,
      danglingParent: latestParent,
      # There is no need to record a separate finalised head `F` as its only
      # use is to serve as second argument in `forkChoice()` when committing
      # a batch of imported blocks. Currently, there are no blocks to fetch
      # and import. The system must wait for instructions and update the fields
      # `final` and `head` while the latter will be increased so that import
      # can start.
      final:          latest,
      finalHash:      latestHash,
      head:           latest,
      headHash:       latestHash,
      headLocked:     false)

    trace info & ": new sync state", L="C", C="D", D="F", F="H", H=latest.bnStr

  # Remember what was loaded so `dbStoreSyncStateLayout()` can skip
  # redundant saves
  ctx.sst.lastLayout = ctx.layout
# ------------------
proc dbStashHeaders*(
    ctx: BeaconCtxRef;
    first: BlockNumber;
    revBlobs: openArray[seq[byte]];
    info: static[string];
      ) =
  ## Temporarily store header chain to persistent db (oblivious of the chain
  ## layout.) The headers should not be stashed if they are interpreted and
  ## executed on the database, already.
  ##
  ## The `revBlobs[]` arguments are passed in reverse order so that block
  ## numbers apply as
  ## ::
  ##    #first     -- revBlobs[^1]
  ##    #(first+1) -- revBlobs[^2]
  ##    ..
  ##
  let
    kvt = ctx.db.ctx.getKvt()
    # Highest block number covered by `revBlobs[]`
    last = first + revBlobs.len.uint64 - 1
  for n,data in revBlobs:
    # `revBlobs[0]` belongs to block `last`, `revBlobs[^1]` to `first`
    let key = beaconHeaderKey(last - n.uint64)
    kvt.put(key.toOpenArray, data).isOkOr:
      raiseAssert info & ": put() failed: " & $$error
proc dbPeekHeader*(ctx: BeaconCtxRef; num: BlockNumber): Opt[Header] =
  ## Retrieve some stashed header.
  ##
  ## Looks up block number `num` among the headers stored by
  ## `dbStashHeaders()`. Returns `err()` when the entry is missing or
  ## cannot be RLP-decoded as a `Header`.
  let rc = ctx.db.ctx.getKvt().get(beaconHeaderKey(num).toOpenArray)
  if rc.isErr:
    return err()
  try:
    ok(rlp.decode(rc.value, Header))
  except RlpError:
    err()
proc dbPeekParentHash*(ctx: BeaconCtxRef; num: BlockNumber): Opt[Hash32] =
  ## Retrieve some stashed parent hash.
  ##
  ## Convenience wrapper around `dbPeekHeader()`: propagates `err()` when
  ## header `num` is not stashed.
  let hdr = ? ctx.dbPeekHeader(num)
  ok(hdr.parentHash)
proc dbUnstashHeader*(ctx: BeaconCtxRef; bn: BlockNumber) =
  ## Remove header from temporary DB list
  ##
  ## Deletes the entry for block number `bn`; the `del()` result is
  ## intentionally ignored (removing a missing entry is harmless.)
  let key = beaconHeaderKey(bn)
  discard ctx.db.ctx.getKvt().del(key.toOpenArray)
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------