# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  chronicles,
  chronos,
  eth/p2p,
  stew/[interval_set, keyed_queue],
  "../../.."/[handlers/eth, misc/ticker, protocol],
  "../.."/[range_desc, worker_desc],
  ../db/[hexary_desc, snapdb_pivot],
  ../get/get_error,
  ./pass_desc,
  ./pass_snap/helper/[beacon_header, storage_queue],
  ./pass_snap/[pivot, snap_pass_desc]

logScope:
  topics = "snap-play"

const
  extraTraceMessages = false # or true
    ## Enables additional logging noise

  extraScrutinyDoubleCheckCompleteness = 1_000_000
    ## Double check whether the database is complete (debugging, testing).
    ## This action is slow and intended for debugging and testing use only.
    ## The numeric value limits the check to the maximal number of accounts
    ## in the database.
    ##
    ## Set to `0` to disable.

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template logTxt(info: static[string]): static[string] =
  "Snap worker " & info

template ignoreException(info: static[string]; code: untyped) =
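  ## Run `code` and log (rather than propagate) any `CatchableError`
  ## exception raised while it executes.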
  try:
    code
  except CatchableError as e:
    error "Exception at " & info & ":", name=($e.name), msg=(e.msg)

# --------------

proc disableWireServices(ctx: SnapCtxRef) =
  ## Helper for `setup()`: Temporarily stop useless wire protocol services.
  ctx.ethWireCtx.txPoolEnabled = false

proc enableWireServices(ctx: SnapCtxRef) =
  ## Helper for `release()`
  ctx.ethWireCtx.txPoolEnabled = true

# --------------

proc detectSnapSyncRecovery(ctx: SnapCtxRef) =
  ## Helper for `setup()`: Initiate snap sync recovery (if any)
  let rc = ctx.pool.snapDb.pivotRecoverDB()
  if rc.isOk:
    let snap = ctx.pool.pass
    snap.recovery = RecoveryRef(state: rc.value)
    ctx.daemon = true

    # Set up early initial pivot
    snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)
    trace logTxt "recovery started",
      checkpoint=(snap.pivotTable.topNumber.toStr & "(0)")
    if not ctx.pool.ticker.isNil:
      ctx.pool.ticker.startRecovery()

proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
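  ## Helper for `snapSyncDaemon()`: replay one recovery checkpoint and, if a
  ## predecessor record exists, queue it up as the next one. Returns `true`
  ## as long as the recovery process should be continued.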
  let
    snap = ctx.pool.pass
    recov = snap.recovery
  if recov.isNil:
    return false

  let
    checkpoint = recov.state.header.blockNumber.toStr & "(" & $recov.level & ")"
    topLevel = recov.level == 0
    env = block:
      let rc = snap.pivotTable.eq recov.state.header.stateRoot
      if rc.isErr:
        error logTxt "recovery pivot context gone", checkpoint, topLevel
        return false
      rc.value

  # Cosmetics: allow other processes (e.g. ticker) to log the current recovery
  # state. There is no other intended purpose of this wait state.
  await sleepAsync 1100.milliseconds

  #when extraTraceMessages:
  #  trace "Recovery continued ...", checkpoint, topLevel,
  #    nAccounts=recov.state.nAccounts, nDangling=recov.state.dangling.len

  # Update pivot data from recovery checkpoint
  env.pivotRecoverFromCheckpoint(ctx, topLevel)

  # Fetch next recovery record if there is any
  if recov.state.predecessor.isZero:
    #when extraTraceMessages:
    #  trace "Recovery done", checkpoint, topLevel
    return false
  let rc = ctx.pool.snapDb.pivotRecoverDB(recov.state.predecessor)
  if rc.isErr:
    when extraTraceMessages:
      trace logTxt "stale pivot, recovery stopped", checkpoint, topLevel
    return false

  # Set up next level pivot checkpoint
  snap.recovery = RecoveryRef(
    state: rc.value,
    level: recov.level + 1)

  # Push onto pivot table and continue recovery (i.e. do not stop it yet)
  snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)

  return true # continue recovery

# --------------

proc snapSyncCompleteOk(
    env: SnapPivotRef;               # Current pivot environment
    ctx: SnapCtxRef;                 # Some global context
      ): Future[bool]
      {.async.} =
  ## Check whether this pivot is fully downloaded. The `async` part is for
  ## debugging only and should not be used on a large database as it uses
  ## quite a bit of computation resources.
  if env.pivotCompleteOk():
    when 0 < extraScrutinyDoubleCheckCompleteness:
      # Larger sizes might be infeasible
      if env.nAccounts <= extraScrutinyDoubleCheckCompleteness:
        if not await env.pivotVerifyComplete(ctx):
          error logTxt "inconsistent state, pivot incomplete",
            pivot=env.stateHeader.blockNumber.toStr, nAccounts=env.nAccounts
          return false
    ctx.pool.pass.completedPivot = env
    ctx.poolMode = true # Fast sync mode must be synchronized among all peers
    return true

# ------------------------------------------------------------------------------
# Private functions, snap sync admin handlers
# ------------------------------------------------------------------------------

proc snapSyncSetup(ctx: SnapCtxRef) =
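  ## Global set up: allocate the snap pass descriptor, hook up the ticker and
  ## check whether an earlier sync session can be recovered.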
  # Set up snap sync descriptor
  ctx.pool.pass = SnapPassCtxRef()

  # For snap sync book keeping
  ctx.pool.pass.coveredAccounts = NodeTagRangeSet.init()
  ctx.pool.ticker.init(cb = ctx.pool.pass.pivotTable.tickerStats(ctx))

  ctx.disableWireServices()          # Stop unwanted public services
  ctx.detectSnapSyncRecovery()       # Check for recovery mode

proc snapSyncRelease(ctx: SnapCtxRef) =
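  ## Global clean up: re-enable the wire protocol services and stop the ticker.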
  ctx.enableWireServices()           # re-enable public services
  ctx.pool.ticker.stop()

proc snapSyncStart(buddy: SnapBuddyRef): bool =
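  ## Set up a new worker peer. The peer is accepted (i.e. `true` is returned)
  ## only if it supports both the `eth` and the `snap` protocol.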
  let
    ctx = buddy.ctx
    peer = buddy.peer
  if peer.supports(protocol.snap) and
     peer.supports(protocol.eth) and
     peer.state(protocol.eth).initialized:
    ctx.pool.ticker.startBuddy()
    buddy.ctrl.multiOk = false # confirm default mode for soft restart
    buddy.only.errors = GetErrorStatsRef()
    return true

proc snapSyncStop(buddy: SnapBuddyRef) =
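  ## Clean up this worker peer (only the ticker book keeping is affected.)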
  buddy.ctx.pool.ticker.stopBuddy()

# ------------------------------------------------------------------------------
# Private functions, snap sync action handlers
# ------------------------------------------------------------------------------

proc snapSyncPool(buddy: SnapBuddyRef, last: bool, laps: int): bool =
  ## Enabled when `buddy.ctrl.poolMode` is `true`
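  ## The handler returns `true` when the loop over all buddy peers may stop
  ## right away (see the body comments below for the particular cases.)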
  ##
  let
    ctx = buddy.ctx
    snap = ctx.pool.pass
    env = snap.completedPivot

  # Check whether the snapshot is complete. If so, switch to full sync mode.
  # This process needs to be applied to all buddy peers.
  if not env.isNil:
    ignoreException("snapSyncPool"):
      # Stop all peers
      buddy.snapSyncStop()
      # After the last buddy peer was stopped switch to full sync mode
      # and repeat that loop over buddy peers for re-starting them.
      if last:
        when extraTraceMessages:
          trace logTxt "switch to full sync", peer=buddy.peer, last, laps,
            pivot=env.stateHeader.blockNumber.toStr,
            mode=ctx.pool.syncMode.active, state=buddy.ctrl.state
        ctx.snapSyncRelease()
        ctx.pool.syncMode.active = FullSyncMode
        ctx.passActor.setup(ctx)
        ctx.poolMode = true # repeat looping over peers
        ctx.pool.fullHeader = some(env.stateHeader) # Full sync starts here

    return false # do not stop: the loop ends by itself once all peers are done

  # Clean up empty pivot slots (never the top one.) This needs to be run on
  # a single peer only. So the loop can stop immediately (returning `true`)
  # after this job is done.
  var rc = snap.pivotTable.beforeLast
  while rc.isOK:
    let (key, env) = (rc.value.key, rc.value.data)
    if env.fetchAccounts.processed.isEmpty:
      snap.pivotTable.del key
    rc = snap.pivotTable.prev(key)
  true # Stop ok

proc snapSyncDaemon(ctx: SnapCtxRef) {.async.} =
  ## Enabled while `ctx.daemon` is `true`
  ##
  if not ctx.pool.pass.recovery.isNil:
    if not await ctx.recoveryStepContinue():
      # Done, stop recovery
      ctx.pool.pass.recovery = nil
      ctx.daemon = false

      # Update logging
      if not ctx.pool.ticker.isNil:
        ctx.pool.ticker.stopRecovery()

proc snapSyncSingle(buddy: SnapBuddyRef) {.async.} =
  ## Enabled while
  ## * `buddy.ctrl.multiOk` is `false`
  ## * `buddy.ctrl.poolMode` is `false`
  ##
  # External beacon header updater
  await buddy.beaconHeaderUpdateFromFile()

  # Dedicate some process cycles to the recovery process (if any)
  if not buddy.ctx.pool.pass.recovery.isNil:
    when extraTraceMessages:
      trace "Throttling single mode in favour of recovery", peer=buddy.peer
    await sleepAsync 900.milliseconds

  await buddy.pivotApprovePeer()
  buddy.ctrl.multiOk = true

proc snapSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
  ## Enabled while
  ## * `buddy.ctrl.multiOk` is `true`
  ## * `buddy.ctrl.poolMode` is `false`
  ##
  let
    ctx = buddy.ctx

    # Fetch latest state root environment
    env = block:
      let rc = ctx.pool.pass.pivotTable.lastValue
      if rc.isErr:
        buddy.ctrl.multiOk = false
        return # nothing to do
      rc.value

  # Check whether this pivot is fully downloaded
  if await env.snapSyncCompleteOk(ctx):
    return

  # If this is a new snap sync pivot, the previous one can be cleaned up and
  # archived. There is no point in keeping some older space consuming state
  # data any longer.
  ctx.pool.pass.pivotTable.beforeTopMostlyClean()

  let
    peer = buddy.peer
    pivot = env.stateHeader.blockNumber.toStr # for logging
    fa = env.fetchAccounts

  when extraTraceMessages:
    trace "Multi sync runner", peer, pivot, nAccounts=env.nAccounts,
      processed=fa.processed.fullPC3, nStoQ=env.storageQueueTotal(),
      nSlotLists=env.nSlotLists

  # This is the syncing work horse which downloads the database
  await env.execSnapSyncAction(buddy)

  # Various logging entries (after accounts and storage slots download)
  let
    nAccounts = env.nAccounts
    nSlotLists = env.nSlotLists
    processed = fa.processed.fullPC3

  # Archive this pivot environment if it has become stale
  if env.archived:
    when extraTraceMessages:
      trace logTxt "mothballing", peer, pivot, nAccounts, nSlotLists
    env.pivotMothball()
    return

  # Save state so sync can be resumed at next start up
  let rc = env.saveCheckpoint(ctx)
  if rc.isOk:
    when extraTraceMessages:
      trace logTxt "saved checkpoint", peer, pivot, nAccounts,
        processed, nStoQ=env.storageQueueTotal(), nSlotLists,
        blobSize=rc.value
    return

  error logTxt "failed to save checkpoint", peer, pivot, nAccounts,
    processed, nStoQ=env.storageQueueTotal(), nSlotLists,
    error=rc.error

  # Check whether this pivot is fully downloaded
  discard await env.snapSyncCompleteOk(ctx)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc passSnap*: auto =
  ## Return snap sync handler environment
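  # Maps the handlers above onto the sync pass descriptor. The scheduler is
  # expected to call `setup`/`release` for the global context, `start`/`stop`
  # per worker peer, and `pool`/`daemon`/`single`/`multi` for the run modes
  # documented with the respective handlers (see also the `ctx.passActor`
  # dispatch in `snapSyncPool()` above.)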
  PassActorRef(
    setup:   snapSyncSetup,
    release: snapSyncRelease,
    start:   snapSyncStart,
    stop:    snapSyncStop,
    pool:    snapSyncPool,
    daemon:  snapSyncDaemon,
    single:  snapSyncSingle,
    multi:   snapSyncMulti)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------