# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#     http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#     http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
|
import
|
2023-01-30 17:50:58 +00:00
|
|
|
std/[options, sets, strutils],
|
2022-05-17 11:09:49 +00:00
|
|
|
chronicles,
|
|
|
|
chronos,
|
2022-11-01 15:07:44 +00:00
|
|
|
eth/[common, p2p],
|
2022-08-04 08:04:30 +00:00
|
|
|
stew/[interval_set, keyed_queue],
|
2023-01-17 09:28:14 +00:00
|
|
|
../../common as nimcom,
|
2022-08-12 15:42:07 +00:00
|
|
|
../../db/select_backend,
|
2022-11-25 14:56:42 +00:00
|
|
|
../../utils/prettify,
|
2023-01-18 08:31:57 +00:00
|
|
|
".."/[handlers, protocol, sync_desc],
|
2022-12-19 21:22:09 +00:00
|
|
|
./worker/[pivot, ticker],
|
Prep for full sync after snap make 4 (#1282)
* Re-arrange fetching storage slots in batch module
why;
Previously, fetching partial slot ranges first has a chance of
terminating the worker peer 9due to network error) while there were
many inheritable storage slots on the queue.
Now, inheritance is checked first, then full slot ranges and finally
partial ranges.
* Update logging
* Bundled node information for healing into single object `NodeSpecs`
why:
Previously, partial paths and node keys were kept in separate variables.
This approach was error prone due to copying/reassembling function
argument objects.
As all partial paths, keys, and node data types are more or less handled
as `Blob`s over the network (using Eth/6x, or Snap/1) it makes sense to
hold these `Blob`s as named field in a single object (even if not all
fields are active for the current purpose.)
* For good housekeeping, using `NodeKey` type only for account keys
why:
previously, a mixture of `NodeKey` and `Hash256` was used. Now, only
state or storage root keys use the `Hash256` type.
* Always accept latest pivot (and not a slightly older one)
why;
For testing it was tried to use a slightly older pivot state root than
available. Some anecdotal tests seemed to suggest an advantage so that
more peers are willing to serve on that older pivot. But this could not
be confirmed in subsequent tests (still anecdotal, though.)
As a side note, the distance of the latest pivot to its predecessor is
at least 128 (or whatever the constant `minPivotBlockDistance` is
assigned to.)
* Reshuffle name components for some file and function names
why:
Clarifies purpose:
"storages" becomes: "storage slots"
"store" becomes: "range fetch"
* Stash away currently unused modules in sub-folder named "notused"
2022-10-27 13:49:28 +00:00
|
|
|
./worker/com/com_error,
|
2022-11-28 09:03:23 +00:00
|
|
|
./worker/db/[hexary_desc, snapdb_desc, snapdb_pivot],
|
2023-01-17 09:28:14 +00:00
|
|
|
"."/[range_desc, worker_desc]
|
2022-05-24 08:07:39 +00:00
|
|
|
|
2023-01-30 22:10:23 +00:00
|
|
|
{.push raises: [].}

logScope:
  topics = "snap-buddy"

const
  extraTraceMessages = false or true
    ## Debug toggle: flip the `false or` part off to silence the extra
    ## logging noise emitted by this module.
|
|
|
|
|
2022-09-30 08:22:14 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
2023-01-17 09:28:14 +00:00
|
|
|
# Private helpers
|
2022-09-30 08:22:14 +00:00
|
|
|
# ------------------------------------------------------------------------------
|
|
|
|
|
2023-01-17 09:28:14 +00:00
|
|
|
template noExceptionOops(info: static[string]; code: untyped) =
  ## Run `code` and convert any escaping `CatchableError` into a defect via
  ## `raiseAssert`. Used at call sites that are not expected to fail, so a
  ## failure there indicates a programming error rather than a runtime
  ## condition worth handling.
  ##
  ## Fix: corrected the typo "Inconveivable" in the defect message.
  try:
    code
  except CatchableError as e:
    raiseAssert "Inconceivable (" &
      info & "): name=" & $e.name & " msg=" & e.msg
|
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
  ## Advance pivot recovery by a single checkpoint step. Returns `true` while
  ## an older predecessor checkpoint remains to be processed, and `false`
  ## once recovery is finished or cannot be continued.
  let recov = ctx.pool.recovery
  if recov.isNil:
    return false

  let
    checkpoint =
      "#" & $recov.state.header.blockNumber & "(" & $recov.level & ")"
    topLevel = recov.level == 0
    env = block:
      let rc = ctx.pool.pivotTable.eq recov.state.header.stateRoot
      if rc.isErr:
        error "Recovery pivot context gone", checkpoint, topLevel
        return false
      rc.value

  # Cosmetics: allow other processes (e.g. ticker) to log the current recovery
  # state. There is no other intended purpose of this wait state.
  await sleepAsync 1100.milliseconds

  #when extraTraceMessages:
  #  trace "Recovery continued ...", checkpoint, topLevel,
  #    nAccounts=recov.state.nAccounts, nDangling=recov.state.dangling.len

  # Re-load the pivot data from the recovery checkpoint.
  env.recoverPivotFromCheckpoint(ctx, topLevel)

  # No predecessor checkpoint => recovery chain fully processed.
  if recov.state.predecessor.isZero:
    #when extraTraceMessages:
    #  trace "Recovery done", checkpoint, topLevel
    return false

  # Fetch the next (older) recovery record from the database.
  let rc = ctx.pool.snapDb.recoverPivot(recov.state.predecessor)
  if rc.isErr:
    when extraTraceMessages:
      trace "Recovery stopped at pivot stale checkpoint", checkpoint, topLevel
    return false

  # Set up the next-level pivot checkpoint.
  ctx.pool.recovery = SnapRecoveryRef(
    state: rc.value,
    level: recov.level + 1)

  # Push onto pivot table and continue recovery (i.e. do not stop it yet.)
  ctx.pool.pivotTable.reverseUpdate(ctx.pool.recovery.state.header, ctx)

  return true # continue recovery
|
|
|
|
|
# ------------------------------------------------------------------------------
# Public start/stop and admin functions
# ------------------------------------------------------------------------------
2022-08-04 08:04:30 +00:00
|
|
|
proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool =
  ## Global set up: initialise shared pool state, disable the tx-pool while
  ## snap syncing, wire up the beacon-header callback, open the snap DB and
  ## optionally start the ticker. When a recovery checkpoint is found on disk
  ## (and recovery is not disabled), recovery mode is armed as well.
  ctx.pool.coveredAccounts = NodeTagRangeSet.init()
  noExceptionOops("worker.setup()"):
    ctx.ethWireCtx.txPoolEnabled false
  ctx.chain.com.syncReqNewHead = ctx.pivotUpdateBeaconHeaderCB
  # Use the configured backend if there is one, otherwise fall back to the
  # chain database.
  ctx.pool.snapDb =
    if ctx.pool.dbBackend.isNil: SnapDbRef.init(ctx.chain.db.db)
    else: SnapDbRef.init(ctx.pool.dbBackend)
  if tickerOK:
    ctx.pool.ticker = TickerRef.init(ctx.pool.pivotTable.tickerStats(ctx))
  else:
    trace "Ticker is disabled"

  # Check for recovery mode
  if not ctx.pool.noRecovery:
    let rc = ctx.pool.snapDb.recoverPivot()
    if rc.isOk:
      ctx.pool.recovery = SnapRecoveryRef(state: rc.value)
      ctx.daemon = true

      # Set up early initial pivot
      ctx.pool.pivotTable.reverseUpdate(ctx.pool.recovery.state.header, ctx)
      trace "Recovery started",
        checkpoint=("#" & $ctx.pool.pivotTable.topNumber() & "(0)")
      if not ctx.pool.ticker.isNil:
        ctx.pool.ticker.startRecovery()
  true
|
2022-08-04 08:04:30 +00:00
|
|
|
|
|
|
|
proc release*(ctx: SnapCtxRef) =
  ## Global clean up: stop and drop the ticker, re-enable the tx-pool and
  ## detach the beacon-header callback (mirrors `setup()`.)
  if not ctx.pool.ticker.isNil:
    ctx.pool.ticker.stop()
    ctx.pool.ticker = nil
  noExceptionOops("worker.release()"):
    ctx.ethWireCtx.txPoolEnabled true
  ctx.chain.com.syncReqNewHead = nil
|
2022-06-16 08:58:50 +00:00
|
|
|
|
2022-08-04 08:04:30 +00:00
|
|
|
proc start*(buddy: SnapBuddyRef): bool =
  ## Initialise worker peer. Only peers speaking both `snap` and `eth` with
  ## an initialised `eth` state are accepted; returns `false` otherwise.
  let
    ctx = buddy.ctx
    peer = buddy.peer
  if peer.supports(protocol.snap) and
     peer.supports(protocol.eth) and
     peer.state(protocol.eth).initialized:
    # Fresh per-peer error statistics for the com layer.
    buddy.only.errors = ComErrorStatsRef()
    if not ctx.pool.ticker.isNil:
      ctx.pool.ticker.startBuddy()
    return true
|
|
|
|
|
2022-08-04 08:04:30 +00:00
|
|
|
proc stop*(buddy: SnapBuddyRef) =
  ## Clean up this peer: de-register it from the ticker statistics.
  let ctx = buddy.ctx
  if not ctx.pool.ticker.isNil:
    ctx.pool.ticker.stopBuddy()
|
2022-06-06 13:42:08 +00:00
|
|
|
|
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
2022-11-14 14:13:00 +00:00
|
|
|
proc runDaemon*(ctx: SnapCtxRef) {.async.} =
  ## Enabled while `ctx.daemon` is `true`
  ##
  ## Drives pivot recovery, one checkpoint step per invocation. When there
  ## is nothing (left) to recover, the daemon flag is cleared again.
  if ctx.pool.recovery.isNil:
    return
  if await ctx.recoveryStepContinue():
    return

  # Done -- switch off recovery and the daemon with it.
  ctx.pool.recovery = nil
  ctx.daemon = false

  # Update logging
  if not ctx.pool.ticker.isNil:
    ctx.pool.ticker.stopRecovery()
|
2022-11-25 14:56:42 +00:00
|
|
|
|
2022-11-14 14:13:00 +00:00
|
|
|
|
|
|
|
proc runSingle*(buddy: SnapBuddyRef) {.async.} =
  ## Enabled while
  ## * `buddy.ctrl.multiOk` is `false`
  ## * `buddy.ctrl.poolMode` is `false`
  ##
  ## Negotiates a pivot with the peer, then releases the worker into
  ## multi-peer mode.
  await buddy.pivotApprovePeer()
  buddy.ctrl.multiOk = true
|
2022-05-09 14:04:48 +00:00
|
|
|
|
|
|
|
|
2022-11-25 14:56:42 +00:00
|
|
|
proc runPool*(buddy: SnapBuddyRef, last: bool): bool =
  ## Enabled when `buddy.ctrl.poolMode` is `true`
  ##
  ## Resets pool mode and prunes pivot-table entries that never made any
  ## account-fetch progress. Returning `true` tells the scheduler that a
  ## single pass is sufficient (no need to visit the remaining peers.)
  let ctx = buddy.ctx
  ctx.poolMode = false
  result = true

  # Clean up empty pivot slots (never the top one)
  var rc = ctx.pool.pivotTable.beforeLast
  while rc.isOK:
    let (key, env) = (rc.value.key, rc.value.data)
    if env.fetchAccounts.processed.isEmpty:
      ctx.pool.pivotTable.del key
    # NOTE(review): after `del key` the subsequent `prev(key)` looks up a
    # removed key -- presumably this yields an error and ends the loop early;
    # confirm against `keyed_queue` semantics.
    rc = ctx.pool.pivotTable.prev(key)
|
2023-01-17 09:28:14 +00:00
|
|
|
|
2022-05-17 11:09:49 +00:00
|
|
|
|
2022-08-04 08:04:30 +00:00
|
|
|
proc runMulti*(buddy: SnapBuddyRef) {.async.} =
  ## Enabled while
  ## * `buddy.ctx.multiOk` is `true`
  ## * `buddy.ctx.poolMode` is `false`
  ##
  ## Main per-peer sync driver: picks the current pivot, runs the snapshot
  ## download action and finally either mothballs a stale pivot or saves a
  ## recovery checkpoint.
  let
    ctx = buddy.ctx
    peer = buddy.peer

  # Set up current state root environment for accounts snapshot
  let
    env = block:
      let rc = ctx.pool.pivotTable.lastValue
      if rc.isErr:
        return # nothing to do
      rc.value
    pivot = "#" & $env.stateHeader.blockNumber # for logging

  buddy.only.pivotEnv = env

  # Full sync processing based on current snapshot
  # ----------------------------------------------
  if env.storageDone:
    trace "Snap full sync -- not implemented yet", peer, pivot
    await sleepAsync(5.seconds)
    return

  # Snapshot sync processing
  # ------------------------

  # If this is a new pivot, the previous one can be cleaned up. There is no
  # point in keeping some older space consuming state data any longer.
  ctx.pool.pivotTable.beforeTopMostlyClean()

  when extraTraceMessages:
    block:
      let
        nAccounts {.used.} = env.nAccounts
        nSlotLists {.used.} = env.nSlotLists
        processed {.used.} = env.fetchAccounts.processed.fullFactor.toPC(2)
        nStoQu {.used.} = env.fetchStorageFull.len + env.fetchStoragePart.len
      trace "Multi sync runner", peer, pivot, nAccounts, nSlotLists, processed,
        nStoQu

  # This one is the syncing work horse which downloads the database
  await env.execSnapSyncAction(buddy)

  # Various logging entries (after accounts and storage slots download)
  let
    nAccounts = env.nAccounts
    nSlotLists = env.nSlotLists
    processed = env.fetchAccounts.processed.fullFactor.toPC(2)
    nStoQu = env.fetchStorageFull.len + env.fetchStoragePart.len

  if env.archived:
    # Archive pivot if it became stale
    when extraTraceMessages:
      trace "Mothballing", peer, pivot, nAccounts, nSlotLists
    env.pivotMothball()
  else:
    # Save state so sync can be partially resumed at next start up
    let rc = env.saveCheckpoint(ctx)
    if rc.isErr:
      error "Failed to save recovery checkpoint", peer, pivot, nAccounts,
        nSlotLists, processed, nStoQu, error=rc.error
    else:
      when extraTraceMessages:
        trace "Saved recovery checkpoint", peer, pivot, nAccounts, nSlotLists,
          processed, nStoQu, blobSize=rc.value
|
2022-11-08 18:56:04 +00:00
|
|
|
|
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------