# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  std/[hashes, math, options, sets, strutils],
  chronicles,
  chronos,
  eth/[common, p2p],
  stew/[interval_set, keyed_queue],
  ../../db/select_backend,
  ".."/[handlers, misc/best_pivot, protocol, sync_desc],
  ./worker/[heal_accounts, heal_storage_slots,
    range_fetch_accounts, range_fetch_storage_slots, ticker],
  ./worker/com/com_error,
  ./worker/db/[snapdb_check, snapdb_desc],
  "."/[constants, range_desc, worker_desc]

{.push raises: [Defect].}

logScope:
  topics = "snap-buddy"

const
  extraTraceMessages = false or true
    ## Enabled additional logging noise

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
  ## Mean and standard deviation from the sum and the sum of squares of a
  ## sample of size `length`.
  if 0 < length:
    result[0] = sum / length.float
    result[1] = sqrt(sqSum / length.float - result[0] * result[0])
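
# Worked example (illustrative only): for the sample [1.0, 2.0, 3.0] the
# accumulated arguments are sum = 6.0 and sqSum = 1 + 4 + 9 = 14.0, so
# `meanStdDev(6.0, 14.0, 3)` yields the mean 6.0/3 = 2.0 and the standard
# deviation sqrt(14.0/3 - 2.0^2) = sqrt(2/3) ~ 0.816, which is the identity
# Var(X) = E[X^2] - E[X]^2 applied to the running sums.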
template noExceptionOops(info: static[string]; code: untyped) =
  try:
    code
  except CatchableError as e:
    raiseAssert "Inconceivable (" & info & "): name=" & $e.name & " msg=" & e.msg
  except Defect as e:
    raise e
  except Exception as e:
    raiseAssert "Ooops (" & info & "): name=" & $e.name & " msg=" & e.msg
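
# Minimal usage sketch (illustrative; `riskyCall()` is a hypothetical raising
# proc): the template re-raises defects unchanged and turns every other
# exception into an assertion defect, so wrapped code stays compatible with
# the module-wide `{.push raises: [Defect].}` pragma:
#
#   noExceptionOops("example()"):
#     riskyCall()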

# ------------------------------------------------------------------------------
# Private helpers: integration of pivot finder
# ------------------------------------------------------------------------------

proc pivot(ctx: SnapCtxRef): BestPivotCtxRef =
  # Getter
  ctx.data.pivotFinderCtx.BestPivotCtxRef
proc `pivot=`(ctx: SnapCtxRef; val: BestPivotCtxRef) =
  # Setter
  ctx.data.pivotFinderCtx = val
proc pivot(buddy: SnapBuddyRef): BestPivotWorkerRef =
  # Getter
  buddy.data.pivotFinder.BestPivotWorkerRef
proc `pivot=`(buddy: SnapBuddyRef; val: BestPivotWorkerRef) =
  # Setter
  buddy.data.pivotFinder = val

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc init(batch: var SnapTrieRangeBatch; ctx: SnapCtxRef) =
  ## Returns a pair of account hash range lists with the full range of hashes
  ## smartly spread across the mutually disjoint interval sets.
  for n in 0 ..< batch.unprocessed.len:
    batch.unprocessed[n] = NodeTagRangeSet.init()

  # Initialise the accounts range fetch batch, i.e. the pair of
  # `fetchAccounts[]` range sets.
  if ctx.data.coveredAccounts.total == 0 and
     ctx.data.coveredAccounts.chunks == 1:
    # All (i.e. 100%) of the account hashes are covered by completed range
    # fetch processes for all pivot environments. Do a random split
    # distributing the full account hash range across the pair of range sets.
    var nodeKey: NodeKey
    ctx.data.rng[].generate(nodeKey.ByteArray32)

    let partition = nodeKey.to(NodeTag)
    discard batch.unprocessed[0].merge(partition, high(NodeTag))
    if low(NodeTag) < partition:
      discard batch.unprocessed[1].merge(low(NodeTag), partition - 1.u256)
  else:
    # Not all account hashes are covered, yet. So keep the uncovered
    # account hashes in the first range set, and the other account hashes
    # in the second range set.

    # Pre-fill the first range set with the largest possible interval
    discard batch.unprocessed[0].merge(low(NodeTag), high(NodeTag))

    # Move covered account ranges (aka intervals) to the second set.
    for iv in ctx.data.coveredAccounts.increasing:
      discard batch.unprocessed[0].reduce(iv)
      discard batch.unprocessed[1].merge(iv)
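
# Illustrative example (hypothetical coverage state): if earlier pivots have
# already fetched the lower half of the hash space, a fresh batch starts out as
#   unprocessed[0] = upper half     # hashes not yet covered by any pivot
#   unprocessed[1] = lower half     # hashes already covered elsewhere
# so ranges that no pivot has seen yet can be preferred, pushing the global
# `coveredAccounts` figure towards 100%.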
proc appendPivotEnv(buddy: SnapBuddyRef; header: BlockHeader) =
  ## Activate the environment for the state root implied by the `header`
  ## argument. This function appends a new environment unless the latest one
  ## is not far enough apart (i.e. less than `pivotBlockDistanceMin` blocks
  ## below the new header.)
  ##
  ## Note that this function relies on a queue sorted by the block numbers of
  ## the pivot headers. To maintain the sort order, the function `lruFetch()`
  ## must not be called and records must only be appended with increasing
  ## block numbers.
  let
    ctx = buddy.ctx
    minNumber = block:
      let rc = ctx.data.pivotTable.lastValue
      if rc.isOk: rc.value.stateHeader.blockNumber + pivotBlockDistanceMin
      else: 1.toBlockNumber

  # Check whether the new header satisfies the minimum depth requirement.
  # This is where the queue is assumed to have increasing block numbers.
  if minNumber <= header.blockNumber:
    # Ok, append a new environment
    let env = SnapPivotRef(stateHeader: header)
    env.fetchAccounts.init(ctx)

    # Append the per-state root environment to the LRU queue
    discard ctx.data.pivotTable.lruAppend(header.stateRoot, env, ctx.buddiesMax)
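
# Numeric sketch (illustrative; the actual distance is whatever
# `pivotBlockDistanceMin` is assigned to, e.g. 128 per the related commit
# notes): with the latest pivot at block #1000, any negotiated header below
# #1128 is silently dropped, while a header at #1200 is appended and becomes
# the new latest pivot, with `lruAppend` bounding the table to
# `ctx.buddiesMax` entries.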
proc updateSinglePivot(buddy: SnapBuddyRef): Future[bool] {.async.} =
  ## Helper, negotiate a pivot header unless one is already present
  if buddy.pivot.pivotHeader.isOk:
    return true

  let
    ctx = buddy.ctx
    peer = buddy.peer
    env = ctx.data.pivotTable.lastValue.get(otherwise = nil)
    nMin = if env.isNil: none(BlockNumber)
           else: some(env.stateHeader.blockNumber)

  if await buddy.pivot.pivotNegotiate(nMin):
    var header = buddy.pivot.pivotHeader.value

    # Check whether an environment change is needed at all
    when pivotEnvStopChangingIfComplete:
      let rc = ctx.data.pivotTable.lastValue
      if rc.isOk and rc.value.storageDone:
        # No need to change
        if extraTraceMessages:
          trace "No need to change snap pivot", peer,
            pivot=("#" & $rc.value.stateHeader.blockNumber),
            stateRoot=rc.value.stateHeader.stateRoot,
            multiOk=buddy.ctrl.multiOk, runState=buddy.ctrl.state
        return true

    buddy.appendPivotEnv(header)

    info "Snap pivot initialised", peer, pivot=("#" & $header.blockNumber),
      multiOk=buddy.ctrl.multiOk, runState=buddy.ctrl.state

    return true
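
# Flow sketch (illustrative): a typical first call returns `false` while
# `pivotNegotiate()` is still comparing headers against other peers; once it
# succeeds, the negotiated header is installed via `appendPivotEnv()` and
# subsequent calls short-cut on the cached `pivotHeader`.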
proc tickerUpdate*(ctx: SnapCtxRef): TickerStatsUpdater =
  result = proc: TickerStats =
    var
      aSum, aSqSum, uSum, uSqSum, sSum, sSqSum: float
      count = 0
    for kvp in ctx.data.pivotTable.nextPairs:

      # Accounts mean & variance
      let aLen = kvp.data.nAccounts.float
      if 0 < aLen:
        count.inc
        aSum += aLen
        aSqSum += aLen * aLen

        # Fill utilisation mean & variance
        let fill = kvp.data.fetchAccounts.unprocessed.emptyFactor
        uSum += fill
        uSqSum += fill * fill

        let sLen = kvp.data.nSlotLists.float
        sSum += sLen
        sSqSum += sLen * sLen

    let
      env = ctx.data.pivotTable.lastValue.get(otherwise = nil)
      pivotBlock = if env.isNil: none(BlockNumber)
                   else: some(env.stateHeader.blockNumber)
      stoQuLen = if env.isNil: none(uint64)
                 else: some(env.fetchStorage.len.uint64)
      accCoverage = ctx.data.coveredAccounts.fullFactor
      accFill = meanStdDev(uSum, uSqSum, count)

    TickerStats(
      pivotBlock: pivotBlock,
      nQueues: ctx.data.pivotTable.len,
      nAccounts: meanStdDev(aSum, aSqSum, count),
      nSlotLists: meanStdDev(sSum, sSqSum, count),
      accountsFill: (accFill[0], accFill[1], accCoverage),
      nStorageQueue: stoQuLen)
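
# Reading the stats (illustrative, hypothetical numbers): each `meanStdDev()`
# pair summarises the per-pivot samples accumulated above, e.g.
# `nAccounts = (1200.0, 35.1)` would mean an average of 1200 accounts per
# pivot environment with a standard deviation of about 35. `accountsFill`
# additionally carries the global `coveredAccounts` coverage factor in its
# third slot.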

# ------------------------------------------------------------------------------
# Public start/stop and admin functions
# ------------------------------------------------------------------------------

proc setup*(ctx: SnapCtxRef; tickerOK: bool): bool =
  ## Global set up
  noExceptionOops("worker.setup()"):
    ctx.ethWireCtx.poolEnabled(false)
  ctx.data.coveredAccounts = NodeTagRangeSet.init()
  ctx.data.snapDb =
    if ctx.data.dbBackend.isNil: SnapDbRef.init(ctx.chain.db.db)
    else: SnapDbRef.init(ctx.data.dbBackend)
  ctx.pivot = BestPivotCtxRef.init(ctx.data.rng)
  ctx.pivot.pivotRelaxedMode(enable = true)
  if tickerOK:
    ctx.data.ticker = TickerRef.init(ctx.tickerUpdate)
  else:
    trace "Ticker is disabled"
  result = true
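
# Lifecycle sketch (illustrative, following the scheduler conventions from the
# imported `sync_desc` module): `ctx.setup(tickerOK = true)` is expected to
# run once at start-up, `buddy.start()`/`buddy.stop()` once per connected
# peer, and `ctx.release()` once on shutdown.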
proc release*(ctx: SnapCtxRef) =
  ## Global clean up
  ctx.pivot = nil
  if not ctx.data.ticker.isNil:
    ctx.data.ticker.stop()
    ctx.data.ticker = nil
proc start*(buddy: SnapBuddyRef): bool =
  ## Initialise worker peer
  let
    ctx = buddy.ctx
    peer = buddy.peer
  if peer.supports(protocol.snap) and
     peer.supports(protocol.eth) and
     peer.state(protocol.eth).initialized:
    buddy.pivot = BestPivotWorkerRef.init(
      buddy.ctx.pivot, buddy.ctrl, buddy.peer)
    buddy.data.errors = ComErrorStatsRef()
    if not ctx.data.ticker.isNil:
      ctx.data.ticker.startBuddy()
    return true
proc stop*(buddy: SnapBuddyRef) =
  ## Clean up this peer
  let
    ctx = buddy.ctx
    peer = buddy.peer
  buddy.ctrl.stopped = true
  buddy.pivot.clear()
  if not ctx.data.ticker.isNil:
    ctx.data.ticker.stopBuddy()

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc runSingle*(buddy: SnapBuddyRef) {.async.} =
  ## This peer worker is invoked if the peer-local flag `buddy.ctrl.multiOk`
  ## is set `false` which is the default mode. This flag is updated by the
  ## worker when deemed appropriate.
  ## * Across all worker peers, at most one `runSingle()` function can be
  ##   active simultaneously.
  ## * There will be no `runMulti()` function active for the same worker peer
  ##   simultaneously.
  ## * There will be no `runPool()` iterator active simultaneously.
  ##
  ## Note that this function runs in `async` mode.
  ##
  let peer = buddy.peer
  # This pivot finder harmonises the assigned difficulties of at least two
  # peers. There can only be one `pivot2Exec()` instance active/unfinished
  # (which is wrapped into the helper function `updateSinglePivot()`.)
  if not await buddy.updateSinglePivot():
    # Wait if needed, then return => repeat
    if not buddy.ctrl.stopped:
      await sleepAsync(2.seconds)
    return

  buddy.ctrl.multiOk = true
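
# Scheduler sketch (illustrative): every peer starts out in single mode. Once
# `updateSinglePivot()` succeeds, `multiOk` flips to `true` and the scheduler
# is expected to drive this peer via `runMulti()` instead, until the peer
# stops or the flag is reset.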
proc runPool*(buddy: SnapBuddyRef, last: bool) =
  ## Once started, the function `runPool()` is called for all worker peers in
  ## a row (as the body of an iteration.) There will be no other worker peer
  ## functions activated simultaneously.
  ##
  ## This procedure is started if the global flag `buddy.ctx.poolMode` is set
  ## `true` (default is `false`.) It is the responsibility of the `runPool()`
  ## instance to reset the flag `buddy.ctx.poolMode`, typically at the first
  ## peer instance.
  ##
  ## The argument `last` is set `true` if the last entry is reached.
  ##
  ## Note that this function does not run in `async` mode.
  ##
  let ctx = buddy.ctx
  if ctx.poolMode:
    ctx.poolMode = false

    let rc = ctx.data.pivotTable.lastValue
    if rc.isOk:

      # Check whether the last pivot's accounts and storage are complete.
      let
        env = rc.value
        peer = buddy.peer
        pivot = "#" & $env.stateHeader.blockNumber # for logging

      if not env.storageDone:

        # Check whether the accounts download is complete
        if env.fetchAccounts.unprocessed.isEmpty():

          # FIXME: This check might not be needed. It will visit *every* node
          #        in the hexary trie for checking the account leaves.
          if buddy.checkAccountsTrieIsComplete(env):
            env.accountsState = HealerDone

            # Check whether the storage slots are complete
            if env.fetchStorage.len == 0:
              env.storageDone = true

      if extraTraceMessages:
        trace "Checked for pivot DB completeness", peer, pivot,
          nAccounts=env.nAccounts, accountsState=env.accountsState,
          nSlotLists=env.nSlotLists, storageDone=env.storageDone
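
# Illustrative flow: a `runMulti()` instance sets `ctx.poolMode = true` once
# its pivot looks complete (see below); the scheduler then calls `runPool()`
# once per peer, and the first invocation resets the flag and performs the
# expensive trie completeness check on behalf of all peers.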
proc runMulti*(buddy: SnapBuddyRef) {.async.} =
  ## This peer worker is invoked if the `buddy.ctrl.multiOk` flag is set
  ## `true` which is typically done after finishing `runSingle()`. This
  ## instance can be simultaneously active for all peer workers.
  ##
  let
    ctx = buddy.ctx
    peer = buddy.peer

  # Set up current state root environment for accounts snapshot
  let
    env = block:
      let rc = ctx.data.pivotTable.lastValue
      if rc.isErr:
        return # nothing to do
      rc.value
    pivot = "#" & $env.stateHeader.blockNumber # for logging

  buddy.data.pivotEnv = env

  # Full sync processing based on current snapshot
  # ----------------------------------------------
  if env.storageDone:
    if not buddy.checkAccountsTrieIsComplete(env):
      error "Ooops, all accounts fetched but DB still incomplete", peer, pivot

    if not buddy.checkStorageSlotsTrieIsComplete(env):
      error "Ooops, all storages fetched but DB still incomplete", peer, pivot

    trace "Snap full sync -- not implemented yet", peer, pivot
    await sleepAsync(5.seconds)
    return

  # Snapshot sync processing
  # ------------------------

  template runAsync(code: untyped) =
    await code
    if buddy.ctrl.stopped:
      # To be disconnected from peer.
      return
    if env != ctx.data.pivotTable.lastValue.value:
      # Pivot has changed, so restart with the latest one
      return
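
  # Expansion sketch (illustrative): `runAsync buddy.healAccounts()` is
  # equivalent to awaiting `buddy.healAccounts()` followed by the two early
  # exits above, so every pipeline stage below aborts `runMulti()` as soon as
  # the peer is stopped or a newer pivot has been appended.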

  # If this is a new pivot, the previous one can be partially cleaned up.
  # There is no point in keeping some older space consuming state data any
  # longer.
  block:
    let rc = ctx.data.pivotTable.beforeLastValue
    if rc.isOk:
      let nFetchStorage = rc.value.fetchStorage.len
      if 0 < nFetchStorage:
        trace "Cleaning up previous pivot", peer, pivot, nFetchStorage
        rc.value.fetchStorage.clear()
      rc.value.fetchAccounts.checkNodes.setLen(0)
      rc.value.fetchAccounts.missingNodes.setLen(0)

  if env.accountsState != HealerDone:
    runAsync buddy.rangeFetchAccounts()
    runAsync buddy.rangeFetchStorageSlots()

  # Can only run a single accounts healer instance at a time. This instance
  # will clear the batch queue so there is nothing to do for another process.
  if env.accountsState == HealerIdle:
    env.accountsState = HealerRunning
    runAsync buddy.healAccounts()
    env.accountsState = HealerIdle

    # Some additional storage slots might have been popped up
    runAsync buddy.rangeFetchStorageSlots()

  runAsync buddy.healStorageSlots()

  # Debugging log: analyse pivot against database
  discard buddy.checkAccountsListOk(env)
  discard buddy.checkStorageSlotsTrieIsComplete(env)

  # Check whether there are more accounts to fetch.
  #
  # Note that some other process might have temporarily borrowed from the
  # `fetchAccounts.unprocessed` list. Whether we are done can only be decided
  # if only a single buddy is active. So be it.
  if env.fetchAccounts.unprocessed.isEmpty():

    # Check whether pivot download is complete.
    if env.fetchStorage.len == 0:
      trace "Running pool mode for verifying completeness", peer, pivot
      buddy.ctx.poolMode = true

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------