# nimbus-eth1/nimbus/sync/misc/ticker.nim

# Nimbus - Fetch account and storage states from peers efficiently
#
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  std/[strformat, strutils],
  chronos,
  chronicles,
  eth/[common, p2p],
  stint,
  ../../utils/prettify,
  ../types,
  ./timer_helper

logScope:
  topics = "tick"

type
  TickerSnapStatsUpdater* = proc: TickerSnapStats {.gcsafe, raises: [].}
    ## Snap sync state update function

  TickerFullStatsUpdater* = proc: TickerFullStats {.gcsafe, raises: [].}
    ## Full sync state update function

  SnapDescDetails = object
    ## Private state descriptor
    snapCb: TickerSnapStatsUpdater
    recovery: bool
    lastRecov: bool
    lastStats: TickerSnapStats

  FullDescDetails = object
    ## Private state descriptor
    fullCb: TickerFullStatsUpdater
    lastStats: TickerFullStats

  TickerSnapStats* = object
    ## Snap sync state (see `TickerSnapStatsUpdater`)
    beaconBlock*: Option[BlockNumber]
    pivotBlock*: Option[BlockNumber]
    nAccounts*: (float,float)          ## Mean and standard deviation
    accountsFill*: (float,float,float) ## Mean, standard deviation, merged total
    nAccountStats*: int                ## #chunks
    nSlotLists*: (float,float)         ## Mean and standard deviation
    nContracts*: (float,float)         ## Mean and standard deviation
    nStorageQueue*: Option[int]
    nContractQueue*: Option[int]
    nQueues*: int

  TickerFullStats* = object
    ## Full sync state (see `TickerFullStatsUpdater`)
    pivotBlock*: Option[BlockNumber]
    topPersistent*: BlockNumber
    nextUnprocessed*: Option[BlockNumber]
    nextStaged*: Option[BlockNumber]
    nStagedQueue*: int
    suspended*: bool
    reOrg*: bool

  TickerRef* = ref object
    ## Ticker descriptor object
    nBuddies: int
    logTicker: TimerCallback
    started: Moment
    visited: Moment
    prettyPrint: proc(t: TickerRef) {.gcsafe, raises: [].}
    case fullMode: bool
    of false:
      snap: SnapDescDetails
    of true:
      full: FullDescDetails
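
# Added note (not in the original sources): `fullMode` discriminates the
# variant part of `TickerRef`, so accessing the branch that is not active
# raises a `FieldDefect` at run time, e.g.
#
#   let t = TickerRef(fullMode: false)  # snap-mode ticker
#   discard t.snap                      # fine
#   discard t.full                      # would raise FieldDefect
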
const
  extraTraceMessages = false # or true
    ## Enables additional logging noise

  tickerStartDelay = chronos.milliseconds(100)
  tickerLogInterval = chronos.seconds(1)
  tickerLogSuppressMax = chronos.seconds(100)
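
# Added note on how the intervals interact: the timer fires every
# `tickerLogInterval`, but a line is only logged when the stats snapshot (or,
# for snap mode, the recovery flag) changed since the last round; after
# `tickerLogSuppressMax` without a change a line is logged anyway.
# `tickerStartDelay` merely postpones the very first round after the ticker
# is started.
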
# ------------------------------------------------------------------------------
# Private functions: pretty printing
# ------------------------------------------------------------------------------

proc pc99(val: float): string =
  if 0.99 <= val and val < 1.0: "99%"
  elif 0.0 < val and val <= 0.01: "1%"
  else: val.toPC(0)
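
# Added examples (values are illustrative): `pc99` keeps almost-complete and
# barely-started coverage figures away from the misleading 100%/0% endpoints:
#
#   0.995.pc99   # -> "99%" (not "100%")
#   0.004.pc99   # -> "1%"  (not "0%")
#   0.500.pc99   # -> whatever `toPC(0)` renders, e.g. "50%"
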
proc toStr(a: Option[int]): string =
  if a.isNone: "n/a"
  else: $a.unsafeGet

# ------------------------------------------------------------------------------
# Private functions: printing ticker messages
# ------------------------------------------------------------------------------

when false:
  template logTxt(info: static[string]): static[string] =
    "Ticker " & info

template noFmtError(info: static[string]; code: untyped) =
  try:
    code
  except ValueError as e:
    raiseAssert "Inconceivable (" & info & "): " & e.msg

proc snapTicker(t: TickerRef) {.gcsafe.} =
  let
    data = t.snap.snapCb()
    now = Moment.now()

  if data != t.snap.lastStats or
     t.snap.recovery != t.snap.lastRecov or
     tickerLogSuppressMax < (now - t.visited):
    var
      nAcc, nSto, nCon: string
      pv = "n/a"
    let
      nStoQ = data.nStorageQueue.toStr
      nConQ = data.nContractQueue.toStr
      bc = data.beaconBlock.toStr
      recoveryDone = t.snap.lastRecov
      accCov = data.accountsFill[0].pc99 &
         "(" & data.accountsFill[1].pc99 & ")" &
         "/" & data.accountsFill[2].pc99 &
         "~" & data.nAccountStats.uint.toSI
      nInst = t.nBuddies

      # With `int64`, the seconds counter spans more than 29*10^10 years
      up = (now - t.started).seconds.uint64.toSI
      mem = getTotalMem().uint.toSI

    t.snap.lastStats = data
    t.visited = now
    t.snap.lastRecov = t.snap.recovery

    if data.pivotBlock.isSome:
      pv = data.pivotBlock.toStr & "/" & $data.nQueues

    noFmtError("runLogTicker"):
      nAcc = (&"{(data.nAccounts[0]+0.5).int64}" &
              &"({(data.nAccounts[1]+0.5).int64})")
      nSto = (&"{(data.nSlotLists[0]+0.5).int64}" &
              &"({(data.nSlotLists[1]+0.5).int64})")
      nCon = (&"{(data.nContracts[0]+0.5).int64}" &
              &"({(data.nContracts[1]+0.5).int64})")

    if t.snap.recovery:
      info "Snap sync ticker (recovery)",
        up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem
    elif recoveryDone:
      info "Snap sync ticker (recovery done)",
        up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem
    else:
      info "Snap sync ticker",
        up, nInst, bc, pv, nAcc, accCov, nSto, nStoQ, nCon, nConQ, mem

proc fullTicker(t: TickerRef) {.gcsafe.} =
  let
    data = t.full.fullCb()
    now = Moment.now()

  if data != t.full.lastStats or
     tickerLogSuppressMax < (now - t.visited):
    let
      persistent = data.topPersistent.toStr
      staged = data.nextStaged.toStr
      unprocessed = data.nextUnprocessed.toStr
      queued = data.nStagedQueue
      reOrg = if data.reOrg: "t" else: "f"
      pv = data.pivotBlock.toStr
      nInst = t.nBuddies

      # With `int64`, the seconds counter spans more than 29*10^10 years
      up = (now - t.started).seconds.uint64.toSI
      mem = getTotalMem().uint.toSI

    t.full.lastStats = data
    t.visited = now

    if data.suspended:
      info "Full sync ticker (suspended)", up, nInst, pv,
        persistent, staged, unprocessed, queued, reOrg, mem
    else:
      info "Full sync ticker", up, nInst, pv,
        persistent, staged, unprocessed, queued, reOrg, mem

# ------------------------------------------------------------------------------
# Private functions: ticking log messages
# ------------------------------------------------------------------------------
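
# Added note on the timer chain: `runLogTicker` renders the current state and
# immediately re-arms itself for `tickerLogInterval` later via `setLogTicker`,
# so the ticker keeps running without an explicit loop. Stopping works by
# clearing `logTicker` (see `stopImpl`): the next `setLogTicker` call then
# declines to re-arm and the chain dies out.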

proc setLogTicker(t: TickerRef; at: Moment) {.gcsafe.}

proc runLogTicker(t: TickerRef) {.gcsafe.} =
  t.prettyPrint(t)
  t.setLogTicker(Moment.fromNow(tickerLogInterval))

proc setLogTicker(t: TickerRef; at: Moment) =
  if t.logTicker.isNil:
    when extraTraceMessages:
      debug logTxt "was stopped", fullMode=t.fullMode, nBuddies=t.nBuddies
  else:
    t.logTicker = safeSetTimer(at, runLogTicker, t)

proc initImpl(t: TickerRef; cb: TickerSnapStatsUpdater) =
  t.fullMode = false
  t.prettyPrint = snapTicker
  t.snap = SnapDescDetails(snapCb: cb)

proc initImpl(t: TickerRef; cb: TickerFullStatsUpdater) =
  t.fullMode = true
  t.prettyPrint = fullTicker
  t.full = FullDescDetails(fullCb: cb)

proc startImpl(t: TickerRef) =
  t.logTicker = safeSetTimer(Moment.fromNow(tickerStartDelay),runLogTicker,t)
  if t.started == Moment.default:
    t.started = Moment.now()

proc stopImpl(t: TickerRef) =
  ## Stop ticker unconditionally
  t.logTicker = nil

# ------------------------------------------------------------------------------
# Public constructor and start/stop functions
# ------------------------------------------------------------------------------

proc init*(
    T: type TickerRef;
    cb: TickerSnapStatsUpdater|TickerFullStatsUpdater;
      ): T =
  ## Constructor
  new result
  result.initImpl(cb)
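
# Added usage sketch (hypothetical caller, not from the original sources): a
# sync scheduler would pass in a stats callback and then drive the ticker via
# the buddy/start/stop procs below, roughly like so:
#
#   proc myFullStats(): TickerFullStats {.gcsafe, raises: [].} =
#     TickerFullStats()                # fill in real progress figures here
#
#   let ticker = TickerRef.init(myFullStats)
#   ticker.startBuddy()                # first worker peer shows up
#   # ... a line is logged about once per second while the stats change ...
#   ticker.stopBuddy()                 # worker peer gone
#   ticker.stop()                      # shut the timer down explicitly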

proc init*(t: TickerRef; cb: TickerSnapStatsUpdater) =
  ## Re-initialise ticker
  if not t.isNil:
    t.visited.reset
    if t.fullMode:
      t.prettyPrint(t) # print final state for full sync
    t.initImpl(cb)

proc init*(t: TickerRef; cb: TickerFullStatsUpdater) =
  ## Re-initialise ticker
  if not t.isNil:
    t.visited.reset
    if not t.fullMode:
      t.prettyPrint(t) # print final state for snap sync
    t.initImpl(cb)

proc start*(t: TickerRef) =
  ## Re/start ticker unconditionally
  if not t.isNil:
    when extraTraceMessages:
      debug logTxt "start", fullMode=t.fullMode, nBuddies=t.nBuddies
    t.startImpl()

proc stop*(t: TickerRef) =
  ## Stop ticker unconditionally
  if not t.isNil:
    t.stopImpl()
    when extraTraceMessages:
      debug logTxt "stop", fullMode=t.fullMode, nBuddies=t.nBuddies

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc startBuddy*(t: TickerRef) =
  ## Increment buddies counter and start ticker unless running.
  if not t.isNil:
    if t.nBuddies <= 0:
      t.nBuddies = 1
      if t.fullMode or not t.snap.recovery:
        t.startImpl()
        when extraTraceMessages:
          debug logTxt "start buddy", fullMode=t.fullMode, nBuddies=t.nBuddies
    else:
      t.nBuddies.inc

proc startRecovery*(t: TickerRef) =
  ## Ditto for recovery mode
  if not t.isNil and not t.fullMode and not t.snap.recovery:
    t.snap.recovery = true
    if t.nBuddies <= 0:
      t.startImpl()
    when extraTraceMessages:
      debug logTxt "start recovery", fullMode=t.fullMode, nBuddies=t.nBuddies

proc stopBuddy*(t: TickerRef) =
  ## Decrement buddies counter and stop ticker if there are no more registered
  ## buddies.
  if not t.isNil:
    t.nBuddies.dec
    if t.nBuddies <= 0 and not t.fullMode and not t.snap.recovery:
      t.nBuddies = 0
      t.stopImpl()
      when extraTraceMessages:
        debug logTxt "stop (buddy)", fullMode=t.fullMode, nBuddies=t.nBuddies

proc stopRecovery*(t: TickerRef) =
  ## Ditto for recovery mode
  if not t.isNil and not t.fullMode and t.snap.recovery:
    t.snap.recovery = false
    t.stopImpl()
    when extraTraceMessages:
      debug logTxt "stop (recovery)", fullMode=t.fullMode, nBuddies=t.nBuddies
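
when isMainModule:
  # Added smoke-test sketch (not part of the original module): builds a
  # snap-mode ticker around a stand-in stats callback, exercises the buddy
  # counter, and renders one state line directly rather than waiting for the
  # chronos timer to fire.
  proc demoStats(): TickerSnapStats {.gcsafe, raises: [].} =
    TickerSnapStats(nQueues: 1)         # non-default so a line gets printed

  let ticker = TickerRef.init(demoStats)
  ticker.startBuddy()                   # first buddy arms the log timer
  ticker.prettyPrint(ticker)            # emit a single "Snap sync ticker" line
  ticker.stopBuddy()                    # last buddy gone, the timer is cleared
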
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------