Snap sync cosmetic code update (#1563)

* Relocate full sync descriptors from global `worker_desc.nim` to local pass

why:
  These settings are needed only for the full sync pass.

* Rename `pivotAccountsCoverage*()` => `accountsCoverage*()`

details:
  Extracted from `worker_desc.nim` into a separate source file.

* Relocate snap sync sub-descriptors

details:
  ..from global `worker_desc.nim` to the local pass module `snap_pass_desc.nim`.

* Rename `SnapPivotRef` => `SnapPassPivotRef`

* Mostly removed `SnapPass` prefix from object type names

why:
  These objects are solely used on the snap pass.
Jordan Hrycaj 2023-04-25 17:34:48 +01:00 committed by GitHub
parent d6ee672ba5
commit 68b2448ce1
15 changed files with 459 additions and 344 deletions
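
For orientation before the per-file diffs: the change applies a pass-local descriptor pattern throughout. The shared context keeps only an opaque `RootRef` slot (`full` or `snap`), and each sync pass recovers its own typed descriptor through small `pass`/`pass=` getter/setter procs. The following minimal sketch is illustrative only; the `Demo*` names are hypothetical and not part of this commit.

type
  DemoCtxData = object
    full: RootRef                    # opaque slot owned by the full sync pass

  DemoFullPassCtxRef = ref object of RootRef
    startNumber: int                 # pass-local state, hidden from other passes

proc pass(pool: DemoCtxData): DemoFullPassCtxRef =
  ## Getter, recover the typed pass descriptor from the opaque slot
  pool.full.DemoFullPassCtxRef

proc `pass=`(pool: var DemoCtxData; val: DemoFullPassCtxRef) =
  ## Setter, install the pass-local descriptor
  pool.full = val

when isMainModule:
  var ctx: DemoCtxData
  ctx.pass = DemoFullPassCtxRef(startNumber: 42)
  doAssert ctx.pass.startNumber == 42

The diffs below use the same shape, with `FullPassCtxRef`/`SnapPassCtxRef` hanging off the `full` and `snap` slots of `SnapCtxData`.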

View File

@ -22,6 +22,18 @@ import
../get/get_error,
./pass_desc
type
FullPassCtxRef = ref object of RootRef
## Pass local descriptor extension for full sync process
startNumber*: Option[BlockNumber] ## History starts here (used for logging)
pivot*: BestPivotCtxRef ## Global pivot descriptor
bCtx*: BlockQueueCtxRef ## Global block queue descriptor
FullPassBuddyRef = ref object of RootRef
## Pass local descriptor extension for full sync process
pivot*: BestPivotWorkerRef ## Local pivot worker descriptor
queue*: BlockQueueWorkerRef ## Block queue worker
const
extraTraceMessages = false # or true
## Enabled additional logging noise
@ -45,31 +57,53 @@ template ignoreException(info: static[string]; code: untyped) =
except CatchableError as e:
error "Exception at " & info & ":", name=($e.name), msg=(e.msg)
# ------------------------------------------------------------------------------
# Private getter/setter
# ------------------------------------------------------------------------------
proc pass(pool: SnapCtxData): auto =
## Getter, pass local descriptor
pool.full.FullPassCtxRef
proc pass(only: SnapBuddyData): auto =
## Getter, pass local descriptor
only.full.FullPassBuddyRef
proc `pass=`(pool: var SnapCtxData; val: FullPassCtxRef) =
## Setter, pass local descriptor
pool.full = val
proc `pass=`(only: var SnapBuddyData; val: FullPassBuddyRef) =
## Setter, pass local descriptor
only.full = val
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc tickerUpdater(ctx: SnapCtxRef): TickerFullStatsUpdater =
result = proc: TickerFullStats =
var stats: BlockQueueStats
ctx.pool.bCtx.blockQueueStats(stats)
ctx.pool.pass.bCtx.blockQueueStats(stats)
TickerFullStats(
pivotBlock: ctx.pool.pass.startNumber,
topPersistent: stats.topAccepted,
nextStaged: stats.nextStaged,
nextUnprocessed: stats.nextUnprocessed,
nStagedQueue: stats.nStagedQueue,
reOrg: stats.reOrg)
# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------
proc processStaged(buddy: SnapBuddyRef): bool =
## Fetch a work item from the `staged` queue and process it to be
## stored on the persistent block chain.
let
ctx {.used.} = buddy.ctx
ctx = buddy.ctx
peer = buddy.peer
chainDb = buddy.ctx.chain.db
chain = buddy.ctx.chain
bq = buddy.only.bQueue
bq = buddy.only.pass.queue
# Get a work item, a list of headers + bodies
wi = block:
@ -129,12 +163,17 @@ proc processStaged(buddy: SnapBuddyRef): bool =
# ------------------------------------------------------------------------------
proc fullSyncSetup(ctx: SnapCtxRef) =
ctx.pool.bCtx = BlockQueueCtxRef.init()
ctx.pool.bPivot = BestPivotCtxRef.init(rng=ctx.pool.rng, minPeers=0)
# Set up descriptor
ctx.pool.pass = FullPassCtxRef()
ctx.pool.pass.bCtx = BlockQueueCtxRef.init()
ctx.pool.pass.pivot = BestPivotCtxRef.init(rng=ctx.pool.rng, minPeers=0)
# Update ticker
ctx.pool.ticker.init(cb = ctx.tickerUpdater())
proc fullSyncRelease(ctx: SnapCtxRef) =
ctx.pool.ticker.stop()
ctx.pool.pass = nil
proc fullSyncStart(buddy: SnapBuddyRef): bool =
@ -142,13 +181,12 @@ proc fullSyncStart(buddy: SnapBuddyRef): bool =
ctx = buddy.ctx
peer = buddy.peer
if peer.supports(protocol.eth) and
peer.state(protocol.eth).initialized:
if peer.supports(protocol.eth) and peer.state(protocol.eth).initialized:
let p = ctx.pool.pass
buddy.only.bQueue = BlockQueueWorkerRef.init(
ctx.pool.bCtx, buddy.ctrl, peer)
buddy.only.bPivot = BestPivotWorkerRef.init(
ctx.pool.bPivot, buddy.ctrl, buddy.peer)
buddy.only.pass = FullPassBuddyRef()
buddy.only.pass.queue = BlockQueueWorkerRef.init(p.bCtx, buddy.ctrl, peer)
buddy.only.pass.pivot = BestPivotWorkerRef.init(p.pivot, buddy.ctrl, peer)
ctx.pool.ticker.startBuddy()
buddy.ctrl.multiOk = false # confirm default mode for soft restart
@ -156,7 +194,7 @@ proc fullSyncStart(buddy: SnapBuddyRef): bool =
return true
proc fullSyncStop(buddy: SnapBuddyRef) =
buddy.only.bPivot.clear()
buddy.only.pass.pivot.clear()
buddy.ctx.pool.ticker.stopBuddy()
# ------------------------------------------------------------------------------
@ -176,8 +214,8 @@ proc fullSyncPool(buddy: SnapBuddyRef, last: bool; laps: int): bool =
let stateHeader = ctx.pool.fullHeader.unsafeGet
# Reinitialise block queue descriptor relative to current pivot
ctx.pool.startNumber = some(stateHeader.blockNumber)
ctx.pool.bCtx = BlockQueueCtxRef.init(stateHeader.blockNumber + 1)
ctx.pool.pass.startNumber = some(stateHeader.blockNumber)
ctx.pool.pass.bCtx = BlockQueueCtxRef.init(stateHeader.blockNumber + 1)
# Kick off ticker (was stopped by snap `release()` method)
ctx.pool.ticker.start()
@ -198,25 +236,25 @@ proc fullSyncPool(buddy: SnapBuddyRef, last: bool; laps: int): bool =
ctx.pool.fullHeader = none(BlockHeader)
# Soft re-start buddy peers if on the second lap.
if 0 < laps and ctx.pool.startNumber.isSome:
if 0 < laps and ctx.pool.pass.startNumber.isSome:
if not buddy.fullSyncStart():
# Start() method failed => wait for another peer
buddy.ctrl.stopped = true
if last:
trace logTxt "soft restart done", peer=buddy.peer, last, laps,
pivot=ctx.pool.startNumber.toStr,
pivot=ctx.pool.pass.startNumber.toStr,
mode=ctx.pool.syncMode.active, state= buddy.ctrl.state
return false # does stop magically when looping over peers is exhausted
# Mind the gap, fill in if necessary (function is peer independent)
buddy.only.bQueue.blockQueueGrout()
buddy.only.pass.queue.blockQueueGrout()
true # Stop after running once regardless of peer
proc fullSyncSingle(buddy: SnapBuddyRef) {.async.} =
let
pv = buddy.only.bPivot
bq = buddy.only.bQueue
pv = buddy.only.pass.pivot
bq = buddy.only.pass.queue
bNum = bq.bestNumber.get(otherwise = bq.topAccepted + 1)
# Negotiate in order to derive the pivot header from this `peer`.
@ -243,7 +281,7 @@ proc fullSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
## Full sync processing
let
ctx = buddy.ctx
bq = buddy.only.bQueue
bq = buddy.only.pass.queue
# Fetch work item
let rc = await bq.blockQueueWorker()

View File

@ -21,7 +21,7 @@ import
../get/get_error,
./pass_desc,
./pass_snap/helper/[beacon_header, storage_queue],
./pass_snap/pivot
./pass_snap/[pivot, snap_pass_desc]
logScope:
topics = "snap-play"
@ -77,18 +77,21 @@ proc detectSnapSyncRecovery(ctx: SnapCtxRef) =
## Helper for `setup()`: Initiate snap sync recovery (if any)
let rc = ctx.pool.snapDb.pivotRecoverDB()
if rc.isOk:
ctx.pool.recovery = SnapRecoveryRef(state: rc.value)
let snap = ctx.pool.pass
snap.recovery = RecoveryRef(state: rc.value)
ctx.daemon = true
# Set up early initial pivot
ctx.pool.pivotTable.reverseUpdate(ctx.pool.recovery.state.header, ctx)
snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)
trace logTxt "recovery started",
checkpoint=(ctx.pool.pivotTable.topNumber.toStr & "(0)")
checkpoint=(snap.pivotTable.topNumber.toStr & "(0)")
if not ctx.pool.ticker.isNil:
ctx.pool.ticker.startRecovery()
proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
let recov = ctx.pool.recovery
let
snap = ctx.pool.pass
recov = snap.recovery
if recov.isNil:
return false
@ -96,7 +99,7 @@ proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
checkpoint = recov.state.header.blockNumber.toStr & "(" & $recov.level & ")"
topLevel = recov.level == 0
env = block:
let rc = ctx.pool.pivotTable.eq recov.state.header.stateRoot
let rc = snap.pivotTable.eq recov.state.header.stateRoot
if rc.isErr:
error logTxt "recovery pivot context gone", checkpoint, topLevel
return false
@ -125,12 +128,12 @@ proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
return false
# Set up next level pivot checkpoint
ctx.pool.recovery = SnapRecoveryRef(
snap.recovery = RecoveryRef(
state: rc.value,
level: recov.level + 1)
# Push onto pivot table and continue recovery (i.e. do not stop it yet)
ctx.pool.pivotTable.reverseUpdate(ctx.pool.recovery.state.header, ctx)
snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)
return true # continue recovery
@ -152,7 +155,7 @@ proc snapSyncCompleteOk(
error logTxt "inconsistent state, pivot incomplete",
pivot=env.stateHeader.blockNumber.toStr, nAccounts=env.nAccounts
return false
ctx.pool.completePivot = env
ctx.pool.pass.completedPivot = env
ctx.poolMode = true # Fast sync mode must be synchronized among all peers
return true
@ -161,9 +164,12 @@ proc snapSyncCompleteOk(
# ------------------------------------------------------------------------------
proc snapSyncSetup(ctx: SnapCtxRef) =
# Set up snap sync descriptor
ctx.pool.pass = SnapPassCtxRef()
# For snap sync book keeping
ctx.pool.coveredAccounts = NodeTagRangeSet.init()
ctx.pool.ticker.init(cb = ctx.pool.pivotTable.tickerStats(ctx))
ctx.pool.pass.coveredAccounts = NodeTagRangeSet.init()
ctx.pool.ticker.init(cb = ctx.pool.pass.pivotTable.tickerStats(ctx))
ctx.enableRpcMagic() # Allow external pivot update via RPC
ctx.disableWireServices() # Stop unwanted public services
@ -198,7 +204,8 @@ proc snapSyncPool(buddy: SnapBuddyRef, last: bool, laps: int): bool =
##
let
ctx = buddy.ctx
env = ctx.pool.completePivot
snap = ctx.pool.pass
env = snap.completedPivot
# Check whether the snapshot is complete. If so, switch to full sync mode.
# This process needs to be applied to all buddy peers.
@ -224,22 +231,22 @@ proc snapSyncPool(buddy: SnapBuddyRef, last: bool, laps: int): bool =
# Clean up empty pivot slots (never the top one.) This needs to be run on
# a single peer only. So the loop can stop immediately (returning `true`)
# after this job is done.
var rc = ctx.pool.pivotTable.beforeLast
var rc = snap.pivotTable.beforeLast
while rc.isOK:
let (key, env) = (rc.value.key, rc.value.data)
if env.fetchAccounts.processed.isEmpty:
ctx.pool.pivotTable.del key
rc = ctx.pool.pivotTable.prev(key)
snap.pivotTable.del key
rc = snap.pivotTable.prev(key)
true # Stop ok
proc snapSyncDaemon(ctx: SnapCtxRef) {.async.} =
## Enabled while `ctx.daemon` is `true`
##
if not ctx.pool.recovery.isNil:
if not ctx.pool.pass.recovery.isNil:
if not await ctx.recoveryStepContinue():
# Done, stop recovery
ctx.pool.recovery = nil
ctx.pool.pass.recovery = nil
ctx.daemon = false
# Update logging
@ -256,7 +263,7 @@ proc snapSyncSingle(buddy: SnapBuddyRef) {.async.} =
await buddy.beaconHeaderUpdateFromFile()
# Dedicate some process cycles to the recovery process (if any)
if not buddy.ctx.pool.recovery.isNil:
if not buddy.ctx.pool.pass.recovery.isNil:
when extraTraceMessages:
trace "Throttling single mode in favour of recovery", peer=buddy.peer
await sleepAsync 900.milliseconds
@ -275,7 +282,7 @@ proc snapSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
# Fetch latest state root environment
env = block:
let rc = ctx.pool.pivotTable.lastValue
let rc = ctx.pool.pass.pivotTable.lastValue
if rc.isErr:
buddy.ctrl.multiOk = false
return # nothing to do
@ -288,7 +295,7 @@ proc snapSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
# If this is a new snap sync pivot, the previous one can be cleaned up and
# archived. There is no point in keeping some older space consuming state
# data any longer.
ctx.pool.pivotTable.beforeTopMostlyClean()
ctx.pool.pass.pivotTable.beforeTopMostlyClean()
let
peer = buddy.peer

View File

@ -46,11 +46,12 @@ import
stew/[byteutils, interval_set, keyed_queue],
../../../../../utils/prettify,
../../../../protocol,
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../get/[get_error, get_trie_nodes],
../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_nearby,
hexary_paths, hexary_range, snapdb_accounts],
./helper/[missing_nodes, storage_queue, swap_in]
./helper/[missing_nodes, storage_queue, swap_in],
./snap_pass_desc
logScope:
topics = "snap-acc"
@ -89,7 +90,7 @@ proc healingCtx(
"ctl=" & $buddy.ctrl.state & "," &
"nAccounts=" & $env.nAccounts & "," &
("covered=" & $env.fetchAccounts.processed & "/" &
$ctx.pool.coveredAccounts ) & "}"
$ctx.pool.pass.coveredAccounts ) & "}"
# ------------------------------------------------------------------------------
# Private helpers
@ -134,7 +135,7 @@ proc compileMissingNodesList(
for w in mlv.emptyGaps.increasing:
discard env.fetchAccounts.processed.merge w
env.fetchAccounts.unprocessed.reduce w
discard buddy.ctx.pool.coveredAccounts.merge w
discard buddy.ctx.pool.pass.coveredAccounts.merge w
when extraTraceMessages:
trace logTxt "missing nodes", peer,
@ -252,7 +253,7 @@ proc registerAccountLeaf(
if 0 < env.fetchAccounts.processed.merge iv:
env.nAccounts.inc
env.fetchAccounts.unprocessed.reduce iv
discard buddy.ctx.pool.coveredAccounts.merge iv
discard buddy.ctx.pool.pass.coveredAccounts.merge iv
# Update storage slots batch
if acc.storageRoot != EMPTY_ROOT_HASH:

View File

@ -48,11 +48,12 @@ import
stew/[byteutils, interval_set, keyed_queue],
../../../../../utils/prettify,
../../../../protocol,
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../get/[get_error, get_trie_nodes],
../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_range,
snapdb_storage_slots],
./helper/[missing_nodes, storage_queue]
./helper/[missing_nodes, storage_queue],
./snap_pass_desc
logScope:
topics = "snap-slot"

View File

@ -0,0 +1,38 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
stew/interval_set,
"../../../.."/range_desc,
../snap_pass_desc
# ------------------------------------------------------------------------------
# Public helpers: coverage
# ------------------------------------------------------------------------------
proc accountsCoverage*(ctx: SnapCtxRef): float =
## Returns the accounts coverage factor
ctx.pool.pass.coveredAccounts.fullFactor + ctx.pool.pass.covAccTimesFull.float
proc accountsCoverage100PcRollOver*(ctx: SnapCtxRef) =
## Roll over `coveredAccounts` registry when it reaches 100%.
let snap = ctx.pool.pass
if snap.coveredAccounts.isFull:
# All account hashes are covered by completed range fetch processes
# for all pivot environments. So reset covering and record full-ness level.
snap.covAccTimesFull.inc
snap.coveredAccounts.clear()
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
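
A note on the semantics of the relocated helpers above: `accountsCoverage()` adds the number of completed 100% roll-overs to the fill factor of the current registry, so the value deliberately exceeds 1.0 once `accountsCoverage100PcRollOver()` has fired. A minimal sketch of that arithmetic, using a hypothetical `demoCoverage` stand-in rather than the module's own API:

proc demoCoverage(currentFraction: float; timesFull: uint): float =
  ## Mirrors the shape of `accountsCoverage()`: fraction covered in the
  ## current registry plus the number of completed 100% passes.
  currentFraction + timesFull.float

when isMainModule:
  # one completed roll-over plus half of the current registry => 1.5
  doAssert demoCoverage(0.5, 1) == 1.5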

View File

@ -15,8 +15,8 @@ import
chronos,
eth/[common, p2p],
../../../../../misc/sync_ctrl,
../../../../worker_desc,
../../../get/[get_error, get_block_header]
../../../get/[get_error, get_block_header],
../snap_pass_desc
logScope:
topics = "snap-ctrl"
@ -35,13 +35,14 @@ proc beaconHeaderUpdatebuBlockNumber*(
## This function is typically used for testing and debugging.
let
ctx = buddy.ctx
snap = ctx.pool.pass
peer = buddy.peer
trace "fetch beacon header", peer, num
if ctx.pool.beaconHeader.blockNumber < num:
if snap.beaconHeader.blockNumber < num:
let rc = await buddy.getBlockHeader(num)
if rc.isOk:
ctx.pool.beaconHeader = rc.value
snap.beaconHeader = rc.value
proc beaconHeaderUpdateFromFile*(
@ -61,6 +62,7 @@ proc beaconHeaderUpdateFromFile*(
return
rc.value
snap = ctx.pool.pass
peer = buddy.peer
var
@ -72,20 +74,20 @@ proc beaconHeaderUpdateFromFile*(
if isHash:
let hash = hashOrNum.hash
trace "External beacon info", peer, hash
if hash != ctx.pool.beaconHeader.hash:
if hash != snap.beaconHeader.hash:
rc = await buddy.getBlockHeader(hash)
else:
let num = hashOrNum.number
trace "External beacon info", peer, num
if ctx.pool.beaconHeader.blockNumber < num:
if snap.beaconHeader.blockNumber < num:
rc = await buddy.getBlockHeader(num)
except CatchableError as e:
trace "Exception while parsing beacon info", peer, isHash,
name=($e.name), msg=(e.msg)
if rc.isOk:
if ctx.pool.beaconHeader.blockNumber < rc.value.blockNumber:
ctx.pool.beaconHeader = rc.value
if snap.beaconHeader.blockNumber < rc.value.blockNumber:
snap.beaconHeader = rc.value
# ------------------------------------------------------------------------------
# End

View File

@ -63,9 +63,10 @@ import
chronos,
eth/common,
stew/interval_set,
"../../../.."/[constants, range_desc, worker_desc],
"../../../.."/[constants, range_desc],
../../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_inspect,
hexary_nearby]
hexary_nearby],
../snap_pass_desc
logScope:
topics = "snap-find"
@ -107,7 +108,7 @@ template noExceptionOops(info: static[string]; code: untyped) =
# ------------------------------------------------------------------------------
proc missingNodesFind*(
ranges: SnapRangeBatchRef;
ranges: RangeBatchRef;
rootKey: NodeKey;
getFn: HexaryGetFn;
planBLevelMax: uint8;

View File

@ -13,21 +13,22 @@
import
std/sets,
chronicles,
eth/[common, p2p],
eth/common,
stew/[interval_set, keyed_queue],
"../../../.."/[constants, range_desc, worker_desc],
../../../db/[hexary_inspect, snapdb_storage_slots]
"../../../.."/[constants, range_desc],
../../../db/[hexary_inspect, snapdb_storage_slots],
../snap_pass_desc
logScope:
topics = "snap-slots"
type
StoQuSlotsKVP* = KeyedQueuePair[Hash256,SnapSlotsQueueItemRef]
StoQuSlotsKVP* = KeyedQueuePair[Hash256,SlotsQueueItemRef]
## Key-value return code from `SnapSlotsQueue` handler
StoQuPartialSlotsQueue = object
## Return type for `getOrMakePartial()`
stoQu: SnapSlotsQueueItemRef
stoQu: SlotsQueueItemRef
isCompleted: bool
const
@ -44,7 +45,7 @@ template logTxt(info: static[string]): static[string] =
proc `$`(rs: NodeTagRangeSet): string =
rs.fullPC3
proc `$`(tr: SnapTodoRanges): string =
proc `$`(tr: UnprocessedRanges): string =
tr.fullPC3
template noExceptionOops(info: static[string]; code: untyped) =
@ -79,8 +80,8 @@ proc updatePartial(
else:
# New entry
let
stoSlo = SnapRangeBatchRef(processed: NodeTagRangeSet.init())
stoItem = SnapSlotsQueueItemRef(accKey: accKey, slots: stoSlo)
stoSlo = RangeBatchRef(processed: NodeTagRangeSet.init())
stoItem = SlotsQueueItemRef(accKey: accKey, slots: stoSlo)
discard env.fetchStoragePart.append(stoRoot, stoItem)
stoSlo.unprocessed.init(clear = true)
@ -141,8 +142,8 @@ proc appendPartial(
else:
# Restore missing range
let
stoSlo = SnapRangeBatchRef(processed: NodeTagRangeSet.init())
stoItem = SnapSlotsQueueItemRef(accKey: accKey, slots: stoSlo)
stoSlo = RangeBatchRef(processed: NodeTagRangeSet.init())
stoItem = SlotsQueueItemRef(accKey: accKey, slots: stoSlo)
discard env.fetchStoragePart.append(stoRoot, stoItem)
stoSlo.unprocessed.init(clear = true)
discard stoSlo.processed.merge FullNodeTagRange
@ -231,7 +232,7 @@ proc storageQueueAppendFull*(
## a new entry was added.
let
notPart = env.fetchStoragePart.delete(stoRoot).isErr
stoItem = SnapSlotsQueueItemRef(accKey: accKey)
stoItem = SlotsQueueItemRef(accKey: accKey)
env.parkedStorage.excl accKey # Un-park (if any)
env.fetchStorageFull.append(stoRoot, stoItem) and notPart

View File

@ -39,12 +39,13 @@
import
std/[math, sequtils],
chronicles,
eth/[common, p2p],
eth/common,
stew/[byteutils, interval_set, keyed_queue, sorted_set],
../../../../../../utils/prettify,
"../../../.."/[range_desc, worker_desc],
"../../../.."/range_desc,
../../../db/[hexary_desc, hexary_envelope, hexary_error,
hexary_paths, snapdb_accounts]
hexary_paths, snapdb_accounts],
../snap_pass_desc
logScope:
topics = "snap-swapin"
@ -187,12 +188,12 @@ proc otherProcessedRanges(
# ------------------------------------------------------------------------------
proc swapIn(
processed: NodeTagRangeSet; # Covered node ranges to be updated
unprocessed: var SnapTodoRanges; # Uncovered node ranges to be updated
otherPivots: seq[SwapInPivot]; # Other pivots list (read only)
rootKey: NodeKey; # Start node into target hexary trie
getFn: HexaryGetFn; # Abstract database access
loopMax: int; # Prevent from looping too often
processed: NodeTagRangeSet; # Covered node ranges to be updated
unprocessed: var UnprocessedRanges; # Uncovered node ranges to be updated
otherPivots: seq[SwapInPivot]; # Other pivots list (read only)
rootKey: NodeKey; # Start node into target hexary trie
getFn: HexaryGetFn; # Abstract database access
loopMax: int; # Prevent from looping too often
): (seq[NodeTagRangeSet],int) =
## Collect already processed ranges from the argument `otherPivots` and merge
## them onto the argument sets `processed` and `unprocessed`. For each entry
@ -260,7 +261,7 @@ proc swapInAccounts*(
rootKey = env.stateHeader.stateRoot.to(NodeKey)
getFn = ctx.pool.snapDb.getAccountFn
others = toSeq(ctx.pool.pivotTable.nextPairs)
others = toSeq(ctx.pool.pass.pivotTable.nextPairs)
# Swap in from mothballed pivots different from the current one
.filterIt(it.data.archived and it.key.to(NodeKey) != rootKey)
@ -308,7 +309,7 @@ proc swapInAccounts*(
if others[n].pivot.fetchStorageFull.hasKey(stRoot):
let accKey = others[n].pivot.fetchStorageFull[stRoot].accKey
discard env.fetchStorageFull.append(
stRoot, SnapSlotsQueueItemRef(acckey: accKey))
stRoot, SlotsQueueItemRef(acckey: accKey))
nSlotAccounts.inc
rc = others[n].pivot.storageAccounts.gt(rc.value.key)

View File

@ -17,11 +17,12 @@ import
eth/p2p, # trie/trie_defs],
stew/[interval_set, keyed_queue, sorted_set],
"../../../.."/[misc/ticker, sync_desc, types],
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../db/[hexary_error, snapdb_accounts, snapdb_contracts, snapdb_pivot],
./helper/storage_queue,
./helper/[accounts_coverage, storage_queue],
"."/[heal_accounts, heal_storage_slots, range_fetch_accounts,
range_fetch_contracts, range_fetch_storage_slots]
range_fetch_contracts, range_fetch_storage_slots],
./snap_pass_desc
logScope:
topics = "snap-pivot"
@ -55,11 +56,11 @@ proc accountsHealingOk(
): bool =
## Returns `true` if accounts healing is enabled for this pivot.
not env.fetchAccounts.processed.isEmpty and
healAccountsCoverageTrigger <= ctx.pivotAccountsCoverage()
healAccountsCoverageTrigger <= ctx.accountsCoverage()
proc init(
T: type SnapRangeBatchRef; # Collection of sets of account ranges
T: type RangeBatchRef; # Collection of sets of account ranges
ctx: SnapCtxRef; # Some global context
): T =
## Account ranges constructor
@ -68,11 +69,11 @@ proc init(
result.processed = NodeTagRangeSet.init()
# Update coverage level roll over
ctx.pivotAccountsCoverage100PcRollOver()
ctx.accountsCoverage100PcRollOver()
# Initialise accounts range fetch batch, the pair of `fetchAccounts[]` range
# sets. Deprioritise already processed ranges by moving it to the second set.
for iv in ctx.pool.coveredAccounts.increasing:
for iv in ctx.pool.pass.coveredAccounts.increasing:
discard result.unprocessed[0].reduce iv
discard result.unprocessed[1].merge iv
@ -84,14 +85,14 @@ proc init(
## Pivot constructor.
result = T(
stateHeader: header,
fetchAccounts: SnapRangeBatchRef.init(ctx))
fetchAccounts: RangeBatchRef.init(ctx))
result.storageAccounts.init()
# ------------------------------------------------------------------------------
# Public functions: pivot table related
# ------------------------------------------------------------------------------
proc beforeTopMostlyClean*(pivotTable: var SnapPivotTable) =
proc beforeTopMostlyClean*(pivotTable: var PivotTable) =
## Clean up the pivot queues of the entry before the top one. The queues hold
## the pivot data that consume most of the memory. The cleaned pivot is no
## longer usable after cleaning but might be useful as a historic record.
@ -99,7 +100,7 @@ proc beforeTopMostlyClean*(pivotTable: var SnapPivotTable) =
if rc.isOk:
rc.value.pivotMothball
proc topNumber*(pivotTable: var SnapPivotTable): BlockNumber =
proc topNumber*(pivotTable: var PivotTable): BlockNumber =
## Return the block number of the top pivot entry, or zero if there is none.
let rc = pivotTable.lastValue
if rc.isOk:
@ -107,9 +108,9 @@ proc topNumber*(pivotTable: var SnapPivotTable): BlockNumber =
proc reverseUpdate*(
pivotTable: var SnapPivotTable; # Pivot table
header: BlockHeader; # Header to generate new pivot from
ctx: SnapCtxRef; # Some global context
pivotTable: var PivotTable; # Pivot table
header: BlockHeader; # Header to generate new pivot from
ctx: SnapCtxRef; # Some global context
) =
## Activate environment for earlier state root implied by `header` argument.
##
@ -130,8 +131,8 @@ proc reverseUpdate*(
proc tickerStats*(
pivotTable: var SnapPivotTable; # Pivot table
ctx: SnapCtxRef; # Some global context
pivotTable: var PivotTable; # Pivot table
ctx: SnapCtxRef; # Some global context
): TickerSnapStatsUpdater =
## This function returns a function of type `TickerStatsUpdater` that prints
## out pivot table statistics. The returned function is supposed to drive
@ -149,7 +150,7 @@ proc tickerStats*(
var
aSum, aSqSum, uSum, uSqSum, sSum, sSqSum, cSum, cSqSum: float
count = 0
for kvp in ctx.pool.pivotTable.nextPairs:
for kvp in ctx.pool.pass.pivotTable.nextPairs:
# Accounts mean & variance
let aLen = kvp.data.nAccounts.float
@ -172,9 +173,9 @@ proc tickerStats*(
cSum += cLen
cSqSum += cLen * cLen
let
env = ctx.pool.pivotTable.lastValue.get(otherwise = nil)
accCoverage = (ctx.pool.coveredAccounts.fullFactor +
ctx.pool.covAccTimesFull.float)
env = ctx.pool.pass.pivotTable.lastValue.get(otherwise = nil)
accCoverage = (ctx.pool.pass.coveredAccounts.fullFactor +
ctx.pool.pass.covAccTimesFull.float)
accFill = meanStdDev(uSum, uSqSum, count)
var
beaconBlock = none(BlockNumber)
@ -187,13 +188,13 @@ proc tickerStats*(
procChunks = env.fetchAccounts.processed.chunks
stoQuLen = some(env.storageQueueTotal())
ctraQuLen = some(env.fetchContracts.len)
if 0 < ctx.pool.beaconHeader.blockNumber:
beaconBlock = some(ctx.pool.beaconHeader.blockNumber)
if 0 < ctx.pool.pass.beaconHeader.blockNumber:
beaconBlock = some(ctx.pool.pass.beaconHeader.blockNumber)
TickerSnapStats(
beaconBlock: beaconBlock,
pivotBlock: pivotBlock,
nQueues: ctx.pool.pivotTable.len,
nQueues: ctx.pool.pass.pivotTable.len,
nAccounts: meanStdDev(aSum, aSqSum, count),
nSlotLists: meanStdDev(sSum, sSqSum, count),
nContracts: meanStdDev(cSum, cSqSum, count),
@ -223,7 +224,7 @@ proc pivotMothball*(env: SnapPivotRef) =
# Simplify storage slots queues by resolving partial slots into full list
for kvp in env.fetchStoragePart.nextPairs:
discard env.fetchStorageFull.append(
kvp.key, SnapSlotsQueueItemRef(acckey: kvp.data.accKey))
kvp.key, SlotsQueueItemRef(acckey: kvp.data.accKey))
env.fetchStoragePart.clear()
# Provide index into `fetchStorageFull`
@ -265,7 +266,7 @@ proc execSnapSyncAction*(
await buddy.rangeFetchAccounts(env)
# Update 100% accounting
ctx.pivotAccountsCoverage100PcRollOver()
ctx.accountsCoverage100PcRollOver()
# Run at least one round fetching storage slots and contracts even if
# the `archived` flag is set in order to keep the batch queue small.
@ -304,7 +305,7 @@ proc execSnapSyncAction*(
proc saveCheckpoint*(
env: SnapPivotRef; # Current pivot environment
env: SnapPivotRef; # Current pivot environment
ctx: SnapCtxRef; # Some global context
): Result[int,HexaryError] =
## Save current sync admin data. On success, the size of the data record
@ -351,7 +352,7 @@ proc pivotRecoverFromCheckpoint*(
## `processed`, `unprocessed`, and the `fetchStorageFull` lists are
## initialised.
##
let recov = ctx.pool.recovery
let recov = ctx.pool.pass.recovery
if recov.isNil:
return
@ -363,8 +364,8 @@ proc pivotRecoverFromCheckpoint*(
if topLevel:
env.fetchAccounts.unprocessed.reduce NodeTagRange.new(minPt, maxPt)
discard env.fetchAccounts.processed.merge(minPt, maxPt)
discard ctx.pool.coveredAccounts.merge(minPt, maxPt)
ctx.pivotAccountsCoverage100PcRollOver() # update coverage level roll over
discard ctx.pool.pass.coveredAccounts.merge(minPt, maxPt)
ctx.accountsCoverage100PcRollOver() # update coverage level roll over
# Handle storage slots
let stateRoot = recov.state.header.stateRoot
@ -420,12 +421,12 @@ proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
## it will not proceed to the next scheduler task.
let
ctx = buddy.ctx
beaconHeader = ctx.pool.beaconHeader
beaconHeader = ctx.pool.pass.beaconHeader
var
pivotHeader: BlockHeader
block:
let rc = ctx.pool.pivotTable.lastValue
let rc = ctx.pool.pass.pivotTable.lastValue
if rc.isOk:
pivotHeader = rc.value.stateHeader
@ -434,7 +435,7 @@ proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
# If the entry before the previous entry is unused, then run a pool mode
# based session (which should enable a pivot table purge).
block:
let rc = ctx.pool.pivotTable.beforeLast
let rc = ctx.pool.pass.pivotTable.beforeLast
if rc.isOk and rc.value.data.fetchAccounts.processed.isEmpty:
ctx.poolMode = true
@ -443,7 +444,7 @@ proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
pivot=pivotHeader.blockNumber.toStr,
beacon=beaconHeader.blockNumber.toStr, poolMode=ctx.poolMode
discard ctx.pool.pivotTable.lruAppend(
discard ctx.pool.pass.pivotTable.lruAppend(
beaconHeader.stateRoot, SnapPivotRef.init(ctx, beaconHeader),
pivotTableLruEntriesMax)
@ -458,10 +459,10 @@ proc pivotUpdateBeaconHeaderCB*(ctx: SnapCtxRef): SyncReqNewHeadCB =
## Update beacon header. This function is intended as a call back function
## for the RPC module.
result = proc(h: BlockHeader) {.gcsafe.} =
if ctx.pool.beaconHeader.blockNumber < h.blockNumber:
if ctx.pool.pass.beaconHeader.blockNumber < h.blockNumber:
# when extraTraceMessages:
# trace logTxt "external beacon info update", header=h.blockNumber.toStr
ctx.pool.beaconHeader = h
ctx.pool.pass.beaconHeader = h
# ------------------------------------------------------------------------------
# Public function, debugging

View File

@ -49,10 +49,11 @@ import
eth/[common, p2p],
stew/[interval_set, keyed_queue],
"../../../.."/[sync_desc, types],
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../get/[get_error, get_account_range],
../../db/[hexary_envelope, snapdb_accounts],
./helper/[storage_queue, swap_in]
./helper/[accounts_coverage, storage_queue, swap_in],
./snap_pass_desc
logScope:
topics = "snap-acc"
@ -188,8 +189,8 @@ proc accountsRangefetchImpl(
fa.unprocessed.reduce w
# Register consumed intervals on the accumulators over all state roots.
discard fa.processed.merge w
discard ctx.pool.coveredAccounts.merge w
ctx.pivotAccountsCoverage100PcRollOver() # update coverage level roll over
discard ctx.pool.pass.coveredAccounts.merge w
ctx.accountsCoverage100PcRollOver() # update coverage level roll over
# Register accounts with storage slots on the storage TODO list.
env.storageQueueAppend dd.withStorage

View File

@ -21,9 +21,10 @@ import
chronos,
eth/[common, p2p],
stew/keyed_queue,
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../get/[get_error, get_byte_codes],
../../db/snapdb_contracts
../../db/snapdb_contracts,
./snap_pass_desc
logScope:
topics = "snap-con"

View File

@ -69,10 +69,11 @@ import
chronos,
eth/p2p,
stew/[interval_set, keyed_queue],
"../../.."/[constants, range_desc, worker_desc],
"../../.."/[constants, range_desc],
../../get/[get_error, get_storage_ranges],
../../db/[hexary_error, snapdb_storage_slots],
./helper/storage_queue
./helper/storage_queue,
./snap_pass_desc
logScope:
topics = "snap-slot"

View File

@ -0,0 +1,233 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
# http://www.apache.org/licenses/LICENSE-2.0)
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
# http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.
{.push raises: [].}
import
std/[hashes, sets],
eth/common,
stew/[interval_set, keyed_queue, sorted_set],
"../../.."/[range_desc, worker_desc],
../../db/snapdb_pivot
export
worker_desc # base descriptor
type
AccountsList* = SortedSet[NodeTag,Hash256]
## Sorted pair of `(account,state-root)` entries
SlotsQueue* = KeyedQueue[Hash256,SlotsQueueItemRef]
## Handles list of storage slots data to fetch, indexed by storage root.
##
## Typically, storage data requests cover the full storage slots trie. If
## there is only a partial list of slots to fetch, the queue entry is
## stored left-most for easy access.
SlotsQueueItemRef* = ref object
## Storage slots request data. This entry is similar to `AccountSlotsHeader`
## where the optional `subRange` interval has been replaced by an interval
## range + healing support.
accKey*: NodeKey ## Owner account
slots*: RangeBatchRef ## Slots to fetch, nil => all slots
ContractsQueue* = KeyedQueue[Hash256,NodeKey]
## Handles a hash-keyed list of contract data to fetch, with associated accounts
UnprocessedRanges* = array[2,NodeTagRangeSet]
## Pair of sets of ``unprocessed`` node ranges that need to be fetched and
## integrated. The ranges in the first set must be handled with priority.
##
## This data structure is used for coordinating peers that run quasi
## parallel.
RangeBatchRef* = ref object
## `NodeTag` ranges to fetch, healing support
unprocessed*: UnprocessedRanges ## Range of slots to be fetched
processed*: NodeTagRangeSet ## Node ranges definitely processed
SnapPivotRef* = ref object
## Per-state root cache for particular snap data environment
stateHeader*: BlockHeader ## Pivot state, containing state root
# Accounts download coverage
fetchAccounts*: RangeBatchRef ## Set of accounts ranges to fetch
# Contract code queue
fetchContracts*: ContractsQueue ## Contracts to fetch & store
# Storage slots download
fetchStorageFull*: SlotsQueue ## Fetch storage trie for these accounts
fetchStoragePart*: SlotsQueue ## Partial storage trie to complete
parkedStorage*: HashSet[NodeKey] ## Storage batch items in use
# Info
nAccounts*: uint64 ## Imported # of accounts
nSlotLists*: uint64 ## Imported # of account storage tries
nContracts*: uint64 ## Imported # of contract code sets
# Checkpointing
savedFullPivotOk*: bool ## This fully completed pivot was saved
# Mothballing, ready to be swapped into newer pivot record
storageAccounts*: AccountsList ## Accounts with missing storage slots
archived*: bool ## Not latest pivot, anymore
PivotTable* = KeyedQueue[Hash256,SnapPivotRef]
## LRU table, indexed by state root
RecoveryRef* = ref object
## Recovery context
state*: SnapDbPivotRegistry ## Saved recovery context state
level*: int ## top level is zero
SnapPassCtxRef* = ref object of RootRef
## Global context extension, snap sync parameters, pivot table
pivotTable*: PivotTable ## Per state root environment
completedPivot*: SnapPivotRef ## Start full sync from here
beaconHeader*: BlockHeader ## Running on beacon chain
coveredAccounts*: NodeTagRangeSet ## Derived from all available accounts
covAccTimesFull*: uint ## # of 100% coverages
recovery*: RecoveryRef ## Current recovery checkpoint/context
# ------------------------------------------------------------------------------
# Public getter/setter
# ------------------------------------------------------------------------------
proc pass*(pool: SnapCtxData): auto =
## Getter, pass local descriptor
pool.snap.SnapPassCtxRef
proc `pass=`*(pool: var SnapCtxData; val: SnapPassCtxRef) =
## Setter, pass local descriptor
pool.snap = val
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc hash*(a: SlotsQueueItemRef): Hash =
## Table/KeyedQueue mixin
cast[pointer](a).hash
proc hash*(a: Hash256): Hash =
## Table/KeyedQueue mixin
a.data.hash
# ------------------------------------------------------------------------------
# Public helpers: UnprocessedRanges
# ------------------------------------------------------------------------------
proc init*(q: var UnprocessedRanges; clear = false) =
## Populate node range sets with maximal range in the first range set. This
## kind of pair of interval sets is managed as follows:
## * As long as possible, fetch and merge back intervals on the first set.
## * If the first set is empty and some intervals are to be fetched, swap
## first and second interval lists.
## That way, intervals from the first set are prioritised while the rest is
## considered after the prioritised intervals are exhausted.
q[0] = NodeTagRangeSet.init()
q[1] = NodeTagRangeSet.init()
if not clear:
discard q[0].merge FullNodeTagRange
proc clear*(q: var UnprocessedRanges) =
## Reset argument range sets empty.
q[0].clear()
q[1].clear()
proc merge*(q: var UnprocessedRanges; iv: NodeTagRange) =
## Unconditionally merge the node range into the account ranges list.
discard q[0].merge(iv)
discard q[1].reduce(iv)
proc mergeSplit*(q: var UnprocessedRanges; iv: NodeTagRange) =
## Ditto w/priorities partially reversed
if iv.len == 1:
discard q[0].reduce iv
discard q[1].merge iv
else:
let
# note that (`iv.len` == 0) => (`iv` == `FullNodeTagRange`)
midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
iv1 = NodeTagRange.new(iv.minPt, midPt)
iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
discard q[0].reduce iv1
discard q[1].merge iv1
discard q[0].merge iv2
discard q[1].reduce iv2
proc reduce*(q: var UnprocessedRanges; iv: NodeTagRange) =
## Unconditionally remove the node range from the account ranges list
discard q[0].reduce(iv)
discard q[1].reduce(iv)
iterator ivItems*(q: var UnprocessedRanges): NodeTagRange =
## Iterator over all list entries
for ivSet in q:
for iv in ivSet.increasing:
yield iv
proc fetch*(
q: var UnprocessedRanges;
maxLen = 0.u256;
): Result[NodeTagRange,void] =
## Fetch interval from node ranges with maximal size `maxLen`, where
## `0.u256` is interpreted as `2^256`.
# Swap batch queues if the first one is empty
if q[0].isEmpty:
swap(q[0], q[1])
# Fetch from first range list
let rc = q[0].ge()
if rc.isErr:
return err()
let
jv = rc.value
iv = block:
if maxLen == 0 or (0 < jv.len and jv.len <= maxLen):
jv
else:
# Note that either:
# (`jv.len` == 0) => (`jv` == `FullNodeTagRange`) => `jv.minPt` == 0
# or
# (`maxLen` < `jv.len`) => (`jv.minPt`+`maxLen` <= `jv.maxPt`)
NodeTagRange.new(jv.minPt, jv.minPt + maxLen)
discard q[0].reduce(iv)
ok(iv)
# -----------------
proc verify*(q: var UnprocessedRanges): bool =
## Verify consistency, i.e. that the two sets of ranges have no overlap.
if q[0].chunks == 0 or q[1].chunks == 0:
# At least one set is empty
return true
# So neither set is empty
if q[0].total == 0 or q[1].total == 0:
# At least one set is maximal and the other non-empty
return false
# So neither set is empty nor full
let (a,b) = if q[0].chunks < q[1].chunks: (0,1) else: (1,0)
for iv in q[a].increasing:
if 0 < q[b].covered(iv):
return false
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
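
To illustrate the queue-pair discipline documented for `UnprocessedRanges` above (work from the prioritised first set, swap in the second set only once the first is exhausted), here is a deliberately simplified, self-contained sketch. It replaces the `NodeTagRangeSet` intervals with discrete tags in a `HashSet[int]` and ignores the `maxLen` clipping; all `Demo*` names are hypothetical and not part of the module.

import std/[options, sets]

type
  # Stand-in for `UnprocessedRanges`: q[0] is prioritised, q[1] is not
  DemoUnprocessed = array[2, HashSet[int]]

proc merge(q: var DemoUnprocessed; tag: int) =
  ## Unconditionally (re-)schedule a tag with priority
  q[0].incl tag
  q[1].excl tag

proc fetch(q: var DemoUnprocessed): Option[int] =
  ## Take one tag, preferring the first set; swap in the second set only
  ## when the first one is exhausted
  if q[0].len == 0:
    swap(q[0], q[1])
  if q[0].len == 0:
    return none(int)
  some(q[0].pop())

when isMainModule:
  var q: DemoUnprocessed
  q[1].incl 7                     # deprioritised item (e.g. processed elsewhere)
  q.merge 3                       # freshly scheduled, prioritised item
  doAssert q.fetch() == some(3)
  doAssert q.fetch() == some(7)   # second set swapped in once the first is empty
  doAssert q.fetch() == none(int)

The real `fetch()` in the module above additionally clips the returned interval to `maxLen` before reducing it from the first set.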

View File

@ -11,95 +11,21 @@
{.push raises: [].}
import
std/hashes,
chronos,
eth/[common, p2p],
stew/[interval_set, keyed_queue, sorted_set],
../../db/select_backend,
../misc/[best_pivot, block_queue, ticker],
../misc/ticker,
../sync_desc,
./worker/get/get_error,
./worker/db/[snapdb_desc, snapdb_pivot],
./range_desc
./worker/db/[snapdb_desc]
export
sync_desc # worker desc prototype
type
SnapAccountsList* = SortedSet[NodeTag,Hash256]
## Sorted pair of `(account,state-root)` entries
SnapSlotsQueue* = KeyedQueue[Hash256,SnapSlotsQueueItemRef]
## Handles list of storage slots data to fetch, indexed by storage root.
##
## Typically, storage data requests cover the full storage slots trie. If
## there is only a partial list of slots to fetch, the queue entry is
## stored left-most for easy access.
SnapSlotsQueueItemRef* = ref object
## Storage slots request data. This entry is similar to `AccountSlotsHeader`
## where the optional `subRange` interval has been replaced by an interval
## range + healing support.
accKey*: NodeKey ## Owner account
slots*: SnapRangeBatchRef ## Clots to fetch, nil => all slots
SnapCtraQueue* = KeyedQueue[Hash256,NodeKey]
## Handles hash key list of contract data to fetch with accounts associated
SnapTodoRanges* = array[2,NodeTagRangeSet]
## Pair of sets of ``unprocessed`` node ranges that need to be fetched and
## integrated. The ranges in the first set must be handled with priority.
##
## This data structure is used for coordinating peers that run quasi
## parallel.
SnapRangeBatchRef* = ref object
## `NodeTag` ranges to fetch, healing support
unprocessed*: SnapTodoRanges ## Range of slots to be fetched
processed*: NodeTagRangeSet ## Node ranges definitely processed
SnapPivotRef* = ref object
## Per-state root cache for particular snap data environment
stateHeader*: BlockHeader ## Pivot state, containg state root
# Accounts download coverage
fetchAccounts*: SnapRangeBatchRef ## Set of accounts ranges to fetch
# Contract code queue
fetchContracts*: SnapCtraQueue ## Contacts to fetch & store
# Storage slots download
fetchStorageFull*: SnapSlotsQueue ## Fetch storage trie for these accounts
fetchStoragePart*: SnapSlotsQueue ## Partial storage trie to com[plete
parkedStorage*: HashSet[NodeKey] ## Storage batch items in use
# Info
nAccounts*: uint64 ## Imported # of accounts
nSlotLists*: uint64 ## Imported # of account storage tries
nContracts*: uint64 ## Imported # of contract code sets
# Checkponting
savedFullPivotOk*: bool ## This fully completed pivot was saved
# Mothballing, ready to be swapped into newer pivot record
storageAccounts*: SnapAccountsList ## Accounts with missing storage slots
archived*: bool ## Not latest pivot, anymore
SnapPivotTable* = KeyedQueue[Hash256,SnapPivotRef]
## LRU table, indexed by state root
SnapRecoveryRef* = ref object
## Recovery context
state*: SnapDbPivotRegistry ## Saved recovery context state
level*: int ## top level is zero
SnapBuddyData* = object
## Per-worker local descriptor data extension
## Peer-worker local descriptor data extension
errors*: GetErrorStatsRef ## For error handling
# Full sync continuation parameters
bPivot*: BestPivotWorkerRef ## Local pivot worker descriptor
bQueue*: BlockQueueWorkerRef ## Block queue worker
full*: RootRef ## Peer local full sync descriptor
SnapSyncPassType* = enum
## Current sync mode, after a snapshot has been downloaded, the system
@ -128,18 +54,11 @@ type
syncMode*: SnapSyncPass ## Sync mode methods & data
# Snap sync parameters, pivot table
pivotTable*: SnapPivotTable ## Per state root environment
completePivot*: SnapPivotRef ## Start full sync from here
beaconHeader*: BlockHeader ## Running on beacon chain
coveredAccounts*: NodeTagRangeSet ## Derived from all available accounts
covAccTimesFull*: uint ## # of 100% coverages
recovery*: SnapRecoveryRef ## Current recovery checkpoint/context
snap*: RootRef ## Global snap sync descriptor
# Full sync continuation parameters
fullHeader*: Option[BlockHeader] ## Pivot hand over
startNumber*: Option[BlockNumber] ## Start full sync from here
bPivot*: BestPivotCtxRef ## Global pivot descriptor
bCtx*: BlockQueueCtxRef ## Global block queue descriptor
fullHeader*: Option[BlockHeader] ## Start full sync from here
full*: RootRef ## Global full sync descriptor
SnapBuddyRef* = BuddyRef[SnapCtxData,SnapBuddyData]
## Extended worker peer descriptor
@ -147,137 +66,6 @@ type
SnapCtxRef* = CtxRef[SnapCtxData]
## Extended global descriptor
# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------
proc hash*(a: SnapSlotsQueueItemRef): Hash =
## Table/KeyedQueue mixin
cast[pointer](a).hash
proc hash*(a: Hash256): Hash =
## Table/KeyedQueue mixin
a.data.hash
# ------------------------------------------------------------------------------
# Public helpers: coverage
# ------------------------------------------------------------------------------
proc pivotAccountsCoverage*(ctx: SnapCtxRef): float =
## Returns the accounts coverage factor
ctx.pool.coveredAccounts.fullFactor + ctx.pool.covAccTimesFull.float
proc pivotAccountsCoverage100PcRollOver*(ctx: SnapCtxRef) =
## Roll over `coveredAccounts` registry when it reaches 100%.
if ctx.pool.coveredAccounts.isFull:
# All of accounts hashes are covered by completed range fetch processes
# for all pivot environments. So reset covering and record full-ness level.
ctx.pool.covAccTimesFull.inc
ctx.pool.coveredAccounts.clear()
# ------------------------------------------------------------------------------
# Public helpers: SnapTodoRanges
# ------------------------------------------------------------------------------
proc init*(q: var SnapTodoRanges; clear = false) =
## Populate node range sets with maximal range in the first range set. This
## kind of pair or interval sets is managed as follows:
## * As long as possible, fetch and merge back intervals on the first set.
## * If the first set is empty and some intervals are to be fetched, swap
## first and second interval lists.
## That way, intervals from the first set are prioitised while the rest is
## is considered after the prioitised intervals are exhausted.
q[0] = NodeTagRangeSet.init()
q[1] = NodeTagRangeSet.init()
if not clear:
discard q[0].merge FullNodeTagRange
proc clear*(q: var SnapTodoRanges) =
## Reset argument range sets empty.
q[0].clear()
q[1].clear()
proc merge*(q: var SnapTodoRanges; iv: NodeTagRange) =
## Unconditionally merge the node range into the account ranges list.
discard q[0].merge(iv)
discard q[1].reduce(iv)
proc mergeSplit*(q: var SnapTodoRanges; iv: NodeTagRange) =
## Ditto w/priorities partially reversed
if iv.len == 1:
discard q[0].reduce iv
discard q[1].merge iv
else:
let
# note that (`iv.len` == 0) => (`iv` == `FullNodeTagRange`)
midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
iv1 = NodeTagRange.new(iv.minPt, midPt)
iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
discard q[0].reduce iv1
discard q[1].merge iv1
discard q[0].merge iv2
discard q[1].reduce iv2
proc reduce*(q: var SnapTodoRanges; iv: NodeTagRange) =
## Unconditionally remove the node range from the account ranges list
discard q[0].reduce(iv)
discard q[1].reduce(iv)
iterator ivItems*(q: var SnapTodoRanges): NodeTagRange =
## Iterator over all list entries
for ivSet in q:
for iv in ivSet.increasing:
yield iv
proc fetch*(q: var SnapTodoRanges; maxLen = 0.u256): Result[NodeTagRange,void] =
## Fetch interval from node ranges with maximal size `maxLen`, where
## `0.u256` is interpreted as `2^256`.
# Swap batch queues if the first one is empty
if q[0].isEmpty:
swap(q[0], q[1])
# Fetch from first range list
let rc = q[0].ge()
if rc.isErr:
return err()
let
jv = rc.value
iv = block:
if maxLen == 0 or (0 < jv.len and jv.len <= maxLen):
jv
else:
# Note that either:
# (`jv.len` == 0) => (`jv` == `FullNodeTagRange`) => `jv.minPt` == 0
# or
# (`maxLen` < `jv.len`) => (`jv.minPt`+`maxLen` <= `jv.maxPt`)
NodeTagRange.new(jv.minPt, jv.minPt + maxLen)
discard q[0].reduce(iv)
ok(iv)
proc verify*(q: var SnapTodoRanges): bool =
## Verify consistency, i.e. that the two sets of ranges have no overlap.
if q[0].chunks == 0 or q[1].chunks == 0:
# At least one set is empty
return true
# So neither set is empty
if q[0].total == 0 or q[1].total == 0:
# At least one set is maximal and the other non-empty
return false
# So neither set is empty, not full
let (a,b) = if q[0].chunks < q[1].chunks: (0,1) else: (1,0)
for iv in q[a].increasing:
if 0 < q[b].covered(iv):
return false
true
# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------