Update snap client storage slots download and healing (#1529)
* Fix fringe condition for `GetStorageRanges` message handler why: Receiving a proved empty range was not considered at all. This led to inconsistencies of the return value, which caused subsequent errors. * Update storage range bulk download details; Mainly re-org of storage queue processing in `storage_queue_helper.nim` * Update logging variables/messages * Update storage slots healing details: Mainly clean up after improved helper functions from the sources `find_missing_nodes.nim` and `storage_queue_helper.nim`. * Simplify account fetch why: Too much fuss was made tolerating some errors. There will be an overall strategy implemented where the concert of download and healing functions is orchestrated. * Add error resilience to the concert of download and healing. why: The idea is that a peer might stop serving snap/1 accounts and storage slot downloads while still being able to support fetching nodes for healing.
This commit is contained in:
parent
9453d5bb3c
commit
5e865edec0
|
@ -123,7 +123,7 @@ proc getSlotsSpecs(
|
||||||
# Ignore missing account entry
|
# Ignore missing account entry
|
||||||
if accData.len == 0:
|
if accData.len == 0:
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "getSlotsSpecs: no such account", accKey
|
trace logTxt "getSlotsSpecs: no such account", accKey, rootKey
|
||||||
return err()
|
return err()
|
||||||
|
|
||||||
# Ignore empty storage list
|
# Ignore empty storage list
|
||||||
|
@ -169,7 +169,8 @@ iterator doTrieNodeSpecs(
|
||||||
|
|
||||||
# Fail on this group
|
# Fail on this group
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "doTrieNodeSpecs (blind)", nBlind=w.slotPaths.len
|
trace logTxt "doTrieNodeSpecs (blind)", accPath=w.accPath.toHex,
|
||||||
|
nBlind=w.slotPaths.len, nBlind0=w.slotPaths[0].toHex
|
||||||
yield (NodeKey.default, nil, EmptyBlob, w.slotPaths.len)
|
yield (NodeKey.default, nil, EmptyBlob, w.slotPaths.len)
|
||||||
|
|
||||||
|
|
||||||
|
@ -416,14 +417,9 @@ method getStorageRanges*(
|
||||||
dataAllocated += rangeProof.leafsSize
|
dataAllocated += rangeProof.leafsSize
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
if accounts.len == 1:
|
trace logTxt "getStorageRanges: data slots", iv, sizeMax, dataAllocated,
|
||||||
trace logTxt "getStorageRanges: single account", iv,
|
nAccounts=accounts.len, accKey=accHash.to(NodeKey), stoRoot=sp.stoRoot,
|
||||||
accKey=accHash.to(NodeKey), stoRoot=sp.stoRoot
|
nSlots=rangeProof.leafs.len, nProof=rangeProof.proof.len
|
||||||
|
|
||||||
#when extraTraceMessages:
|
|
||||||
# trace logTxt "getStorageRanges: data slots", iv, sizeMax, dataAllocated,
|
|
||||||
# accKey, stoRoot, nSlots=rangeProof.leafs.len,
|
|
||||||
# nProof=rangeProof.proof.len
|
|
||||||
|
|
||||||
slotLists.add rangeProof.leafs.mapIt(it.to(SnapStorage))
|
slotLists.add rangeProof.leafs.mapIt(it.to(SnapStorage))
|
||||||
if 0 < rangeProof.proof.len:
|
if 0 < rangeProof.proof.len:
|
||||||
|
@ -494,8 +490,7 @@ method getTrieNodes*(
|
||||||
let steps = partPath.hexPrefixDecode[1].hexaryPath(stateKey, getFn)
|
let steps = partPath.hexPrefixDecode[1].hexaryPath(stateKey, getFn)
|
||||||
if 0 < steps.path.len and
|
if 0 < steps.path.len and
|
||||||
steps.tail.len == 0 and steps.path[^1].nibble < 0:
|
steps.tail.len == 0 and steps.path[^1].nibble < 0:
|
||||||
let data = steps.path[^1].node.convertTo(Blob)
|
steps.path[^1].node.convertTo(Blob)
|
||||||
data
|
|
||||||
else:
|
else:
|
||||||
EmptyBlob
|
EmptyBlob
|
||||||
|
|
||||||
|
|
|
@ -11,12 +11,16 @@
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
|
std/sets,
|
||||||
eth/[common, trie/nibbles]
|
eth/[common, trie/nibbles]
|
||||||
|
|
||||||
const
|
const
|
||||||
EmptyBlob* = seq[byte].default
|
EmptyBlob* = seq[byte].default
|
||||||
## Useful shortcut
|
## Useful shortcut
|
||||||
|
|
||||||
|
EmptyBlobSet* = HashSet[Blob].default
|
||||||
|
## Useful shortcut
|
||||||
|
|
||||||
EmptyBlobSeq* = seq[Blob].default
|
EmptyBlobSeq* = seq[Blob].default
|
||||||
## Useful shortcut
|
## Useful shortcut
|
||||||
|
|
||||||
|
@ -63,11 +67,6 @@ const
|
||||||
|
|
||||||
# --------------
|
# --------------
|
||||||
|
|
||||||
accountsFetchRetryMax* = 2
|
|
||||||
## The request intervals will be slightly re-arranged after failure.
|
|
||||||
## So re-trying to fetch another range might be successful (set to 0
|
|
||||||
## for disabling retries.)
|
|
||||||
|
|
||||||
accountsSaveProcessedChunksMax* = 1000
|
accountsSaveProcessedChunksMax* = 1000
|
||||||
## Recovery data are stored if the processed ranges list contains no more
|
## Recovery data are stored if the processed ranges list contains no more
|
||||||
## than this many range *chunks*.
|
## than this many range *chunks*.
|
||||||
|
@ -82,6 +81,14 @@ const
|
||||||
## If there are too many dangling nodes, no data will be saved and restart
|
## If there are too many dangling nodes, no data will be saved and restart
|
||||||
## has to perform from scratch or an earlier checkpoint.
|
## has to perform from scratch or an earlier checkpoint.
|
||||||
|
|
||||||
|
# --------------
|
||||||
|
|
||||||
|
storageSlotsFetchFailedFullMax* = fetchRequestStorageSlotsMax + 100
|
||||||
|
## Maximal number of failures when fetching full range storage slots.
|
||||||
|
## These failed slot ranges are only called for once in the same cycle.
|
||||||
|
|
||||||
|
storageSlotsFetchFailedPartialMax* = 300
|
||||||
|
## Ditto for partial range storage slots.
|
||||||
|
|
||||||
storageSlotsTrieInheritPerusalMax* = 30_000
|
storageSlotsTrieInheritPerusalMax* = 30_000
|
||||||
## Maximal number of nodes to visit in order to find out whether this
|
## Maximal number of nodes to visit in order to find out whether this
|
||||||
|
@ -108,26 +115,33 @@ const
|
||||||
healAccountsInspectionPlanBLevel* = 4
|
healAccountsInspectionPlanBLevel* = 4
|
||||||
## Search this level deep for missing nodes if `hexaryEnvelopeDecompose()`
|
## Search this level deep for missing nodes if `hexaryEnvelopeDecompose()`
|
||||||
## only produces existing nodes.
|
## only produces existing nodes.
|
||||||
##
|
|
||||||
## The maximal number of nodes visited at level 3 is *4KiB* and at level 4
|
|
||||||
## is *64Kib*.
|
|
||||||
|
|
||||||
healAccountsInspectionPlanBRetryMax* = 2
|
healAccountsInspectionPlanBRetryMax* = 2
|
||||||
## Retry inspection if this may times unless there is at least one dangling
|
## Retry inspection with depth level argument starting at
|
||||||
## node found.
|
## `healAccountsInspectionPlanBLevel-1` and counting down at most this
|
||||||
|
## many times until there is at least one dangling node found and the
|
||||||
|
## depth level argument remains positive. The cumulative depth of the
|
||||||
|
## iterated search is
|
||||||
|
## ::
|
||||||
|
## b 1
|
||||||
|
## Σ ν = --- (b - a + 1) (a + b)
|
||||||
|
## a 2
|
||||||
|
## for
|
||||||
|
## ::
|
||||||
|
## b = healAccountsInspectionPlanBLevel
|
||||||
|
## a = b - healAccountsInspectionPlanBRetryMax
|
||||||
|
##
|
||||||
|
|
||||||
healAccountsInspectionPlanBRetryNapMSecs* = 2
|
healAccountsInspectionPlanBRetryNapMSecs* = 2
|
||||||
## Sleep between inspection retries to allow thread switch. If this constant
|
## Sleep between inspection retries to allow thread switch. If this constant
|
||||||
## is set `0`, `1`ns wait is used.
|
## is set `0`, `1`ns wait is used.
|
||||||
|
|
||||||
healSlorageSlotsTrigger* = 0.70
|
# --------------
|
||||||
## Consider per account storage slost healing if a per-account hexary
|
|
||||||
## sub-trie has reached this factor of completeness.
|
|
||||||
|
|
||||||
healStorageSlotsInspectionPlanBLevel* = 4
|
healStorageSlotsInspectionPlanBLevel* = 5
|
||||||
## Similar to `healAccountsInspectionPlanBLevel`
|
## Similar to `healAccountsInspectionPlanBLevel`
|
||||||
|
|
||||||
healStorageSlotsInspectionPlanBRetryMax* = 2
|
healStorageSlotsInspectionPlanBRetryMax* = 99 # 5 + 4 + .. + 1 => 15
|
||||||
## Similar to `healAccountsInspectionPlanBRetryMax`
|
## Similar to `healAccountsInspectionPlanBRetryMax`
|
||||||
|
|
||||||
healStorageSlotsInspectionPlanBRetryNapMSecs* = 2
|
healStorageSlotsInspectionPlanBRetryNapMSecs* = 2
|
||||||
|
@ -138,6 +152,9 @@ const
|
||||||
## this many items will be removed from the batch queue. These items will
|
## this many items will be removed from the batch queue. These items will
|
||||||
## then be processed one by one.
|
## then be processed one by one.
|
||||||
|
|
||||||
|
healStorageSlotsFailedMax* = 300
|
||||||
|
## Ditto for partial range storage slots.
|
||||||
|
|
||||||
# --------------
|
# --------------
|
||||||
|
|
||||||
comErrorsTimeoutMax* = 3
|
comErrorsTimeoutMax* = 3
|
||||||
|
@ -167,17 +184,8 @@ const
|
||||||
|
|
||||||
static:
|
static:
|
||||||
doAssert storageSlotsQuPrioThresh < accountsSaveStorageSlotsMax
|
doAssert storageSlotsQuPrioThresh < accountsSaveStorageSlotsMax
|
||||||
|
doAssert 0 <= storageSlotsFetchFailedFullMax
|
||||||
|
doAssert 0 <= storageSlotsFetchFailedPartialMax
|
||||||
# Deprecated, to be expired
|
|
||||||
const
|
|
||||||
healInspectionBatch* = 10_000
|
|
||||||
## Number of nodes to inspect in a single batch. In between batches, a
|
|
||||||
## task/thread switch is allowed.
|
|
||||||
|
|
||||||
healInspectionBatchWaitNanoSecs* = 500
|
|
||||||
## Wait some time asynchroneously after processing `healInspectionBatch`
|
|
||||||
## nodes to allow for a pseudo -task switch.
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -13,9 +13,10 @@
|
||||||
import
|
import
|
||||||
std/[math, sequtils, strutils, hashes],
|
std/[math, sequtils, strutils, hashes],
|
||||||
eth/common,
|
eth/common,
|
||||||
stew/[byteutils, interval_set],
|
stew/interval_set,
|
||||||
stint,
|
stint,
|
||||||
../../constants,
|
../../constants,
|
||||||
|
../../utils/prettify,
|
||||||
../protocol,
|
../protocol,
|
||||||
../types
|
../types
|
||||||
|
|
||||||
|
@ -71,6 +72,11 @@ type
|
||||||
storageRoot*: Hash256 ## Start of storage tree
|
storageRoot*: Hash256 ## Start of storage tree
|
||||||
subRange*: Option[NodeTagRange] ## Sub-range of slot range covered
|
subRange*: Option[NodeTagRange] ## Sub-range of slot range covered
|
||||||
|
|
||||||
|
AccountSlotsChanged* = object
|
||||||
|
## Variant of `AccountSlotsHeader` representing some transition
|
||||||
|
account*: AccountSlotsHeader ## Account header
|
||||||
|
newRange*: Option[NodeTagRange] ## New sub-range (if-any)
|
||||||
|
|
||||||
AccountStorageRange* = object
|
AccountStorageRange* = object
|
||||||
## List of storage descriptors, the last `AccountSlots` storage data might
|
## List of storage descriptors, the last `AccountSlots` storage data might
|
||||||
## be incomplete and the `proof` is needed for proving validity.
|
## be incomplete and the `proof` is needed for proving validity.
|
||||||
|
@ -83,6 +89,8 @@ type
|
||||||
account*: AccountSlotsHeader
|
account*: AccountSlotsHeader
|
||||||
data*: seq[SnapStorage]
|
data*: seq[SnapStorage]
|
||||||
|
|
||||||
|
# See below for definition of constant `FullNodeTagRange`
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public helpers
|
# Public helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -189,6 +197,10 @@ proc digestTo*(data: Blob; T: type NodeTag): T =
|
||||||
## Hash the `data` argument
|
## Hash the `data` argument
|
||||||
keccakHash(data).to(T)
|
keccakHash(data).to(T)
|
||||||
|
|
||||||
|
const
|
||||||
|
# Cannot be defined earlier: `NodeTag` operations needed
|
||||||
|
FullNodeTagRange* = NodeTagRange.new(low(NodeTag),high(NodeTag))
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public functions: `NodeTagRange` helpers
|
# Public functions: `NodeTagRange` helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -205,11 +217,34 @@ proc isEmpty*(lrs: openArray[NodeTagRangeSet]): bool =
|
||||||
return false
|
return false
|
||||||
true
|
true
|
||||||
|
|
||||||
|
proc isEmpty*(iv: NodeTagRange): bool =
|
||||||
|
## Ditto for an interval range.
|
||||||
|
false # trivially by definition
|
||||||
|
|
||||||
|
|
||||||
proc isFull*(lrs: NodeTagRangeSet): bool =
|
proc isFull*(lrs: NodeTagRangeSet): bool =
|
||||||
## Returns `true` if the argument set `lrs` contains of the single
|
## Returns `true` if the argument set `lrs` contains of the single
|
||||||
## interval [low(NodeTag),high(NodeTag)].
|
## interval [low(NodeTag),high(NodeTag)].
|
||||||
lrs.total == 0 and 0 < lrs.chunks
|
lrs.total == 0 and 0 < lrs.chunks
|
||||||
|
|
||||||
|
proc isFull*(lrs: openArray[NodeTagRangeSet]): bool =
|
||||||
|
## Variant of `isFull()` where intervals are distributed across several
|
||||||
|
## sets. This function makes sense only if the interval sets are mutually
|
||||||
|
## disjunct.
|
||||||
|
var accu: NodeTag
|
||||||
|
for ivSet in lrs:
|
||||||
|
if 0 < ivSet.total:
|
||||||
|
if high(NodeTag) - ivSet.total < accu:
|
||||||
|
return true
|
||||||
|
accu = accu + ivSet.total
|
||||||
|
elif 0 < ivSet.chunks:
|
||||||
|
# number of points in `ivSet` is `2^256 + 1`
|
||||||
|
return true
|
||||||
|
|
||||||
|
proc isFull*(iv: NodeTagRange): bool =
|
||||||
|
## Ditto for an interval range.
|
||||||
|
iv == FullNodeTagRange
|
||||||
|
|
||||||
|
|
||||||
proc emptyFactor*(lrs: NodeTagRangeSet): float =
|
proc emptyFactor*(lrs: NodeTagRangeSet): float =
|
||||||
## Relative uncovered total, i.e. `#points-not-covered / 2^256` to be used
|
## Relative uncovered total, i.e. `#points-not-covered / 2^256` to be used
|
||||||
|
@ -235,9 +270,11 @@ proc emptyFactor*(lrs: openArray[NodeTagRangeSet]): float =
|
||||||
discard
|
discard
|
||||||
else: # number of points in `ivSet` is `2^256 + 1`
|
else: # number of points in `ivSet` is `2^256 + 1`
|
||||||
return 0.0
|
return 0.0
|
||||||
|
# Calculate: (2^256 - accu) / 2^256
|
||||||
if accu == 0.to(NodeTag):
|
if accu == 0.to(NodeTag):
|
||||||
return 1.0
|
1.0
|
||||||
((high(NodeTag) - accu).u256 + 1).to(float) / (2.0^256)
|
else:
|
||||||
|
((high(NodeTag) - accu).u256 + 1).to(float) / (2.0^256)
|
||||||
|
|
||||||
|
|
||||||
proc fullFactor*(lrs: NodeTagRangeSet): float =
|
proc fullFactor*(lrs: NodeTagRangeSet): float =
|
||||||
|
@ -250,6 +287,22 @@ proc fullFactor*(lrs: NodeTagRangeSet): float =
|
||||||
else:
|
else:
|
||||||
1.0 # number of points in `lrs` is `2^256 + 1`
|
1.0 # number of points in `lrs` is `2^256 + 1`
|
||||||
|
|
||||||
|
proc fullFactor*(lrs: openArray[NodeTagRangeSet]): float =
|
||||||
|
## Variant of `fullFactor()` where intervals are distributed across several
|
||||||
|
## sets. This function makes sense only if the interval sets are mutually
|
||||||
|
## disjunct.
|
||||||
|
var accu: NodeTag
|
||||||
|
for ivSet in lrs:
|
||||||
|
if 0 < ivSet.total:
|
||||||
|
if high(NodeTag) - ivSet.total < accu:
|
||||||
|
return 1.0
|
||||||
|
accu = accu + ivSet.total
|
||||||
|
elif ivSet.chunks == 0:
|
||||||
|
discard
|
||||||
|
else: # number of points in `ivSet` is `2^256 + 1`
|
||||||
|
return 1.0
|
||||||
|
accu.u256.to(float) / (2.0^256)
|
||||||
|
|
||||||
proc fullFactor*(iv: NodeTagRange): float =
|
proc fullFactor*(iv: NodeTagRange): float =
|
||||||
## Relative covered length of an interval, i.e. `#points-covered / 2^256`
|
## Relative covered length of an interval, i.e. `#points-covered / 2^256`
|
||||||
if 0 < iv.len:
|
if 0 < iv.len:
|
||||||
|
@ -266,8 +319,16 @@ proc `$`*(nodeTag: NodeTag): string =
|
||||||
"2^256-1"
|
"2^256-1"
|
||||||
elif nodeTag == 0.u256.NodeTag:
|
elif nodeTag == 0.u256.NodeTag:
|
||||||
"0"
|
"0"
|
||||||
|
elif nodeTag == 2.u256.pow(255).NodeTag:
|
||||||
|
"2^255" # 800...
|
||||||
|
elif nodeTag == 2.u256.pow(254).NodeTag:
|
||||||
|
"2^254" # 400..
|
||||||
|
elif nodeTag == 2.u256.pow(253).NodeTag:
|
||||||
|
"2^253" # 200...
|
||||||
|
elif nodeTag == 2.u256.pow(251).NodeTag:
|
||||||
|
"2^252" # 100...
|
||||||
else:
|
else:
|
||||||
nodeTag.to(Hash256).data.toHex
|
nodeTag.UInt256.toHex
|
||||||
|
|
||||||
proc `$`*(nodeKey: NodeKey): string =
|
proc `$`*(nodeKey: NodeKey): string =
|
||||||
$nodeKey.to(NodeTag)
|
$nodeKey.to(NodeTag)
|
||||||
|
@ -293,6 +354,37 @@ proc `$`*(iv: NodeTagRange): string =
|
||||||
leafRangePp iv
|
leafRangePp iv
|
||||||
|
|
||||||
|
|
||||||
|
proc fullPC3*(w: NodeTagRangeSet|NodeTagRange): string =
|
||||||
|
## Pretty print fill state of range sets.
|
||||||
|
if w.isEmpty:
|
||||||
|
"0%"
|
||||||
|
elif w.isFull:
|
||||||
|
"100%"
|
||||||
|
else:
|
||||||
|
let ff = w.fullFactor
|
||||||
|
if ff <= 0.99999:
|
||||||
|
ff.toPC(3)
|
||||||
|
else:
|
||||||
|
"99.999"
|
||||||
|
|
||||||
|
proc fullPC3*(w: openArray[NodeTagRangeSet]): string =
|
||||||
|
## Variant of `fullPC3()` where intervals are distributed across several
|
||||||
|
## sets. This function makes sense only if the interval sets are mutually
|
||||||
|
## disjunct.
|
||||||
|
if w.isEmpty:
|
||||||
|
"0%"
|
||||||
|
else:
|
||||||
|
let partition = "~" & $w.mapIt(it.chunks).foldl(a+b)
|
||||||
|
if w.isFull:
|
||||||
|
"100%" & partition
|
||||||
|
else:
|
||||||
|
let ff = w.fullFactor
|
||||||
|
if ff <= 0.99999:
|
||||||
|
ff.toPC(3) & partition
|
||||||
|
else:
|
||||||
|
"99.999" & partition
|
||||||
|
|
||||||
|
|
||||||
proc dump*(
|
proc dump*(
|
||||||
ranges: openArray[NodeTagRangeSet];
|
ranges: openArray[NodeTagRangeSet];
|
||||||
moan: proc(overlap: UInt256; iv: NodeTagRange) {.gcsafe.};
|
moan: proc(overlap: UInt256; iv: NodeTagRange) {.gcsafe.};
|
||||||
|
|
|
@ -9,14 +9,13 @@
|
||||||
# except according to those terms.
|
# except according to those terms.
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[options, sets, strutils],
|
std/[options, sets],
|
||||||
chronicles,
|
chronicles,
|
||||||
chronos,
|
chronos,
|
||||||
eth/[common, p2p],
|
eth/[common, p2p],
|
||||||
stew/[interval_set, keyed_queue],
|
stew/[interval_set, keyed_queue],
|
||||||
../../common as nimcom,
|
../../common as nimcom,
|
||||||
../../db/select_backend,
|
../../db/select_backend,
|
||||||
../../utils/prettify,
|
|
||||||
".."/[handlers, protocol, sync_desc],
|
".."/[handlers, protocol, sync_desc],
|
||||||
./worker/[pivot, ticker],
|
./worker/[pivot, ticker],
|
||||||
./worker/com/com_error,
|
./worker/com/com_error,
|
||||||
|
@ -251,11 +250,9 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} =
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
block:
|
block:
|
||||||
let
|
trace "Multi sync runner", peer, pivot, nAccounts=env.nAccounts,
|
||||||
nAccounts {.used.} = env.nAccounts
|
nSlotLists=env.nSlotLists,
|
||||||
nSlotLists {.used.} = env.nSlotLists
|
processed=env.fetchAccounts.processed.fullPC3,
|
||||||
processed {.used.} = env.fetchAccounts.processed.fullFactor.toPC(2)
|
|
||||||
trace "Multi sync runner", peer, pivot, nAccounts, nSlotLists, processed,
|
|
||||||
nStoQu=nStorQuAtStart
|
nStoQu=nStorQuAtStart
|
||||||
|
|
||||||
# This one is the syncing work horse which downloads the database
|
# This one is the syncing work horse which downloads the database
|
||||||
|
@ -263,10 +260,10 @@ proc runMulti*(buddy: SnapBuddyRef) {.async.} =
|
||||||
|
|
||||||
# Various logging entries (after accounts and storage slots download)
|
# Various logging entries (after accounts and storage slots download)
|
||||||
let
|
let
|
||||||
nAccounts = env.nAccounts
|
nAccounts {.used.} = env.nAccounts
|
||||||
nSlotLists = env.nSlotLists
|
nSlotLists {.used.} = env.nSlotLists
|
||||||
processed = env.fetchAccounts.processed.fullFactor.toPC(2)
|
processed {.used.} = env.fetchAccounts.processed.fullPC3
|
||||||
nStoQuLater = env.fetchStorageFull.len + env.fetchStoragePart.len
|
nStoQuLater {.used.} = env.fetchStorageFull.len + env.fetchStoragePart.len
|
||||||
|
|
||||||
if env.archived:
|
if env.archived:
|
||||||
# Archive pivot if it became stale
|
# Archive pivot if it became stale
|
||||||
|
|
|
@ -33,9 +33,12 @@ type
|
||||||
# proof*: seq[SnapProof]
|
# proof*: seq[SnapProof]
|
||||||
|
|
||||||
GetStorageRanges* = object
|
GetStorageRanges* = object
|
||||||
leftOver*: seq[AccountSlotsHeader]
|
leftOver*: seq[AccountSlotsChanged]
|
||||||
data*: AccountStorageRange
|
data*: AccountStorageRange
|
||||||
|
|
||||||
|
const
|
||||||
|
extraTraceMessages = false or true
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions
|
# Private functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -68,9 +71,8 @@ proc getStorageRangesReq(
|
||||||
return ok(reply)
|
return ok(reply)
|
||||||
|
|
||||||
except CatchableError as e:
|
except CatchableError as e:
|
||||||
let error {.used.} = e.msg
|
|
||||||
trace trSnapRecvError & "waiting for GetStorageRanges reply", peer, pivot,
|
trace trSnapRecvError & "waiting for GetStorageRanges reply", peer, pivot,
|
||||||
error
|
name=($e.name), error=(e.msg)
|
||||||
return err()
|
return err()
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -84,25 +86,28 @@ proc getStorageRanges*(
|
||||||
pivot: string; ## For logging, instead of `stateRoot`
|
pivot: string; ## For logging, instead of `stateRoot`
|
||||||
): Future[Result[GetStorageRanges,ComError]]
|
): Future[Result[GetStorageRanges,ComError]]
|
||||||
{.async.} =
|
{.async.} =
|
||||||
## Fetch data using the `snap#` protocol, returns the range covered.
|
## Fetch data using the `snap/1` protocol, returns the range covered.
|
||||||
##
|
##
|
||||||
## If the first `accounts` argument sequence item has the `firstSlot` field
|
## If the first `accounts` argument sequence item has the optional `subRange`
|
||||||
## set non-zero, only this account is fetched with a range. Otherwise all
|
## field set, only this account is fetched with for the range `subRange`.
|
||||||
## accounts are asked for without a range (non-zero `firstSlot` fields are
|
## Otherwise all accounts are asked for without a range (`subRange` fields
|
||||||
## ignored of later sequence items.)
|
## are ignored for later accounts list items.)
|
||||||
let
|
var nAccounts = accounts.len
|
||||||
peer {.used.} = buddy.peer
|
|
||||||
var
|
|
||||||
nAccounts = accounts.len
|
|
||||||
|
|
||||||
if nAccounts == 0:
|
if nAccounts == 0:
|
||||||
return err(ComEmptyAccountsArguments)
|
return err(ComEmptyAccountsArguments)
|
||||||
|
|
||||||
if trSnapTracePacketsOk:
|
let
|
||||||
trace trSnapSendSending & "GetStorageRanges", peer, pivot, nAccounts
|
peer {.used.} = buddy.peer
|
||||||
|
iv = accounts[0].subRange
|
||||||
|
|
||||||
|
when trSnapTracePacketsOk:
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace trSnapSendSending & "GetStorageRanges", peer, pivot, nAccounts,
|
||||||
|
iv=iv.get(otherwise=FullNodeTagRange)
|
||||||
|
else:
|
||||||
|
trace trSnapSendSending & "GetStorageRanges", peer, pivot, nAccounts
|
||||||
|
|
||||||
let
|
let
|
||||||
iv = accounts[0].subRange
|
|
||||||
snStoRanges = block:
|
snStoRanges = block:
|
||||||
let rc = await buddy.getStorageRangesReq(stateRoot,
|
let rc = await buddy.getStorageRangesReq(stateRoot,
|
||||||
accounts.mapIt(it.accKey.to(Hash256)), iv, pivot)
|
accounts.mapIt(it.accKey.to(Hash256)), iv, pivot)
|
||||||
|
@ -119,7 +124,6 @@ proc getStorageRanges*(
|
||||||
return err(ComTooManyStorageSlots)
|
return err(ComTooManyStorageSlots)
|
||||||
rc.value.get
|
rc.value.get
|
||||||
|
|
||||||
let
|
|
||||||
nSlotLists = snStoRanges.slotLists.len
|
nSlotLists = snStoRanges.slotLists.len
|
||||||
nProof = snStoRanges.proof.nodes.len
|
nProof = snStoRanges.proof.nodes.len
|
||||||
|
|
||||||
|
@ -148,40 +152,52 @@ proc getStorageRanges*(
|
||||||
# Filter remaining `slots` responses:
|
# Filter remaining `slots` responses:
|
||||||
# * Accounts for empty ones go back to the `leftOver` list.
|
# * Accounts for empty ones go back to the `leftOver` list.
|
||||||
for n in 0 ..< nSlotLists:
|
for n in 0 ..< nSlotLists:
|
||||||
# Empty data for a slot indicates missing data
|
if 0 < snStoRanges.slotLists[n].len or (n == nSlotLists-1 and 0 < nProof):
|
||||||
if snStoRanges.slotLists[n].len == 0:
|
# Storage slot data available. The last storage slots list may
|
||||||
dd.leftOver.add accounts[n]
|
# be a proved empty sub-range.
|
||||||
else:
|
|
||||||
dd.data.storages.add AccountSlots(
|
dd.data.storages.add AccountSlots(
|
||||||
account: accounts[n], # known to be no fewer accounts than slots
|
account: accounts[n], # known to be no fewer accounts than slots
|
||||||
data: snStoRanges.slotLists[n])
|
data: snStoRanges.slotLists[n])
|
||||||
|
|
||||||
# Complete the part that was not answered by the peer
|
else: # if n < nSlotLists-1 or nProof == 0:
|
||||||
if nProof == 0:
|
# Empty data here indicate missing data
|
||||||
# assigning empty slice is ok
|
dd.leftOver.add AccountSlotsChanged(
|
||||||
dd.leftOver = dd.leftOver & accounts[nSlotLists ..< nAccounts]
|
account: accounts[n])
|
||||||
|
|
||||||
else:
|
if 0 < nProof:
|
||||||
# Ok, we have a proof now
|
# Ok, we have a proof now. In that case, there is always a duplicate
|
||||||
if 0 < snStoRanges.slotLists[^1].len:
|
# of the proved entry on the `dd.leftOver` list.
|
||||||
# If the storage data for the last account comes with a proof, then the
|
#
|
||||||
# data set is incomplete. So record the missing part on the `dd.leftOver`
|
# Note that `storages[^1]` exists due to the clause
|
||||||
# list.
|
# `(n==nSlotLists-1 and 0<nProof)` in the above `for` loop.
|
||||||
|
let topAcc = dd.data.storages[^1].account
|
||||||
|
dd.leftOver.add AccountSlotsChanged(account: topAcc)
|
||||||
|
if 0 < dd.data.storages[^1].data.len:
|
||||||
let
|
let
|
||||||
reqTop = if accounts[0].subRange.isNone: high(NodeTag)
|
reqMaxPt = topAcc.subRange.get(otherwise = FullNodeTagRange).maxPt
|
||||||
else: accounts[0].subRange.unsafeGet.maxPt
|
respMaxPt = dd.data.storages[^1].data[^1].slotHash.to(NodeTag)
|
||||||
respTop = dd.data.storages[^1].data[^1].slotHash.to(NodeTag)
|
if respMaxPt < reqMaxPt:
|
||||||
if respTop < reqTop:
|
dd.leftOver[^1].newRange = some(
|
||||||
dd.leftOver.add AccountSlotsHeader(
|
NodeTagRange.new(respMaxPt + 1.u256, reqMaxPt))
|
||||||
subRange: some(NodeTagRange.new(respTop + 1.u256, reqTop)),
|
elif 0 < dd.data.storages.len:
|
||||||
accKey: accounts[nSlotLists-1].accKey,
|
let topAcc = dd.data.storages[^1].account
|
||||||
storageRoot: accounts[nSlotLists-1].storageRoot)
|
if topAcc.subRange.isSome:
|
||||||
|
#
|
||||||
|
# Fringe case when a partial request was answered without a proof.
|
||||||
|
# This means, that the interval requested covers the complete trie.
|
||||||
|
#
|
||||||
|
# Copying the request to the `leftOver`, the ranges reflect the new
|
||||||
|
# state: `topAcc.subRange.isSome` and `newRange.isNone`.
|
||||||
|
dd.leftOver.add AccountSlotsChanged(account: topAcc)
|
||||||
|
|
||||||
# Do the rest (assigning empty slice is ok)
|
# Complete the part that was not answered by the peer.
|
||||||
dd.leftOver = dd.leftOver & accounts[nSlotLists ..< nAccounts]
|
dd.leftOver = dd.leftOver & accounts[nSlotLists ..< nAccounts].mapIt(
|
||||||
|
AccountSlotsChanged(account: it))
|
||||||
|
|
||||||
trace trSnapRecvReceived & "StorageRanges", peer, pivot, nAccounts,
|
when trSnapTracePacketsOk:
|
||||||
nSlotLists, nProof, nLeftOver=dd.leftOver.len
|
trace trSnapRecvReceived & "StorageRanges", peer, pivot, nAccounts,
|
||||||
|
nSlotLists, nProof, nSlotLstRc=dd.data.storages.len,
|
||||||
|
nLeftOver=dd.leftOver.len
|
||||||
|
|
||||||
return ok(dd)
|
return ok(dd)
|
||||||
|
|
||||||
|
|
|
@ -226,6 +226,7 @@ proc execSnapSyncAction*(
|
||||||
if buddy.ctrl.stopped or env.archived:
|
if buddy.ctrl.stopped or env.archived:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
var rangeFetchOk = true
|
||||||
if not env.fetchAccounts.processed.isFull:
|
if not env.fetchAccounts.processed.isFull:
|
||||||
await buddy.rangeFetchAccounts(env)
|
await buddy.rangeFetchAccounts(env)
|
||||||
|
|
||||||
|
@ -234,26 +235,35 @@ proc execSnapSyncAction*(
|
||||||
|
|
||||||
# Run at least one round fetching storage slots even if the `archived`
|
# Run at least one round fetching storage slots even if the `archived`
|
||||||
# flag is set in order to keep the batch queue small.
|
# flag is set in order to keep the batch queue small.
|
||||||
if not buddy.ctrl.stopped:
|
if buddy.ctrl.running:
|
||||||
await buddy.rangeFetchStorageSlots(env)
|
await buddy.rangeFetchStorageSlots(env)
|
||||||
|
else:
|
||||||
if buddy.ctrl.stopped or env.archived:
|
rangeFetchOk = false
|
||||||
|
if env.archived:
|
||||||
return
|
return
|
||||||
|
|
||||||
|
# Unconditionally try healing if enabled.
|
||||||
if env.accountsHealingOk(ctx):
|
if env.accountsHealingOk(ctx):
|
||||||
|
# Let this procedure decide whether to ditch this peer (if any.) The idea
|
||||||
|
# is that the healing process might address different peer resources
|
||||||
|
# than the fetch procedure. So that peer might still be useful unless
|
||||||
|
# physically disconnected.
|
||||||
|
buddy.ctrl.forceRun = true
|
||||||
await buddy.healAccounts(env)
|
await buddy.healAccounts(env)
|
||||||
if buddy.ctrl.stopped or env.archived:
|
if env.archived:
|
||||||
return
|
return
|
||||||
|
|
||||||
# Some additional storage slots might have been popped up
|
# Some additional storage slots might have been popped up
|
||||||
await buddy.rangeFetchStorageSlots(env)
|
if rangeFetchOk:
|
||||||
if buddy.ctrl.stopped or env.archived:
|
await buddy.rangeFetchStorageSlots(env)
|
||||||
return
|
if env.archived:
|
||||||
|
return
|
||||||
|
|
||||||
# Don't bother with storage slots healing before accounts healing takes
|
# Don't bother with storage slots healing before accounts healing takes
|
||||||
# place. This saves communication bandwidth. The pivot might change soon,
|
# place. This saves communication bandwidth. The pivot might change soon,
|
||||||
# anyway.
|
# anyway.
|
||||||
if env.accountsHealingOk(ctx):
|
if env.accountsHealingOk(ctx):
|
||||||
|
buddy.ctrl.forceRun = true
|
||||||
await buddy.healStorageSlots(env)
|
await buddy.healStorageSlots(env)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -114,6 +114,7 @@ proc findMissingNodes*(
|
||||||
planBLevelMax: uint8;
|
planBLevelMax: uint8;
|
||||||
planBRetryMax: int;
|
planBRetryMax: int;
|
||||||
planBRetrySleepMs: int;
|
planBRetrySleepMs: int;
|
||||||
|
forcePlanBOk = false;
|
||||||
): Future[MissingNodesSpecs]
|
): Future[MissingNodesSpecs]
|
||||||
{.async.} =
|
{.async.} =
|
||||||
## Find some missing nodes in the hexary trie database.
|
## Find some missing nodes in the hexary trie database.
|
||||||
|
@ -138,7 +139,7 @@ proc findMissingNodes*(
|
||||||
|
|
||||||
# Plan B, carefully employ `hexaryInspect()`
|
# Plan B, carefully employ `hexaryInspect()`
|
||||||
var nRetryCount = 0
|
var nRetryCount = 0
|
||||||
if 0 < nodes.len:
|
if 0 < nodes.len or forcePlanBOk:
|
||||||
ignExceptionOops("compileMissingNodesList"):
|
ignExceptionOops("compileMissingNodesList"):
|
||||||
let
|
let
|
||||||
paths = nodes.mapIt it.partialPath
|
paths = nodes.mapIt it.partialPath
|
||||||
|
@ -152,11 +153,13 @@ proc findMissingNodes*(
|
||||||
|
|
||||||
while stats.dangling.len == 0 and
|
while stats.dangling.len == 0 and
|
||||||
nRetryCount < planBRetryMax and
|
nRetryCount < planBRetryMax and
|
||||||
|
1 < maxLevel and
|
||||||
not stats.resumeCtx.isNil:
|
not stats.resumeCtx.isNil:
|
||||||
await sleepAsync suspend
|
await sleepAsync suspend
|
||||||
nRetryCount.inc
|
nRetryCount.inc
|
||||||
maxLevel = (120 * maxLevel + 99) div 100 # ~20% increase
|
maxLevel.dec
|
||||||
trace logTxt "plan B retry", nRetryCount, maxLevel
|
when extraTraceMessages:
|
||||||
|
trace logTxt "plan B retry", forcePlanBOk, nRetryCount, maxLevel
|
||||||
stats = getFn.hexaryInspectTrie(rootKey,
|
stats = getFn.hexaryInspectTrie(rootKey,
|
||||||
resumeCtx = stats.resumeCtx,
|
resumeCtx = stats.resumeCtx,
|
||||||
stopAtLevel = maxLevel,
|
stopAtLevel = maxLevel,
|
||||||
|
@ -169,19 +172,20 @@ proc findMissingNodes*(
|
||||||
|
|
||||||
if 0 < result.missing.len:
|
if 0 < result.missing.len:
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "plan B", nNodes=nodes.len, nDangling=result.missing.len,
|
trace logTxt "plan B", forcePlanBOk, nNodes=nodes.len,
|
||||||
level=result.level, nVisited=result.visited, nRetryCount
|
nDangling=result.missing.len, level=result.level,
|
||||||
|
nVisited=result.visited, nRetryCount
|
||||||
return
|
return
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "plan B not applicable", nNodes=nodes.len,
|
trace logTxt "plan B not applicable", forcePlanBOk, nNodes=nodes.len,
|
||||||
level=result.level, nVisited=result.visited, nRetryCount
|
level=result.level, nVisited=result.visited, nRetryCount
|
||||||
|
|
||||||
# Plan C, clean up intervals
|
# Plan C, clean up intervals
|
||||||
|
|
||||||
# Calculate `gaps` as the complement of the `processed` set of intervals
|
# Calculate `gaps` as the complement of the `processed` set of intervals
|
||||||
let gaps = NodeTagRangeSet.init()
|
let gaps = NodeTagRangeSet.init()
|
||||||
discard gaps.merge(low(NodeTag),high(NodeTag))
|
discard gaps.merge FullNodeTagRange
|
||||||
for w in ranges.processed.increasing: discard gaps.reduce w
|
for w in ranges.processed.increasing: discard gaps.reduce w
|
||||||
|
|
||||||
# Clean up empty gaps in the processed range
|
# Clean up empty gaps in the processed range
|
||||||
|
|
|
@ -53,14 +53,12 @@ import
|
||||||
"."/[find_missing_nodes, storage_queue_helper, swap_in]
|
"."/[find_missing_nodes, storage_queue_helper, swap_in]
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "snap-heal"
|
topics = "snap-acc"
|
||||||
|
|
||||||
const
|
const
|
||||||
extraTraceMessages = false or true
|
extraTraceMessages = false or true
|
||||||
## Enabled additional logging noise
|
## Enabled additional logging noise
|
||||||
|
|
||||||
EmptyBlobSet = HashSet[Blob].default
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private logging helpers
|
# Private logging helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
@ -72,11 +70,10 @@ proc `$`(node: NodeSpecs): string =
|
||||||
node.partialPath.toHex
|
node.partialPath.toHex
|
||||||
|
|
||||||
proc `$`(rs: NodeTagRangeSet): string =
|
proc `$`(rs: NodeTagRangeSet): string =
|
||||||
let ff = rs.fullFactor
|
rs.fullPC3
|
||||||
if 0.99 <= ff and ff < 1.0: "99%" else: ff.toPC(0)
|
|
||||||
|
|
||||||
proc `$`(iv: NodeTagRange): string =
|
proc `$`(iv: NodeTagRange): string =
|
||||||
iv.fullFactor.toPC(3)
|
iv.fullPC3
|
||||||
|
|
||||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||||
|
@ -88,7 +85,8 @@ proc healingCtx(
|
||||||
): string =
|
): string =
|
||||||
let ctx = buddy.ctx
|
let ctx = buddy.ctx
|
||||||
"{" &
|
"{" &
|
||||||
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
|
"piv=" & "#" & $env.stateHeader.blockNumber & "," &
|
||||||
|
"ctl=" & $buddy.ctrl.state & "," &
|
||||||
"nAccounts=" & $env.nAccounts & "," &
|
"nAccounts=" & $env.nAccounts & "," &
|
||||||
("covered=" & $env.fetchAccounts.processed & "/" &
|
("covered=" & $env.fetchAccounts.processed & "/" &
|
||||||
$ctx.pool.coveredAccounts ) & "}"
|
$ctx.pool.coveredAccounts ) & "}"
|
||||||
|
@ -146,7 +144,7 @@ proc compileMissingNodesList(
|
||||||
return mlv.missing
|
return mlv.missing
|
||||||
|
|
||||||
|
|
||||||
proc fetchMissingNodes(
|
proc getNodesFromNetwork(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
missingNodes: seq[NodeSpecs]; # Nodes to fetch from the network
|
missingNodes: seq[NodeSpecs]; # Nodes to fetch from the network
|
||||||
ignore: HashSet[Blob]; # Except for these partial paths listed
|
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||||
|
@ -156,7 +154,6 @@ proc fetchMissingNodes(
|
||||||
## Extract from `nodes.missing` the next batch of nodes that need
|
## Extract from `nodes.missing` the next batch of nodes that need
|
||||||
## to be merged it into the database
|
## to be merged it into the database
|
||||||
let
|
let
|
||||||
ctx {.used.} = buddy.ctx
|
|
||||||
peer {.used.} = buddy.peer
|
peer {.used.} = buddy.peer
|
||||||
rootHash = env.stateHeader.stateRoot
|
rootHash = env.stateHeader.stateRoot
|
||||||
pivot = "#" & $env.stateHeader.blockNumber # for logging
|
pivot = "#" & $env.stateHeader.blockNumber # for logging
|
||||||
|
@ -205,11 +202,7 @@ proc kvAccountLeaf(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): (bool,NodeKey,Account) =
|
): (bool,NodeKey,Account) =
|
||||||
## Re-read leaf node from persistent database (if any)
|
## Re-read leaf node from persistent database (if any)
|
||||||
let
|
var nNibbles = -1
|
||||||
peer {.used.} = buddy.peer
|
|
||||||
var
|
|
||||||
nNibbles = -1
|
|
||||||
|
|
||||||
discardRlpError("kvAccountLeaf"):
|
discardRlpError("kvAccountLeaf"):
|
||||||
let
|
let
|
||||||
nodeRlp = rlpFromBytes node.data
|
nodeRlp = rlpFromBytes node.data
|
||||||
|
@ -226,7 +219,7 @@ proc kvAccountLeaf(
|
||||||
return (true, nodeKey, accData)
|
return (true, nodeKey, accData)
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "non-leaf node path or corrupt data", peer,
|
trace logTxt "non-leaf node path or corrupt data", peer=buddy.peer,
|
||||||
ctx=buddy.healingCtx(env), nNibbles
|
ctx=buddy.healingCtx(env), nNibbles
|
||||||
|
|
||||||
|
|
||||||
|
@ -297,7 +290,7 @@ proc accountsHealingImpl(
|
||||||
return (0,EmptyBlobSet) # nothing to do
|
return (0,EmptyBlobSet) # nothing to do
|
||||||
|
|
||||||
# Get next batch of nodes that need to be merged it into the database
|
# Get next batch of nodes that need to be merged it into the database
|
||||||
let fetchedNodes = await buddy.fetchMissingNodes(missingNodes, ignore, env)
|
let fetchedNodes = await buddy.getNodesFromNetwork(missingNodes, ignore, env)
|
||||||
if fetchedNodes.len == 0:
|
if fetchedNodes.len == 0:
|
||||||
return (0,EmptyBlobSet)
|
return (0,EmptyBlobSet)
|
||||||
|
|
||||||
|
@ -308,8 +301,8 @@ proc accountsHealingImpl(
|
||||||
|
|
||||||
if 0 < report.len and report[^1].slot.isNone:
|
if 0 < report.len and report[^1].slot.isNone:
|
||||||
# Storage error, just run the next lap (not much else that can be done)
|
# Storage error, just run the next lap (not much else that can be done)
|
||||||
error logTxt "error updating persistent database", peer,
|
error logTxt "databse error", peer, ctx=buddy.healingCtx(env),
|
||||||
ctx=buddy.healingCtx(env), nFetchedNodes, error=report[^1].error
|
nFetchedNodes, error=report[^1].error
|
||||||
return (-1,EmptyBlobSet)
|
return (-1,EmptyBlobSet)
|
||||||
|
|
||||||
# Filter out error and leaf nodes
|
# Filter out error and leaf nodes
|
||||||
|
@ -349,9 +342,7 @@ proc healAccounts*(
|
||||||
) {.async.} =
|
) {.async.} =
|
||||||
## Fetching and merging missing account trie database nodes.
|
## Fetching and merging missing account trie database nodes.
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
let
|
let peer {.used.} = buddy.peer
|
||||||
ctx {.used.} = buddy.ctx
|
|
||||||
peer {.used.} = buddy.peer
|
|
||||||
trace logTxt "started", peer, ctx=buddy.healingCtx(env)
|
trace logTxt "started", peer, ctx=buddy.healingCtx(env)
|
||||||
|
|
||||||
let
|
let
|
||||||
|
@ -364,7 +355,7 @@ proc healAccounts*(
|
||||||
while not fa.processed.isFull() and
|
while not fa.processed.isFull() and
|
||||||
buddy.ctrl.running and
|
buddy.ctrl.running and
|
||||||
not env.archived:
|
not env.archived:
|
||||||
var (nNodes, rejected) = await buddy.accountsHealingImpl(ignore, env)
|
let (nNodes, rejected) = await buddy.accountsHealingImpl(ignore, env)
|
||||||
if nNodes <= 0:
|
if nNodes <= 0:
|
||||||
break
|
break
|
||||||
ignore = ignore + rejected
|
ignore = ignore + rejected
|
||||||
|
@ -372,8 +363,8 @@ proc healAccounts*(
|
||||||
nFetchLoop.inc
|
nFetchLoop.inc
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "job done", peer, ctx=buddy.healingCtx(env),
|
trace logTxt "done", peer, ctx=buddy.healingCtx(env),
|
||||||
nNodesFetched, nFetchLoop, nIgnore=ignore.len, runState=buddy.ctrl.state
|
nNodesFetched, nFetchLoop, nIgnore=ignore.len
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -38,12 +38,10 @@
|
||||||
## healing algorithm again.
|
## healing algorithm again.
|
||||||
##
|
##
|
||||||
|
|
||||||
# ###### --- CHECK FOR DEADLOCK ---- ####
|
|
||||||
|
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
std/[math, sequtils, tables],
|
std/[math, sequtils, sets, tables],
|
||||||
chronicles,
|
chronicles,
|
||||||
chronos,
|
chronos,
|
||||||
eth/[common, p2p, trie/nibbles],
|
eth/[common, p2p, trie/nibbles],
|
||||||
|
@ -52,11 +50,12 @@ import
|
||||||
"../../.."/[sync_desc, protocol, types],
|
"../../.."/[sync_desc, protocol, types],
|
||||||
"../.."/[constants, range_desc, worker_desc],
|
"../.."/[constants, range_desc, worker_desc],
|
||||||
../com/[com_error, get_trie_nodes],
|
../com/[com_error, get_trie_nodes],
|
||||||
../db/[hexary_desc, hexary_envelope, snapdb_storage_slots],
|
../db/[hexary_desc, hexary_envelope, hexary_error, hexary_range,
|
||||||
|
snapdb_storage_slots],
|
||||||
"."/[find_missing_nodes, storage_queue_helper]
|
"."/[find_missing_nodes, storage_queue_helper]
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "snap-heal"
|
topics = "snap-slot"
|
||||||
|
|
||||||
const
|
const
|
||||||
extraTraceMessages = false or true
|
extraTraceMessages = false or true
|
||||||
|
@ -73,10 +72,10 @@ proc `$`(node: NodeSpecs): string =
|
||||||
node.partialPath.toHex
|
node.partialPath.toHex
|
||||||
|
|
||||||
proc `$`(rs: NodeTagRangeSet): string =
|
proc `$`(rs: NodeTagRangeSet): string =
|
||||||
rs.fullFactor.toPC(0)
|
rs.fullPC3
|
||||||
|
|
||||||
proc `$`(iv: NodeTagRange): string =
|
proc `$`(iv: NodeTagRange): string =
|
||||||
iv.fullFactor.toPC(3)
|
iv.fullPC3
|
||||||
|
|
||||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||||
|
@ -87,33 +86,36 @@ proc healingCtx(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): string {.used.} =
|
): string {.used.} =
|
||||||
"{" &
|
"{" &
|
||||||
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
|
"piv=" & "#" & $env.stateHeader.blockNumber & "," &
|
||||||
"runState=" & $buddy.ctrl.state & "," &
|
"ctl=" & $buddy.ctrl.state & "," &
|
||||||
"nStoQu=" & $env.storageQueueTotal() & "," &
|
"nStoQu=" & $env.storageQueueTotal() & "," &
|
||||||
|
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||||
|
"nParked=" & $env.parkedStorage.len & "," &
|
||||||
"nSlotLists=" & $env.nSlotLists & "}"
|
"nSlotLists=" & $env.nSlotLists & "}"
|
||||||
|
|
||||||
proc healingCtx(
|
proc healingCtx(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
kvp: SnapSlotsQueuePair;
|
kvp: StoQuSlotsKVP;
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): string =
|
): string =
|
||||||
"{" &
|
"{" &
|
||||||
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
|
"piv=" & "#" & $env.stateHeader.blockNumber & "," &
|
||||||
"runState=" & $buddy.ctrl.state & "," &
|
"ctl=" & $buddy.ctrl.state & "," &
|
||||||
"covered=" & $kvp.data.slots.processed & "," &
|
"processed=" & $kvp.data.slots.processed & "," &
|
||||||
"nStoQu=" & $env.storageQueueTotal() & "," &
|
"nStoQu=" & $env.storageQueueTotal() & "," &
|
||||||
|
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||||
|
"nParked=" & $env.parkedStorage.len & "," &
|
||||||
"nSlotLists=" & $env.nSlotLists & "}"
|
"nSlotLists=" & $env.nSlotLists & "}"
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private helpers
|
# Private helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
template noExceptionOops(info: static[string]; code: untyped) =
|
template discardRlpError(info: static[string]; code: untyped) =
|
||||||
try:
|
try:
|
||||||
code
|
code
|
||||||
except CatchableError as e:
|
except RlpError:
|
||||||
raiseAssert "Inconveivable (" &
|
discard
|
||||||
info & "): name=" & $e.name & " msg=" & e.msg
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions
|
# Private functions
|
||||||
|
@ -121,7 +123,7 @@ template noExceptionOops(info: static[string]; code: untyped) =
|
||||||
|
|
||||||
proc compileMissingNodesList(
|
proc compileMissingNodesList(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
kvp: SnapSlotsQueuePair;
|
kvp: StoQuSlotsKVP;
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): Future[seq[NodeSpecs]]
|
): Future[seq[NodeSpecs]]
|
||||||
{.async.} =
|
{.async.} =
|
||||||
|
@ -138,7 +140,14 @@ proc compileMissingNodesList(
|
||||||
rootKey, getFn,
|
rootKey, getFn,
|
||||||
healStorageSlotsInspectionPlanBLevel,
|
healStorageSlotsInspectionPlanBLevel,
|
||||||
healStorageSlotsInspectionPlanBRetryMax,
|
healStorageSlotsInspectionPlanBRetryMax,
|
||||||
healStorageSlotsInspectionPlanBRetryNapMSecs)
|
healStorageSlotsInspectionPlanBRetryNapMSecs,
|
||||||
|
forcePlanBOk = true)
|
||||||
|
|
||||||
|
# Clean up empty account ranges found while looking for nodes
|
||||||
|
if not mlv.emptyGaps.isNil:
|
||||||
|
for w in mlv.emptyGaps.increasing:
|
||||||
|
discard slots.processed.merge w
|
||||||
|
slots.unprocessed.reduce w
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "missing nodes", peer,
|
trace logTxt "missing nodes", peer,
|
||||||
|
@ -150,73 +159,124 @@ proc compileMissingNodesList(
|
||||||
|
|
||||||
proc getNodesFromNetwork(
|
proc getNodesFromNetwork(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
kvp: SnapSlotsQueuePair;
|
missingNodes: seq[NodeSpecs]; # Nodes to fetch from the network
|
||||||
missing: seq[NodeSpecs];
|
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||||
env: SnapPivotRef;
|
kvp: StoQuSlotsKVP; # Storage slots context
|
||||||
|
env: SnapPivotRef; # For logging
|
||||||
): Future[seq[NodeSpecs]]
|
): Future[seq[NodeSpecs]]
|
||||||
{.async.} =
|
{.async.} =
|
||||||
## Extract from `missing` the next batch of nodes that need
|
## Extract from `missing` the next batch of nodes that need
|
||||||
## to be merged it into the database
|
## to be merged it into the database
|
||||||
let
|
let
|
||||||
ctx {.used.} = buddy.ctx
|
|
||||||
peer {.used.} = buddy.peer
|
peer {.used.} = buddy.peer
|
||||||
accPath = kvp.data.accKey.to(Blob)
|
accPath = kvp.data.accKey.to(Blob)
|
||||||
storageRoot = kvp.key
|
rootHash = env.stateHeader.stateRoot
|
||||||
fetchNodes = missing[0 ..< fetchRequestTrieNodesMax]
|
|
||||||
|
|
||||||
if fetchNodes.len == 0:
|
|
||||||
return # Nothing to do
|
|
||||||
|
|
||||||
# Initalise for `getTrieNodes()` for fetching nodes from the network
|
|
||||||
var
|
|
||||||
nodeKey: Table[Blob,NodeKey] # Temporary `path -> key` mapping
|
|
||||||
req = SnapTriePaths(accPath: accpath)
|
|
||||||
for w in fetchNodes:
|
|
||||||
req.slotPaths.add w.partialPath
|
|
||||||
nodeKey[w.partialPath] = w.nodeKey
|
|
||||||
|
|
||||||
# Fetch nodes from the network.
|
|
||||||
let
|
|
||||||
pivot = "#" & $env.stateHeader.blockNumber # for logging
|
pivot = "#" & $env.stateHeader.blockNumber # for logging
|
||||||
rc = await buddy.getTrieNodes(storageRoot, @[req], pivot)
|
|
||||||
if rc.isOk:
|
|
||||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
|
||||||
buddy.only.errors.resetComError()
|
|
||||||
|
|
||||||
return rc.value.nodes.mapIt(NodeSpecs(
|
# Initalise for fetching nodes from the network via `getTrieNodes()`
|
||||||
partialPath: it.partialPath,
|
var
|
||||||
nodeKey: nodeKey[it.partialPath],
|
nodeKey: Table[Blob,NodeKey] # Temporary `path -> key` mapping
|
||||||
data: it.data))
|
req = SnapTriePaths(accPath: accPath) # Argument for `getTrieNodes()`
|
||||||
|
|
||||||
let error = rc.error
|
# There is no point in fetching too many nodes as it will be rejected. So
|
||||||
if await buddy.ctrl.stopAfterSeriousComError(error, buddy.only.errors):
|
# rest of the `missingNodes` list is ignored to be picked up later.
|
||||||
|
for w in missingNodes:
|
||||||
|
if w.partialPath notin ignore and not nodeKey.hasKey(w.partialPath):
|
||||||
|
req.slotPaths.add w.partialPath
|
||||||
|
nodeKey[w.partialPath] = w.nodeKey
|
||||||
|
if fetchRequestTrieNodesMax <= req.slotPaths.len:
|
||||||
|
break
|
||||||
|
|
||||||
|
if 0 < req.slotPaths.len:
|
||||||
|
# Fetch nodes from the network.
|
||||||
|
let rc = await buddy.getTrieNodes(rootHash, @[req], pivot)
|
||||||
|
if rc.isOk:
|
||||||
|
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||||
|
buddy.only.errors.resetComError()
|
||||||
|
|
||||||
|
return rc.value.nodes.mapIt(NodeSpecs(
|
||||||
|
partialPath: it.partialPath,
|
||||||
|
nodeKey: nodeKey[it.partialPath],
|
||||||
|
data: it.data))
|
||||||
|
|
||||||
|
# Process error ...
|
||||||
|
let
|
||||||
|
error = rc.error
|
||||||
|
ok = await buddy.ctrl.stopAfterSeriousComError(error, buddy.only.errors)
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "fetch nodes error => stop", peer,
|
trace logTxt "reply error", peer, ctx=buddy.healingCtx(kvp,env),
|
||||||
ctx=buddy.healingCtx(kvp,env), error
|
error, stop=ok
|
||||||
|
|
||||||
|
return @[]
|
||||||
|
|
||||||
|
|
||||||
proc slotKey(node: NodeSpecs): (bool,NodeKey) =
|
proc kvStoSlotsLeaf(
|
||||||
## Read leaf node from persistent database (if any)
|
buddy: SnapBuddyRef;
|
||||||
try:
|
node: NodeSpecs; # Node data fetched from network
|
||||||
|
kvp: StoQuSlotsKVP; # For logging
|
||||||
|
env: SnapPivotRef; # For logging
|
||||||
|
): (bool,NodeKey) =
|
||||||
|
## Re-read leaf node from persistent database (if any)
|
||||||
|
var nNibbles = -1
|
||||||
|
discardRlpError("kvStorageSlotsLeaf"):
|
||||||
let
|
let
|
||||||
nodeRlp = rlpFromBytes node.data
|
nodeRlp = rlpFromBytes node.data
|
||||||
(_,prefix) = hexPrefixDecode node.partialPath
|
prefix = (hexPrefixDecode node.partialPath)[1]
|
||||||
(_,segment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
segment = (hexPrefixDecode nodeRlp.listElem(0).toBytes)[1]
|
||||||
nibbles = prefix & segment
|
nibbles = prefix & segment
|
||||||
if nibbles.len == 64:
|
|
||||||
|
nNibbles = nibbles.len
|
||||||
|
if nNibbles == 64:
|
||||||
return (true, nibbles.getBytes.convertTo(NodeKey))
|
return (true, nibbles.getBytes.convertTo(NodeKey))
|
||||||
except CatchableError:
|
|
||||||
discard
|
when extraTraceMessages:
|
||||||
|
trace logTxt "non-leaf node path or corrupt data", peer=buddy.peer,
|
||||||
|
ctx=buddy.healingCtx(kvp,env), nNibbles
|
||||||
|
|
||||||
|
|
||||||
|
proc registerStoSlotsLeaf(
|
||||||
|
buddy: SnapBuddyRef;
|
||||||
|
slotKey: NodeKey;
|
||||||
|
kvp: StoQuSlotsKVP;
|
||||||
|
env: SnapPivotRef;
|
||||||
|
) =
|
||||||
|
## Process single account node as would be done with an interval by
|
||||||
|
## the `storeAccounts()` function
|
||||||
|
let
|
||||||
|
ctx = buddy.ctx
|
||||||
|
peer = buddy.peer
|
||||||
|
rootKey = kvp.key.to(NodeKey)
|
||||||
|
getSlotFn = ctx.pool.snapDb.getStorageSlotsFn kvp.data.accKey
|
||||||
|
pt = slotKey.to(NodeTag)
|
||||||
|
|
||||||
|
# Extend interval [pt,pt] if possible
|
||||||
|
var iv: NodeTagRange
|
||||||
|
try:
|
||||||
|
iv = getSlotFn.hexaryRangeInflate(rootKey, pt)
|
||||||
|
except CatchableError as e:
|
||||||
|
error logTxt "inflating interval oops", peer, ctx=buddy.healingCtx(kvp,env),
|
||||||
|
accKey=kvp.data.accKey, slotKey, name=($e.name), msg=e.msg
|
||||||
|
iv = NodeTagRange.new(pt,pt)
|
||||||
|
|
||||||
|
# Register isolated leaf node
|
||||||
|
if 0 < kvp.data.slots.processed.merge iv:
|
||||||
|
kvp.data.slots.unprocessed.reduce iv
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "registered single slot", peer, ctx=buddy.healingCtx(env),
|
||||||
|
leftSlack=(iv.minPt < pt), rightSlack=(pt < iv.maxPt)
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions: do the healing for one work item (sub-trie)
|
# Private functions: do the healing for one work item (sub-trie)
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc storageSlotsHealing(
|
proc stoSlotsHealingImpl(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
kvp: SnapSlotsQueuePair;
|
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||||
|
kvp: StoQuSlotsKVP;
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
) {.async.} =
|
): Future[(int,HashSet[Blob])]
|
||||||
|
{.async.} =
|
||||||
## Returns `true` is the sub-trie is complete (probably inherited), and
|
## Returns `true` is the sub-trie is complete (probably inherited), and
|
||||||
## `false` if there are nodes left to be completed.
|
## `false` if there are nodes left to be completed.
|
||||||
let
|
let
|
||||||
|
@ -227,51 +287,53 @@ proc storageSlotsHealing(
|
||||||
|
|
||||||
if missing.len == 0:
|
if missing.len == 0:
|
||||||
trace logTxt "nothing to do", peer, ctx=buddy.healingCtx(kvp,env)
|
trace logTxt "nothing to do", peer, ctx=buddy.healingCtx(kvp,env)
|
||||||
return
|
return (0,EmptyBlobSet) # nothing to do
|
||||||
|
|
||||||
when extraTraceMessages:
|
|
||||||
trace logTxt "started", peer, ctx=buddy.healingCtx(kvp,env)
|
|
||||||
|
|
||||||
# Get next batch of nodes that need to be merged it into the database
|
# Get next batch of nodes that need to be merged it into the database
|
||||||
let nodeSpecs = await buddy.getNodesFromNetwork(kvp, missing, env)
|
let fetchedNodes = await buddy.getNodesFromNetwork(missing, ignore, kvp, env)
|
||||||
if nodeSpecs.len == 0:
|
if fetchedNodes.len == 0:
|
||||||
return
|
when extraTraceMessages:
|
||||||
|
trace logTxt "node set unavailable", nMissing=missing.len
|
||||||
|
return (0,EmptyBlobSet)
|
||||||
|
|
||||||
# Store nodes onto disk
|
# Store nodes onto disk
|
||||||
let report = db.importRawStorageSlotsNodes(peer, kvp.data.accKey, nodeSpecs)
|
let
|
||||||
|
nFetchedNodes = fetchedNodes.len
|
||||||
|
report = db.importRawStorageSlotsNodes(peer, kvp.data.accKey, fetchedNodes)
|
||||||
|
|
||||||
if 0 < report.len and report[^1].slot.isNone:
|
if 0 < report.len and report[^1].slot.isNone:
|
||||||
# Storage error, just run the next lap (not much else that can be done)
|
# Storage error, just run the next lap (not much else that can be done)
|
||||||
error logTxt "database error", peer, ctx=buddy.healingCtx(kvp,env),
|
error logTxt "database error", peer, ctx=buddy.healingCtx(kvp,env),
|
||||||
nNodes=nodeSpecs.len, error=report[^1].error
|
nFetchedNodes, error=report[^1].error
|
||||||
return
|
return (-1,EmptyBlobSet)
|
||||||
|
|
||||||
when extraTraceMessages:
|
|
||||||
trace logTxt "nodes merged into database", peer,
|
|
||||||
ctx=buddy.healingCtx(kvp,env), nNodes=nodeSpecs.len
|
|
||||||
|
|
||||||
# Filter out leaf nodes
|
# Filter out leaf nodes
|
||||||
var nLeafNodes = 0 # for logging
|
var
|
||||||
|
nLeafNodes = 0 # for logging
|
||||||
|
rejected: HashSet[Blob]
|
||||||
|
trace logTxt "importRawStorageSlotsNodes", nReport=report.len #########
|
||||||
for w in report:
|
for w in report:
|
||||||
if w.slot.isSome and w.kind.get(otherwise = Branch) == Leaf:
|
if w.slot.isSome: # non-indexed entries appear typically at the end, though
|
||||||
|
let inx = w.slot.unsafeGet
|
||||||
|
|
||||||
# Leaf Node has been stored, so register it
|
# Node error, will need to pick up later and download again. Node that
|
||||||
let
|
# there need not be an expicit node specs (so `kind` is opted out.)
|
||||||
inx = w.slot.unsafeGet
|
if w.kind.isNone or w.error != HexaryError(0):
|
||||||
(isLeaf, slotKey) = nodeSpecs[inx].slotKey
|
rejected.incl fetchedNodes[inx].partialPath
|
||||||
if isLeaf:
|
|
||||||
let
|
|
||||||
slotTag = slotKey.to(NodeTag)
|
|
||||||
iv = NodeTagRange.new(slotTag,slotTag)
|
|
||||||
kvp.data.slots.unprocessed.reduce iv
|
|
||||||
discard kvp.data.slots.processed.merge iv
|
|
||||||
nLeafNodes.inc
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
elif w.kind.unsafeGet == Leaf:
|
||||||
trace logTxt "stored slot", peer,
|
# Leaf node has been stored, double check
|
||||||
ctx=buddy.healingCtx(kvp,env), slotKey=slotTag
|
let (isLeaf, key) = buddy.kvStoSlotsLeaf(fetchedNodes[inx], kvp, env)
|
||||||
|
if isLeaf:
|
||||||
|
# Update `unprocessed` registry, collect storage roots (if any)
|
||||||
|
buddy.registerStoSlotsLeaf(key, kvp, env)
|
||||||
|
nLeafNodes.inc
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "job done", peer, ctx=buddy.healingCtx(kvp,env), nLeafNodes
|
trace logTxt "merged into database", peer, ctx=buddy.healingCtx(kvp,env),
|
||||||
|
nLeafNodes
|
||||||
|
|
||||||
|
return (nFetchedNodes - rejected.len, rejected)
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public functions
|
# Public functions
|
||||||
|
@ -282,48 +344,48 @@ proc healStorageSlots*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
) {.async.} =
|
) {.async.} =
|
||||||
## Fetching and merging missing slorage slots trie database nodes.
|
## Fetching and merging missing slorage slots trie database nodes.
|
||||||
let
|
when extraTraceMessages:
|
||||||
ctx {.used.} = buddy.ctx
|
let peer {.used.} = buddy.peer
|
||||||
peer {.used.} = buddy.peer
|
trace logTxt "started", peer, ctx=buddy.healingCtx(env)
|
||||||
|
|
||||||
# Extract healing slot items from partial slots list
|
var
|
||||||
var toBeHealed: seq[SnapSlotsQueuePair]
|
nNodesFetched = 0
|
||||||
for kvp in env.fetchStoragePart.nextPairs:
|
nFetchLoop = 0
|
||||||
# Delete from queue and process this entry
|
ignore: HashSet[Blob]
|
||||||
env.fetchStoragePart.del kvp.key
|
visited: HashSet[NodeKey]
|
||||||
|
|
||||||
# Move to returned list unless duplicated in full slots list
|
while buddy.ctrl.running and
|
||||||
if env.fetchStorageFull.eq(kvp.key).isErr:
|
visited.len <= healStorageSlotsBatchMax and
|
||||||
toBeHealed.add kvp
|
ignore.len <= healStorageSlotsFailedMax and
|
||||||
env.parkedStorage.incl kvp.data.accKey # temporarily parked
|
not env.archived:
|
||||||
if healStorageSlotsBatchMax <= toBeHealed.len:
|
# Pull out the next request list from the queue
|
||||||
|
let kvp = block:
|
||||||
|
let rc = env.storageQueueUnlinkPartialItem visited
|
||||||
|
if rc.isErr:
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "queue exhausted", peer, ctx=buddy.healingCtx(env),
|
||||||
|
nIgnore=ignore.len, nVisited=visited.len
|
||||||
break
|
break
|
||||||
|
rc.value
|
||||||
|
|
||||||
# Run against local batch
|
nFetchLoop.inc
|
||||||
let nHealerQueue = toBeHealed.len
|
|
||||||
if 0 < nHealerQueue:
|
|
||||||
when extraTraceMessages:
|
|
||||||
trace logTxt "processing", peer, ctx=buddy.healingCtx(env), nHealerQueue
|
|
||||||
|
|
||||||
for n in 0 ..< toBeHealed.len:
|
# Process request range for healing
|
||||||
# Stop processing, hand back the rest
|
let (nNodes, rejected) = await buddy.stoSlotsHealingImpl(ignore, kvp, env)
|
||||||
if buddy.ctrl.stopped:
|
if kvp.data.slots.processed.isFull:
|
||||||
for m in n ..< toBeHealed.len:
|
env.nSlotLists.inc
|
||||||
let kvp = toBeHealed[n]
|
env.parkedStorage.excl kvp.data.accKey
|
||||||
discard env.fetchStoragePart.append(kvp.key, kvp.data)
|
else:
|
||||||
env.parkedStorage.excl kvp.data.accKey
|
# Re-queue again, to be re-processed in another cycle
|
||||||
break
|
visited.incl kvp.data.accKey
|
||||||
|
env.storageQueueAppend kvp
|
||||||
|
|
||||||
let kvp = toBeHealed[n]
|
ignore = ignore + rejected
|
||||||
await buddy.storageSlotsHealing(kvp, env)
|
nNodesFetched.inc(nNodes)
|
||||||
|
|
||||||
# Re-queue again unless ready
|
|
||||||
env.parkedStorage.excl kvp.data.accKey # un-register
|
|
||||||
if not kvp.data.slots.processed.isFull:
|
|
||||||
discard env.fetchStoragePart.append(kvp.key, kvp.data)
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "done", peer, ctx=buddy.healingCtx(env), nHealerQueue
|
trace logTxt "done", peer, ctx=buddy.healingCtx(env),
|
||||||
|
nNodesFetched, nFetchLoop, nIgnore=ignore.len, nVisited=visited.len
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -57,7 +57,7 @@ import
|
||||||
"."/[storage_queue_helper, swap_in]
|
"."/[storage_queue_helper, swap_in]
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "snap-range"
|
topics = "snap-acc"
|
||||||
|
|
||||||
const
|
const
|
||||||
extraTraceMessages = false or true
|
extraTraceMessages = false or true
|
||||||
|
@ -70,19 +70,19 @@ const
|
||||||
template logTxt(info: static[string]): static[string] =
|
template logTxt(info: static[string]): static[string] =
|
||||||
"Accounts range " & info
|
"Accounts range " & info
|
||||||
|
|
||||||
#proc `$`(rs: NodeTagRangeSet): string =
|
proc `$`(rs: NodeTagRangeSet): string =
|
||||||
# rs.fullFactor.toPC(0)
|
rs.fullPC3
|
||||||
|
|
||||||
proc `$`(iv: NodeTagRange): string =
|
proc `$`(iv: NodeTagRange): string =
|
||||||
iv.fullFactor.toPC(3)
|
iv.fullPC3
|
||||||
|
|
||||||
proc fetchCtx(
|
proc fetchCtx(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): string {.used.} =
|
): string {.used.} =
|
||||||
"{" &
|
"{" &
|
||||||
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
|
"piv=" & "#" & $env.stateHeader.blockNumber & "," &
|
||||||
"runState=" & $buddy.ctrl.state & "," &
|
"ctl=" & $buddy.ctrl.state & "," &
|
||||||
"nStoQu=" & $env.storageQueueTotal() & "," &
|
"nStoQu=" & $env.storageQueueTotal() & "," &
|
||||||
"nSlotLists=" & $env.nSlotLists & "}"
|
"nSlotLists=" & $env.nSlotLists & "}"
|
||||||
|
|
||||||
|
@ -133,12 +133,10 @@ proc accountsRangefetchImpl(
|
||||||
rc = await buddy.getAccountRange(stateRoot, iv, pivot)
|
rc = await buddy.getAccountRange(stateRoot, iv, pivot)
|
||||||
if rc.isErr:
|
if rc.isErr:
|
||||||
fa.unprocessed.mergeSplit iv # fail => interval back to pool
|
fa.unprocessed.mergeSplit iv # fail => interval back to pool
|
||||||
let error = rc.error
|
if await buddy.ctrl.stopAfterSeriousComError(rc.error, buddy.only.errors):
|
||||||
if await buddy.ctrl.stopAfterSeriousComError(error, buddy.only.errors):
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
let reqLen {.used.} = $iv
|
|
||||||
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
||||||
reqLen, error
|
reqLen=iv, error=rc.error
|
||||||
return
|
return
|
||||||
rc.value
|
rc.value
|
||||||
|
|
||||||
|
@ -169,9 +167,8 @@ proc accountsRangefetchImpl(
|
||||||
# Bad data, just try another peer
|
# Bad data, just try another peer
|
||||||
buddy.ctrl.zombie = true
|
buddy.ctrl.zombie = true
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
let reqLen {.used.} = $iv
|
|
||||||
trace logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
trace logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
||||||
gotAccounts, gotStorage, reqLen, covered, error=rc.error
|
gotAccounts, gotStorage, reqLen=iv, covered, error=rc.error
|
||||||
return
|
return
|
||||||
rc.value
|
rc.value
|
||||||
|
|
||||||
|
@ -221,32 +218,21 @@ proc rangeFetchAccounts*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
) {.async.} =
|
) {.async.} =
|
||||||
## Fetch accounts and store them in the database.
|
## Fetch accounts and store them in the database.
|
||||||
let
|
let fa = env.fetchAccounts
|
||||||
fa = env.fetchAccounts
|
|
||||||
|
|
||||||
if not fa.processed.isFull():
|
if not fa.processed.isFull():
|
||||||
let
|
|
||||||
ctx {.used.} = buddy.ctx
|
|
||||||
peer {.used.} = buddy.peer
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "start", peer, ctx=buddy.fetchCtx(env)
|
trace logTxt "start", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||||
|
|
||||||
static:
|
var nFetchAccounts = 0 # for logging
|
||||||
doAssert 0 <= accountsFetchRetryMax
|
|
||||||
var
|
|
||||||
nFetchAccounts = 0 # for logging
|
|
||||||
nRetry = 0
|
|
||||||
while not fa.processed.isFull() and
|
while not fa.processed.isFull() and
|
||||||
buddy.ctrl.running and
|
buddy.ctrl.running and
|
||||||
not env.archived and
|
not env.archived:
|
||||||
nRetry <= accountsFetchRetryMax:
|
|
||||||
# May repeat fetching with re-arranged request intervals
|
# May repeat fetching with re-arranged request intervals
|
||||||
if await buddy.accountsRangefetchImpl(env):
|
if not await buddy.accountsRangefetchImpl(env):
|
||||||
nFetchAccounts.inc
|
break
|
||||||
nRetry = 0
|
|
||||||
else:
|
nFetchAccounts.inc
|
||||||
nRetry.inc
|
|
||||||
|
|
||||||
# Clean up storage slots queue first it it becomes too large
|
# Clean up storage slots queue first it it becomes too large
|
||||||
let nStoQu = env.fetchStorageFull.len + env.fetchStoragePart.len
|
let nStoQu = env.fetchStorageFull.len + env.fetchStoragePart.len
|
||||||
|
@ -254,7 +240,8 @@ proc rangeFetchAccounts*(
|
||||||
break
|
break
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
trace logTxt "done", peer, ctx=buddy.fetchCtx(env), nFetchAccounts
|
trace logTxt "done", peer=buddy.peer, ctx=buddy.fetchCtx(env),
|
||||||
|
nFetchAccounts
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -65,19 +65,20 @@
|
||||||
{.push raises: [].}
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
|
std/sets,
|
||||||
chronicles,
|
chronicles,
|
||||||
chronos,
|
chronos,
|
||||||
eth/[common, p2p],
|
eth/[common, p2p],
|
||||||
stew/[interval_set, keyed_queue],
|
stew/[interval_set, keyed_queue],
|
||||||
stint,
|
stint,
|
||||||
../../../sync_desc,
|
../../../sync_desc,
|
||||||
"../.."/[range_desc, worker_desc],
|
"../.."/[constants, range_desc, worker_desc],
|
||||||
../com/[com_error, get_storage_ranges],
|
../com/[com_error, get_storage_ranges],
|
||||||
../db/[hexary_error, snapdb_storage_slots],
|
../db/[hexary_error, snapdb_storage_slots],
|
||||||
./storage_queue_helper
|
./storage_queue_helper
|
||||||
|
|
||||||
logScope:
|
logScope:
|
||||||
topics = "snap-range"
|
topics = "snap-slot"
|
||||||
|
|
||||||
const
|
const
|
||||||
extraTraceMessages = false or true
|
extraTraceMessages = false or true
|
||||||
|
@ -93,27 +94,26 @@ proc fetchCtx(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): string =
|
): string =
|
||||||
let
|
|
||||||
nStoQu = (env.fetchStorageFull.len +
|
|
||||||
env.fetchStoragePart.len +
|
|
||||||
env.parkedStorage.len)
|
|
||||||
"{" &
|
"{" &
|
||||||
"pivot=" & "#" & $env.stateHeader.blockNumber & "," &
|
"piv=" & "#" & $env.stateHeader.blockNumber & "," &
|
||||||
"runState=" & $buddy.ctrl.state & "," &
|
"ctl=" & $buddy.ctrl.state & "," &
|
||||||
"nStoQu=" & $nStoQu & "," &
|
"nQuFull=" & $env.fetchStorageFull.len & "," &
|
||||||
|
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||||
|
"nParked=" & $env.parkedStorage.len & "," &
|
||||||
"nSlotLists=" & $env.nSlotLists & "}"
|
"nSlotLists=" & $env.nSlotLists & "}"
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private functions
|
# Private functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc storeStoragesSingleBatch(
|
proc fetchStorageSlotsImpl(
|
||||||
buddy: SnapBuddyRef;
|
buddy: SnapBuddyRef;
|
||||||
req: seq[AccountSlotsHeader];
|
req: seq[AccountSlotsHeader];
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
): Future[bool]
|
): Future[Result[HashSet[NodeKey],void]]
|
||||||
{.async.} =
|
{.async.} =
|
||||||
## Fetch account storage slots and store them in the database.
|
## Fetch account storage slots and store them in the database, returns
|
||||||
|
## number of error or -1 for total failure.
|
||||||
let
|
let
|
||||||
ctx = buddy.ctx
|
ctx = buddy.ctx
|
||||||
peer = buddy.peer
|
peer = buddy.peer
|
||||||
|
@ -124,29 +124,28 @@ proc storeStoragesSingleBatch(
|
||||||
var stoRange = block:
|
var stoRange = block:
|
||||||
let rc = await buddy.getStorageRanges(stateRoot, req, pivot)
|
let rc = await buddy.getStorageRanges(stateRoot, req, pivot)
|
||||||
if rc.isErr:
|
if rc.isErr:
|
||||||
let error = rc.error
|
if await buddy.ctrl.stopAfterSeriousComError(rc.error, buddy.only.errors):
|
||||||
if await buddy.ctrl.stopAfterSeriousComError(error, buddy.only.errors):
|
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
||||||
trace logTxt "fetch error => stop", peer, ctx=buddy.fetchCtx(env),
|
nReq=req.len, error=rc.error
|
||||||
nReq=req.len, error
|
return err() # all of `req` failed
|
||||||
return false # all of `req` failed
|
|
||||||
rc.value
|
rc.value
|
||||||
|
|
||||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||||
buddy.only.errors.resetComError()
|
buddy.only.errors.resetComError()
|
||||||
|
|
||||||
var gotSlotLists = stoRange.data.storages.len
|
var
|
||||||
if 0 < gotSlotLists:
|
nSlotLists = stoRange.data.storages.len
|
||||||
|
reject: HashSet[NodeKey]
|
||||||
|
|
||||||
|
if 0 < nSlotLists:
|
||||||
# Verify/process storages data and save it to disk
|
# Verify/process storages data and save it to disk
|
||||||
let report = ctx.pool.snapDb.importStorageSlots(peer, stoRange.data)
|
let report = ctx.pool.snapDb.importStorageSlots(peer, stoRange.data)
|
||||||
if 0 < report.len:
|
if 0 < report.len:
|
||||||
if report[^1].slot.isNone:
|
if report[^1].slot.isNone:
|
||||||
# Failed to store on database, not much that can be done here
|
# Failed to store on database, not much that can be done here
|
||||||
gotSlotLists.dec(report.len - 1) # for logging only
|
|
||||||
|
|
||||||
error logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
error logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
||||||
nSlotLists=gotSlotLists, nReq=req.len, error=report[^1].error
|
nSlotLists=0, nReq=req.len, error=report[^1].error
|
||||||
return false # all of `req` failed
|
return err() # all of `req` failed
|
||||||
|
|
||||||
# Push back error entries to be processed later
|
# Push back error entries to be processed later
|
||||||
for w in report:
|
for w in report:
|
||||||
|
@ -155,17 +154,15 @@ proc storeStoragesSingleBatch(
|
||||||
let
|
let
|
||||||
inx = w.slot.get
|
inx = w.slot.get
|
||||||
acc = stoRange.data.storages[inx].account
|
acc = stoRange.data.storages[inx].account
|
||||||
|
splitOk = w.error in {RootNodeMismatch,RightBoundaryProofFailed}
|
||||||
|
|
||||||
if w.error == RootNodeMismatch:
|
reject.incl acc.accKey
|
||||||
# Some pathological case, needs further investigation. For the
|
|
||||||
# moment, provide partial fetches.
|
|
||||||
env.storageQueueAppendPartialBisect acc
|
|
||||||
|
|
||||||
elif w.error == RightBoundaryProofFailed and
|
if splitOk:
|
||||||
acc.subRange.isSome and 1 < acc.subRange.unsafeGet.len:
|
# Some pathological cases need further investigation. For the
|
||||||
# Some pathological case, needs further investigation. For the
|
# moment, provide partial split requeue. So a different range
|
||||||
# moment, provide a partial fetches.
|
# will be unqueued and processed, next time.
|
||||||
env.storageQueueAppendPartialBisect acc
|
env.storageQueueAppendPartialSplit acc
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# Reset any partial result (which would be the last entry) to
|
# Reset any partial result (which would be the last entry) to
|
||||||
|
@ -173,33 +170,24 @@ proc storeStoragesSingleBatch(
|
||||||
# re-fetched completely for this account.
|
# re-fetched completely for this account.
|
||||||
env.storageQueueAppendFull acc
|
env.storageQueueAppendFull acc
|
||||||
|
|
||||||
# Last entry might be partial (if any)
|
error logTxt "import error", peer, ctx=buddy.fetchCtx(env), splitOk,
|
||||||
#
|
nSlotLists, nRejected=reject.len, nReqInx=inx, nReq=req.len,
|
||||||
# Forget about partial result processing if the last partial entry
|
|
||||||
# was reported because
|
|
||||||
# * either there was an error processing it
|
|
||||||
# * or there were some gaps reprored as dangling links
|
|
||||||
stoRange.data.proof = @[]
|
|
||||||
|
|
||||||
# Update local statistics counter for `nSlotLists` counter update
|
|
||||||
gotSlotLists.dec
|
|
||||||
|
|
||||||
error logTxt "processing error", peer, ctx=buddy.fetchCtx(env),
|
|
||||||
nSlotLists=gotSlotLists, nReqInx=inx, nReq=req.len,
|
|
||||||
nDangling=w.dangling.len, error=w.error
|
nDangling=w.dangling.len, error=w.error
|
||||||
|
|
||||||
# Update statistics
|
# Return unprocessed left overs to batch queue. The `req[^1].subRange` is
|
||||||
if gotSlotLists == 1 and
|
# the original range requested for the last item (if any.)
|
||||||
req[0].subRange.isSome and
|
let (_,removed) = env.storageQueueUpdate(stoRange.leftOver, reject)
|
||||||
env.fetchStoragePart.hasKey req[0].storageRoot:
|
|
||||||
# Successful partial request, but not completely done with yet.
|
|
||||||
gotSlotLists = 0
|
|
||||||
|
|
||||||
env.nSlotLists.inc(gotSlotLists)
|
# Update statistics. The variable removed is set if the queue for a partial
|
||||||
|
# slot range was logically removed. A partial slot range list has one entry.
|
||||||
|
# So the correction factor for the slot lists statistics is `removed - 1`.
|
||||||
|
env.nSlotLists.inc(nSlotLists - reject.len + (removed - 1))
|
||||||
|
|
||||||
# Return unprocessed left overs to batch queue
|
# Clean up, un-park successful slots (if any)
|
||||||
env.storageQueueAppend(stoRange.leftOver, req[^1].subRange)
|
for w in stoRange.data.storages:
|
||||||
return true
|
env.parkedStorage.excl w.account.accKey
|
||||||
|
|
||||||
|
return ok(reject)
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public functions
|
# Public functions
|
||||||
|
@ -214,63 +202,58 @@ proc rangeFetchStorageSlots*(
|
||||||
## each work item on the queue at least once.For partial partial slot range
|
## each work item on the queue at least once.For partial partial slot range
|
||||||
## items this means in case of success that the outstanding range has become
|
## items this means in case of success that the outstanding range has become
|
||||||
## at least smaller.
|
## at least smaller.
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "start", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||||
|
|
||||||
# Fetch storage data and save it on disk. Storage requests are managed by
|
# Fetch storage data and save it on disk. Storage requests are managed by
|
||||||
# request queues for handling full/partial replies and re-fetch issues. For
|
# request queues for handling full/partial replies and re-fetch issues. For
|
||||||
# all practical puroses, this request queue should mostly be empty.
|
# all practical puroses, this request queue should mostly be empty.
|
||||||
if 0 < env.fetchStorageFull.len or 0 < env.fetchStoragePart.len:
|
for (fetchFn, failMax) in [
|
||||||
let
|
(storageQueueFetchFull, storageSlotsFetchFailedFullMax),
|
||||||
ctx = buddy.ctx
|
(storageQueueFetchPartial, storageSlotsFetchFailedPartialMax)]:
|
||||||
peer {.used.} = buddy.peer
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
|
||||||
trace logTxt "start", peer, ctx=buddy.fetchCtx(env)
|
|
||||||
|
|
||||||
|
var
|
||||||
|
ignored: HashSet[NodeKey]
|
||||||
|
rc = Result[HashSet[NodeKey],void].ok(ignored) # set ok() start value
|
||||||
|
|
||||||
# Run batch even if `archived` flag is set in order to shrink the queues.
|
# Run batch even if `archived` flag is set in order to shrink the queues.
|
||||||
var delayed: seq[AccountSlotsHeader]
|
while buddy.ctrl.running and
|
||||||
while buddy.ctrl.running:
|
rc.isOk and
|
||||||
|
ignored.len <= failMax:
|
||||||
|
|
||||||
# Pull out the next request list from the queue
|
# Pull out the next request list from the queue
|
||||||
let (req, nComplete {.used.}, nPartial {.used.}) =
|
let reqList = buddy.ctx.fetchFn(env, ignored)
|
||||||
ctx.storageQueueFetchFull(env)
|
if reqList.len == 0:
|
||||||
if req.len == 0:
|
when extraTraceMessages:
|
||||||
|
trace logTxt "queue exhausted", peer=buddy.peer,
|
||||||
|
ctx=buddy.fetchCtx(env),
|
||||||
|
isPartQueue=(fetchFn==storageQueueFetchPartial)
|
||||||
break
|
break
|
||||||
|
|
||||||
when extraTraceMessages:
|
# Process list, store in database. The `reqList` is re-queued accordingly
|
||||||
trace logTxt "fetch full", peer, ctx=buddy.fetchCtx(env),
|
# in the `fetchStorageSlotsImpl()` function unless there is an error. In
|
||||||
nStorageQuFull=env.fetchStorageFull.len, nReq=req.len,
|
# the error case, the whole argument list `reqList` is left untouched.
|
||||||
nPartial, nComplete
|
rc = await buddy.fetchStorageSlotsImpl(reqList, env)
|
||||||
|
if rc.isOk:
|
||||||
if await buddy.storeStoragesSingleBatch(req, env):
|
for w in rc.value:
|
||||||
for w in req:
|
ignored.incl w # Ignoring bogus response items
|
||||||
env.parkedStorage.excl w.accKey # Done with these items
|
|
||||||
else:
|
else:
|
||||||
delayed &= req
|
# Push back unprocessed jobs after error
|
||||||
env.storageQueueAppend delayed
|
env.storageQueueAppendPartialSplit reqList
|
||||||
|
|
||||||
# Ditto for partial queue
|
|
||||||
delayed.setLen(0)
|
|
||||||
while buddy.ctrl.running:
|
|
||||||
# Pull out the next request item from the queue
|
|
||||||
let rc = env.storageQueueFetchPartial()
|
|
||||||
if rc.isErr:
|
|
||||||
break
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
when extraTraceMessages:
|
||||||
let
|
trace logTxt "processed", peer=buddy.peer, ctx=buddy.fetchCtx(env),
|
||||||
subRange {.used.} = rc.value.subRange.get
|
isPartQueue=(fetchFn==storageQueueFetchPartial),
|
||||||
account {.used.} = rc.value.accKey
|
nReqList=reqList.len,
|
||||||
trace logTxt "fetch partial", peer, ctx=buddy.fetchCtx(env),
|
nIgnored=ignored.len,
|
||||||
nStorageQuPart=env.fetchStoragePart.len, subRange, account
|
subRange0=reqList[0].subRange.get(otherwise=FullNodeTagRange),
|
||||||
|
account0=reqList[0].accKey,
|
||||||
|
rc=(if rc.isOk: rc.value.len else: -1)
|
||||||
|
# End `while`
|
||||||
|
# End `for`
|
||||||
|
|
||||||
if await buddy.storeStoragesSingleBatch(@[rc.value], env):
|
when extraTraceMessages:
|
||||||
env.parkedStorage.excl rc.value.accKey # Done with this item
|
trace logTxt "done", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||||
else:
|
|
||||||
delayed.add rc.value
|
|
||||||
env.storageQueueAppend delayed
|
|
||||||
|
|
||||||
when extraTraceMessages:
|
|
||||||
trace logTxt "done", peer, ctx=buddy.fetchCtx(env)
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -8,19 +8,46 @@
|
||||||
# at your option. This file may not be copied, modified, or distributed
|
# at your option. This file may not be copied, modified, or distributed
|
||||||
# except according to those terms.
|
# except according to those terms.
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
|
std/sets,
|
||||||
|
chronicles,
|
||||||
eth/[common, p2p],
|
eth/[common, p2p],
|
||||||
stew/[interval_set, keyed_queue],
|
stew/[interval_set, keyed_queue],
|
||||||
../../../sync_desc,
|
../../../sync_desc,
|
||||||
"../.."/[constants, range_desc, worker_desc],
|
"../.."/[constants, range_desc, worker_desc],
|
||||||
../db/[hexary_inspect, snapdb_storage_slots]
|
../db/[hexary_inspect, snapdb_storage_slots]
|
||||||
|
|
||||||
{.push raises: [].}
|
logScope:
|
||||||
|
topics = "snap-slots"
|
||||||
|
|
||||||
|
type
|
||||||
|
StoQuSlotsKVP* = KeyedQueuePair[Hash256,SnapSlotsQueueItemRef]
|
||||||
|
## Key-value return code from `SnapSlotsQueue` handler
|
||||||
|
|
||||||
|
StoQuPartialSlotsQueue = object
|
||||||
|
## Return type for `getOrMakePartial()`
|
||||||
|
stoQu: SnapSlotsQueueItemRef
|
||||||
|
isCompleted: bool
|
||||||
|
|
||||||
|
const
|
||||||
|
extraTraceMessages = false # or true
|
||||||
|
## Enabled additional logging noise
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Private helpers
|
# Private helpers
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
template logTxt(info: static[string]): static[string] =
|
||||||
|
"Storage queue " & info
|
||||||
|
|
||||||
|
proc `$`(rs: NodeTagRangeSet): string =
|
||||||
|
rs.fullPC3
|
||||||
|
|
||||||
|
proc `$`(tr: SnapTodoRanges): string =
|
||||||
|
tr.fullPC3
|
||||||
|
|
||||||
template noExceptionOops(info: static[string]; code: untyped) =
|
template noExceptionOops(info: static[string]; code: untyped) =
|
||||||
try:
|
try:
|
||||||
code
|
code
|
||||||
|
@ -32,23 +59,151 @@ template noExceptionOops(info: static[string]; code: untyped) =
|
||||||
# Private functions
|
# Private functions
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc getOrMakePartial(
|
proc updatePartial(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef; # Current pivot environment
|
||||||
stoRoot: Hash256;
|
req: AccountSlotsChanged; # Left over account data
|
||||||
accKey: NodeKey;
|
): bool = # List entry was added
|
||||||
): (SnapSlotsQueueItemRef, bool) =
|
## Update the range of account argument `req` to the partial slot ranges
|
||||||
## Create record on `fetchStoragePart` or return existing one
|
## queue.
|
||||||
let rc = env.fetchStoragePart.lruFetch stoRoot
|
##
|
||||||
if rc.isOk:
|
## The function returns `true` if a new list entry was added.
|
||||||
result = (rc.value, true) # Value exists
|
let
|
||||||
else:
|
accKey = req.account.accKey
|
||||||
result = (SnapSlotsQueueItemRef(accKey: accKey), false) # New value
|
stoRoot = req.account.storageRoot
|
||||||
env.parkedStorage.excl accKey # Un-park
|
noFullEntry = env.fetchStorageFull.delete(stoRoot).isErr
|
||||||
discard env.fetchStoragePart.append(stoRoot, result[0])
|
iv = req.account.subRange.get(otherwise = FullNodeTagRange)
|
||||||
|
jv = req.newRange.get(otherwise = FullNodeTagRange)
|
||||||
|
(slots, newEntry, newPartEntry) = block:
|
||||||
|
let rc = env.fetchStoragePart.lruFetch stoRoot
|
||||||
|
if rc.isOk:
|
||||||
|
(rc.value.slots, false, false)
|
||||||
|
else:
|
||||||
|
# New entry
|
||||||
|
let
|
||||||
|
stoSlo = SnapRangeBatchRef(processed: NodeTagRangeSet.init())
|
||||||
|
stoItem = SnapSlotsQueueItemRef(accKey: accKey, slots: stoSlo)
|
||||||
|
discard env.fetchStoragePart.append(stoRoot, stoItem)
|
||||||
|
stoSlo.unprocessed.init(clear = true)
|
||||||
|
|
||||||
if result[0].slots.isNil:
|
# Initalise ranges
|
||||||
result[0].slots = SnapRangeBatchRef(processed: NodeTagRangeSet.init())
|
var newItem = false
|
||||||
result[0].slots.unprocessed.init()
|
if iv == FullNodeTagRange:
|
||||||
|
# New record (probably was a full range, before)
|
||||||
|
stoSlo.unprocessed.mergeSplit FullNodeTagRange
|
||||||
|
newItem = noFullEntry
|
||||||
|
else:
|
||||||
|
# Restore `processed` range, `iv` was the left over.
|
||||||
|
discard stoSlo.processed.merge FullNodeTagRange
|
||||||
|
discard stoSlo.processed.reduce iv
|
||||||
|
(stoSlo, newItem, true)
|
||||||
|
|
||||||
|
# Remove delta state relative to original state
|
||||||
|
if iv != jv:
|
||||||
|
# Calculate `iv - jv`
|
||||||
|
let ivSet = NodeTagRangeSet.init()
|
||||||
|
discard ivSet.merge iv # Previous range
|
||||||
|
discard ivSet.reduce jv # Left over range
|
||||||
|
|
||||||
|
# Update `processed` by delta range
|
||||||
|
for w in ivSet.increasing:
|
||||||
|
discard slots.processed.merge w
|
||||||
|
|
||||||
|
# Update left over
|
||||||
|
slots.unprocessed.merge jv # Left over range
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "updated partially", accKey, iv, jv,
|
||||||
|
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||||
|
noFullEntry, newEntry, newPartEntry
|
||||||
|
|
||||||
|
env.parkedStorage.excl accKey # Un-park (if any)
|
||||||
|
newEntry
|
||||||
|
|
||||||
|
|
||||||
|
proc appendPartial(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
acc: AccountSlotsHeader; # Left over account data
|
||||||
|
splitMerge: bool; # Bisect or straight merge
|
||||||
|
): bool = # List entry was added
|
||||||
|
## Append to partial queue. The argument range of `acc` is split so that
|
||||||
|
## the next request of this range will result in the right most half size
|
||||||
|
## of this very range.
|
||||||
|
##
|
||||||
|
## The function returns `true` if a new list entry was added.
|
||||||
|
let
|
||||||
|
accKey = acc.accKey
|
||||||
|
stoRoot = acc.storageRoot
|
||||||
|
notFull = env.fetchStorageFull.delete(stoRoot).isErr
|
||||||
|
iv = acc.subRange.get(otherwise = FullNodeTagRange)
|
||||||
|
rc = env.fetchStoragePart.lruFetch acc.storageRoot
|
||||||
|
(slots,newEntry) = block:
|
||||||
|
if rc.isOk:
|
||||||
|
(rc.value.slots, false)
|
||||||
|
else:
|
||||||
|
# Restore missing range
|
||||||
|
let
|
||||||
|
stoSlo = SnapRangeBatchRef(processed: NodeTagRangeSet.init())
|
||||||
|
stoItem = SnapSlotsQueueItemRef(accKey: accKey, slots: stoSlo)
|
||||||
|
discard env.fetchStoragePart.append(stoRoot, stoItem)
|
||||||
|
stoSlo.unprocessed.init(clear = true)
|
||||||
|
discard stoSlo.processed.merge FullNodeTagRange
|
||||||
|
discard stoSlo.processed.reduce iv
|
||||||
|
(stoSlo, notFull)
|
||||||
|
|
||||||
|
if splitMerge:
|
||||||
|
slots.unprocessed.mergeSplit iv
|
||||||
|
else:
|
||||||
|
slots.unprocessed.merge iv
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "merged partial", splitMerge, accKey, iv,
|
||||||
|
processed=slots.processed, unprocessed=slots.unprocessed, newEntry
|
||||||
|
|
||||||
|
env.parkedStorage.excl accKey # Un-park (if any)
|
||||||
|
newEntry
|
||||||
|
|
||||||
|
|
||||||
|
proc reducePartial(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
acc: AccountSlotsHeader; # Left over account data
|
||||||
|
): bool = # List entry was removed
|
||||||
|
## Reduce range from partial ranges list.
|
||||||
|
##
|
||||||
|
## The function returns `true` if a list entry was removed.
|
||||||
|
# So `iv` was not the full range in which case all of `iv` was fully
|
||||||
|
# processed and there is nothing left.
|
||||||
|
let
|
||||||
|
accKey = acc.accKey
|
||||||
|
stoRoot = acc.storageRoot
|
||||||
|
notFull = env.fetchStorageFull.delete(stoRoot).isErr
|
||||||
|
iv = acc.subRange.get(otherwise = FullNodeTagRange)
|
||||||
|
rc = env.fetchStoragePart.lruFetch stoRoot
|
||||||
|
|
||||||
|
var entryRemoved = false
|
||||||
|
if rc.isErr:
|
||||||
|
# This was the last missing range anyway. So there is no need to
|
||||||
|
# re-insert this entry.
|
||||||
|
entryRemoved = true # Virtually deleted
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "reduced partial, discarded", accKey, iv, entryRemoved
|
||||||
|
else:
|
||||||
|
let slots = rc.value.slots
|
||||||
|
discard slots.processed.merge iv
|
||||||
|
|
||||||
|
if slots.processed.isFull:
|
||||||
|
env.fetchStoragePart.del stoRoot
|
||||||
|
result = true
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "reduced partial, deleted", accKey, iv, entryRemoved
|
||||||
|
else:
|
||||||
|
slots.unprocessed.reduce iv
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "reduced partial, completed", accKey, iv,
|
||||||
|
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||||
|
entryRemoved
|
||||||
|
|
||||||
|
env.parkedStorage.excl accKey # Un-park (if any)
|
||||||
|
entryRemoved
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public helpers
|
# Public helpers
|
||||||
|
@ -66,130 +221,150 @@ proc storageQueueAppendFull*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
stoRoot: Hash256;
|
stoRoot: Hash256;
|
||||||
accKey: NodeKey;
|
accKey: NodeKey;
|
||||||
) =
|
): bool
|
||||||
## Append item to `fetchStorageFull` queue
|
{.discardable.} =
|
||||||
env.fetchStoragePart.del stoRoot # Not a partial item anymore (if any)
|
## Append item to `fetchStorageFull` queue. This undoes the effect of the
|
||||||
env.parkedStorage.excl accKey # Un-park
|
## function `storageQueueFetchFull()`. The function returns `true` if
|
||||||
discard env.fetchStorageFull.append(
|
## a new entry was added.
|
||||||
stoRoot, SnapSlotsQueueItemRef(accKey: accKey))
|
let
|
||||||
|
notPart = env.fetchStoragePart.delete(stoRoot).isErr
|
||||||
|
stoItem = SnapSlotsQueueItemRef(accKey: accKey)
|
||||||
|
env.parkedStorage.excl accKey # Un-park (if any)
|
||||||
|
env.fetchStorageFull.append(stoRoot, stoItem) and notPart
|
||||||
|
|
||||||
proc storageQueueAppendFull*(
|
proc storageQueueAppendFull*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef;
|
||||||
acc: AccountSlotsHeader;
|
acc: AccountSlotsHeader;
|
||||||
) =
|
): bool
|
||||||
## variant of `storageQueueAppendFull()`
|
{.discardable.} =
|
||||||
|
## Variant of `storageQueueAppendFull()`
|
||||||
env.storageQueueAppendFull(acc.storageRoot, acc.accKey)
|
env.storageQueueAppendFull(acc.storageRoot, acc.accKey)
|
||||||
|
|
||||||
proc storageQueueAppendFull*(
|
proc storageQueueAppendPartialSplit*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef; # Current pivot environment
|
||||||
kvp: SnapSlotsQueuePair;
|
acc: AccountSlotsHeader; # Left over account data
|
||||||
|
): bool
|
||||||
|
{.discardable.} =
|
||||||
|
## Merge slot range back into partial queue. This undoes the effect of the
|
||||||
|
## function `storageQueueFetchPartial()` with the additional feature that
|
||||||
|
## the argument range of `acc` is split. So some next range request for this
|
||||||
|
## account will result in the right most half size of this very range just
|
||||||
|
## inserted.
|
||||||
|
##
|
||||||
|
## The function returns `true` if a new entry was added.
|
||||||
|
env.appendPartial(acc, splitMerge=true)
|
||||||
|
|
||||||
|
proc storageQueueAppendPartialSplit*(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
req: openArray[AccountSlotsHeader]; # List of entries to push back
|
||||||
) =
|
) =
|
||||||
## variant of `storageQueueAppendFull()`
|
## Variant of `storageQueueAppendPartialSplit()`
|
||||||
env.storageQueueAppendFull(kvp.key, kvp.data.accKey)
|
for w in req:
|
||||||
|
discard env.appendPartial(w, splitMerge=true)
|
||||||
|
|
||||||
proc storageQueueAppendPartialBisect*(
|
|
||||||
env: SnapPivotRef;
|
|
||||||
acc: AccountSlotsHeader;
|
|
||||||
) =
|
|
||||||
## Append to partial queue so that the next fetch range is half the size of
|
|
||||||
## the current next range.
|
|
||||||
|
|
||||||
# Fetch/rotate queue item
|
|
||||||
let data = env.getOrMakePartial(acc.storageRoot, acc.accKey)[0]
|
|
||||||
|
|
||||||
# Derive unprocessed ranges => into lower priority queue
|
|
||||||
data.slots.unprocessed.clear()
|
|
||||||
discard data.slots.unprocessed[1].merge(low(NodeTag),high(NodeTag))
|
|
||||||
for iv in data.slots.processed.increasing:
|
|
||||||
discard data.slots.unprocessed[1].reduce iv # complements processed ranges
|
|
||||||
|
|
||||||
# Prioritise half of first unprocessed range
|
|
||||||
let rc = data.slots.unprocessed[1].ge()
|
|
||||||
if rc.isErr:
|
|
||||||
env.fetchStoragePart.del acc.storageRoot # Oops, nothing to do
|
|
||||||
return # Done
|
|
||||||
let halfTag = rc.value.minPt + ((rc.value.maxPt - rc.value.minPt) div 2)
|
|
||||||
data.slots.unprocessed.merge NodeTagRange.new(rc.value.minPt, halfTag)
|
|
||||||
|
|
||||||
|
|
||||||
proc storageQueueAppend*(
|
proc storageQueueAppend*(
|
||||||
env: SnapPivotRef;
|
env: SnapPivotRef; # Current pivot environment
|
||||||
reqList: openArray[AccountSlotsHeader];
|
req: openArray[AccountSlotsHeader]; # List of entries to push back
|
||||||
subRange = none(NodeTagRange); # For a partially fetched slot
|
|
||||||
) =
|
) =
|
||||||
for n,w in reqList:
|
## Append a job list of ranges. This undoes the effect of either function
|
||||||
env.parkedStorage.excl w.accKey # Un-park
|
## `storageQueueFetchFull()` or `storageQueueFetchPartial()`.
|
||||||
|
for w in req:
|
||||||
# Only last item (when `n+1 == reqList.len`) may be registered partial
|
let iv = w.subRange.get(otherwise = FullNodeTagRange)
|
||||||
if w.subRange.isNone or n + 1 < reqList.len:
|
if iv == FullNodeTagRange:
|
||||||
env.storageQueueAppendFull w
|
env.storageQueueAppendFull w
|
||||||
|
|
||||||
else:
|
else:
|
||||||
env.fetchStorageFull.del w.storageRoot
|
discard env.appendPartial(w, splitMerge=false)
|
||||||
|
|
||||||
|
proc storageQueueAppend*(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
kvp: StoQuSlotsKVP; # List of entries to push back
|
||||||
|
) =
|
||||||
|
## Insert back a full administrative queue record. This function is typically
|
||||||
|
## used after a record was unlinked vis `storageQueueUnlinkPartialItem()`.
|
||||||
|
let accKey = kvp.data.accKey
|
||||||
|
env.parkedStorage.excl accKey # Un-park (if any)
|
||||||
|
|
||||||
|
if kvp.data.slots.isNil:
|
||||||
|
env.fetchStoragePart.del kvp.key # Sanitise data
|
||||||
|
discard env.fetchStorageFull.append(kvp.key, kvp.data)
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "re-queued full", accKey
|
||||||
|
else:
|
||||||
|
env.fetchStorageFull.del kvp.key # Sanitise data
|
||||||
|
|
||||||
|
let rc = env.fetchStoragePart.eq kvp.key
|
||||||
|
if rc.isErr:
|
||||||
|
discard env.fetchStoragePart.append(kvp.key, kvp.data)
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "re-queued partial",
|
||||||
|
processed=kvp.data.slots.processed,
|
||||||
|
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||||
|
else:
|
||||||
|
# Merge `processed` ranges
|
||||||
|
for w in kvp.data.slots.processed.increasing:
|
||||||
|
discard rc.value.slots.processed.merge w
|
||||||
|
|
||||||
|
# Intersect `unprocessed` ranges
|
||||||
|
for w in kvp.data.slots.unprocessed.ivItems:
|
||||||
|
rc.value.slots.unprocessed.reduce w
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "re-merged partial",
|
||||||
|
processed=kvp.data.slots.processed,
|
||||||
|
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||||
|
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
# Public functions, modify/update/remove queue items
|
||||||
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
|
proc storageQueueUpdate*(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
req: openArray[AccountSlotsChanged]; # List of entries to push back
|
||||||
|
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||||
|
): (int,int) = # Added, removed
|
||||||
|
## Similar to `storageQueueAppend()`, this functions appends account header
|
||||||
|
## entries back into the storage queues. Different to `storageQueueAppend()`,
|
||||||
|
## this function is aware of changes after partial downloads from the network.
|
||||||
|
##
|
||||||
|
## The function returns the tuple `(added, removed)` reflecting the numbers
|
||||||
|
## of changed list items (accumulated for partial and full range lists.)
|
||||||
|
for w in req:
|
||||||
|
if w.account.accKey notin ignore:
|
||||||
let
|
let
|
||||||
(data, hasItem) = env.getOrMakePartial(w.storageRoot, w.accKey)
|
iv = w.account.subRange.get(otherwise = FullNodeTagRange)
|
||||||
iv = w.subRange.unsafeGet
|
jv = w.newRange.get(otherwise = FullNodeTagRange)
|
||||||
|
if jv != FullNodeTagRange:
|
||||||
# Register partial range
|
# So `jv` is some rest after processing. Typically this entry is
|
||||||
if subRange.isSome:
|
# related to partial range response message that came with a proof.
|
||||||
# The `subRange` is the original request, `iv` the uncompleted part
|
if env.updatePartial w:
|
||||||
let reqRange = subRange.unsafeGet
|
result[0].inc
|
||||||
if not hasItem:
|
when extraTraceMessages:
|
||||||
# Re-initialise book keeping
|
trace logTxt "update/append partial", accKey=w.account.accKey,
|
||||||
discard data.slots.processed.merge(low(NodeTag),high(NodeTag))
|
iv, jv, nAdded=result[0], nRemoved=result[1]
|
||||||
discard data.slots.processed.reduce reqRange
|
elif jv == iv:
|
||||||
data.slots.unprocessed.clear()
|
if env.storageQueueAppendFull w.account:
|
||||||
|
result[0].inc
|
||||||
# Calculate `reqRange - iv` which are the completed ranges
|
#when extraTraceMessages:
|
||||||
let temp = NodeTagRangeSet.init()
|
# trace logTxt "update/append full", accKey=w.account.accKey,
|
||||||
discard temp.merge reqRange
|
# nAdded=result[0], nRemoved=result[1]t
|
||||||
discard temp.reduce iv
|
|
||||||
|
|
||||||
# Update `processed` ranges by adding `reqRange - iv`
|
|
||||||
for w in temp.increasing:
|
|
||||||
discard data.slots.processed.merge w
|
|
||||||
|
|
||||||
# Update `unprocessed` ranges
|
|
||||||
data.slots.unprocessed.merge reqRange
|
|
||||||
data.slots.unprocessed.reduce iv
|
|
||||||
|
|
||||||
elif hasItem:
|
|
||||||
# Restore unfetched request
|
|
||||||
data.slots.unprocessed.merge iv
|
|
||||||
|
|
||||||
else:
|
else:
|
||||||
# Makes no sense with a `leftOver` item
|
if env.reducePartial w.account:
|
||||||
env.storageQueueAppendFull w
|
result[1].inc
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "update/reduce partial", accKey=w.account.accKey,
|
||||||
|
iv, jv, nAdded=result[0], nRemoved=result[1]
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# Public functions, make/create queue items
|
# Public functions, fetch/remove queue items
|
||||||
# ------------------------------------------------------------------------------
|
|
||||||
|
|
||||||
proc storageQueueGetOrMakePartial*(
|
|
||||||
env: SnapPivotRef;
|
|
||||||
stoRoot: Hash256;
|
|
||||||
accKey: NodeKey;
|
|
||||||
): SnapSlotsQueueItemRef =
|
|
||||||
## Create record on `fetchStoragePart` or return existing one
|
|
||||||
env.getOrMakePartial(stoRoot, accKey)[0]
|
|
||||||
|
|
||||||
proc storageQueueGetOrMakePartial*(
|
|
||||||
env: SnapPivotRef;
|
|
||||||
acc: AccountSlotsHeader;
|
|
||||||
): SnapSlotsQueueItemRef =
|
|
||||||
## Variant of `storageQueueGetOrMakePartial()`
|
|
||||||
env.getOrMakePartial(acc.storageRoot, acc.accKey)[0]
|
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
|
||||||
# Public functions, fetch and remove queue items
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc storageQueueFetchFull*(
|
proc storageQueueFetchFull*(
|
||||||
ctx: SnapCtxRef; # Global context
|
ctx: SnapCtxRef; # Global context
|
||||||
env: SnapPivotRef; # Current pivot environment
|
env: SnapPivotRef; # Current pivot environment
|
||||||
): (seq[AccountSlotsHeader],int,int) =
|
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||||
|
): seq[AccountSlotsHeader] =
|
||||||
## Fetch a list of at most `fetchRequestStorageSlotsMax` full work items
|
## Fetch a list of at most `fetchRequestStorageSlotsMax` full work items
|
||||||
## from the batch queue.
|
## from the batch queue.
|
||||||
##
|
##
|
||||||
|
@ -207,84 +382,118 @@ proc storageQueueFetchFull*(
|
||||||
## number of items moved to the partial queue is returned as third item of
|
## number of items moved to the partial queue is returned as third item of
|
||||||
## the return code tuple.
|
## the return code tuple.
|
||||||
##
|
##
|
||||||
var
|
|
||||||
rcList: seq[AccountSlotsHeader]
|
|
||||||
nComplete = 0
|
|
||||||
nPartial = 0
|
|
||||||
|
|
||||||
noExceptionOops("getNextSlotItemsFull"):
|
noExceptionOops("getNextSlotItemsFull"):
|
||||||
for kvp in env.fetchStorageFull.nextPairs:
|
for kvp in env.fetchStorageFull.nextPairs:
|
||||||
let
|
if kvp.data.accKey notin ignore:
|
||||||
getFn = ctx.pool.snapDb.getStorageSlotsFn kvp.data.accKey
|
let
|
||||||
rootKey = kvp.key.to(NodeKey)
|
getFn = ctx.pool.snapDb.getStorageSlotsFn kvp.data.accKey
|
||||||
accItem = AccountSlotsHeader(
|
rootKey = kvp.key.to(NodeKey)
|
||||||
accKey: kvp.data.accKey,
|
accItem = AccountSlotsHeader(
|
||||||
storageRoot: kvp.key)
|
accKey: kvp.data.accKey,
|
||||||
|
storageRoot: kvp.key)
|
||||||
|
|
||||||
# This item will either be returned, discarded, or moved to the partial
|
# This item will eventuallly be returned, discarded, or moved to the
|
||||||
# queue subject for healing. So it will be removed from this queue.
|
# partial queue (also subject for healing.) So it will be removed from
|
||||||
env.fetchStorageFull.del kvp.key # OK to delete current link
|
# the full range lists queue.
|
||||||
|
env.fetchStorageFull.del kvp.key # OK to delete current link
|
||||||
|
|
||||||
# Check whether the tree is fully empty
|
# Check whether the database trie is empty. Otherwise the sub-trie is
|
||||||
if rootKey.ByteArray32.getFn.len == 0:
|
# at least partially allocated.
|
||||||
# Collect for return
|
if rootKey.ByteArray32.getFn.len == 0:
|
||||||
rcList.add accItem
|
# Collect for return
|
||||||
env.parkedStorage.incl accItem.accKey # Registerd as absent
|
result.add accItem
|
||||||
|
env.parkedStorage.incl accItem.accKey # Registerd as absent
|
||||||
|
|
||||||
# Maximal number of items to fetch
|
# Maximal number of items to fetch
|
||||||
if fetchRequestStorageSlotsMax <= rcList.len:
|
if fetchRequestStorageSlotsMax <= result.len:
|
||||||
break
|
break # stop here
|
||||||
else:
|
|
||||||
# Check how much there is below the top level storage slots node. For
|
|
||||||
# a small storage trie, this check will be exhaustive.
|
|
||||||
let stats = getFn.hexaryInspectTrie(rootKey,
|
|
||||||
suspendAfter = storageSlotsTrieInheritPerusalMax,
|
|
||||||
maxDangling = 1)
|
|
||||||
|
|
||||||
if stats.dangling.len == 0 and stats.resumeCtx.isNil:
|
|
||||||
# This storage trie could be fully searched and there was no dangling
|
|
||||||
# node. So it is complete and can be fully removed from the batch.
|
|
||||||
nComplete.inc # Update for logging
|
|
||||||
else:
|
else:
|
||||||
# This item becomes a partially available slot
|
# Check how much there is below the top level storage slots node. For
|
||||||
#let data = env.storageQueueGetOrMakePartial accItem -- notused
|
# a small storage trie, this check will be exhaustive.
|
||||||
nPartial.inc # Update for logging
|
let stats = getFn.hexaryInspectTrie(rootKey,
|
||||||
|
suspendAfter = storageSlotsTrieInheritPerusalMax,
|
||||||
|
maxDangling = 1)
|
||||||
|
|
||||||
(rcList, nComplete, nPartial)
|
if stats.dangling.len == 0 and stats.resumeCtx.isNil:
|
||||||
|
# This storage trie could be fully searched and there was no
|
||||||
|
# dangling node. So it is complete and can be considered done.
|
||||||
|
# It can be left removed from the batch queue.
|
||||||
|
env.nSlotLists.inc # Update for logging
|
||||||
|
else:
|
||||||
|
# This item must be treated as a partially available slot
|
||||||
|
env.storageQueueAppendPartialSplit accItem
|
||||||
|
|
||||||
proc storageQueueFetchPartial*(
|
proc storageQueueFetchPartial*(
|
||||||
env: SnapPivotRef;
|
ctx: SnapCtxRef; # Global context (unused here)
|
||||||
): Result[AccountSlotsHeader,void] =
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||||
|
): seq[AccountSlotsHeader] = # At most one item
|
||||||
## Get work item from the batch queue. This will typically return the full
|
## Get work item from the batch queue. This will typically return the full
|
||||||
## work item and remove it from the queue unless the parially completed
|
## work item and remove it from the queue unless the parially completed
|
||||||
## range is fragmented.
|
## range is fragmented.
|
||||||
block findItem:
|
for kvp in env.fetchStoragePart.nextPairs:
|
||||||
for kvp in env.fetchStoragePart.nextPairs:
|
# Extract range and return single item request queue
|
||||||
# Extract range and return single item request queue
|
let
|
||||||
let rc = kvp.data.slots.unprocessed.fetch(maxLen = high(UInt256))
|
slots = kvp.data.slots
|
||||||
|
accKey = kvp.data.accKey
|
||||||
|
accepted = accKey notin ignore
|
||||||
|
if accepted:
|
||||||
|
let rc = slots.unprocessed.fetch()
|
||||||
if rc.isOk:
|
if rc.isOk:
|
||||||
result = ok(AccountSlotsHeader(
|
let reqItem = AccountSlotsHeader(
|
||||||
accKey: kvp.data.accKey,
|
accKey: accKey,
|
||||||
storageRoot: kvp.key,
|
storageRoot: kvp.key,
|
||||||
subRange: some rc.value))
|
subRange: some rc.value)
|
||||||
|
|
||||||
# Delete from batch queue if the `unprocessed` range set becomes empty
|
# Delete from batch queue if the `unprocessed` range has become empty.
|
||||||
# and the `processed` set is the complemet of `rc.value`.
|
if slots.unprocessed.isEmpty and
|
||||||
if kvp.data.slots.unprocessed.isEmpty and
|
high(UInt256) - rc.value.len <= slots.processed.total:
|
||||||
high(UInt256) - rc.value.len <= kvp.data.slots.processed.total:
|
# If this is all the rest, the record can be deleted from the todo
|
||||||
env.fetchStoragePart.del kvp.key
|
# list. If not fully downloaded at a later stage, a new record will
|
||||||
env.parkedStorage.incl kvp.data.accKey # Temporarily parked
|
# be created on-the-fly.
|
||||||
return
|
env.parkedStorage.incl accKey # Temporarily parked
|
||||||
|
env.fetchStoragePart.del kvp.key # Last one not needed
|
||||||
else:
|
else:
|
||||||
# Otherwise rotate queue
|
# Otherwise accept and update/rotate queue. Note that `lruFetch`
|
||||||
break findItem
|
# does leave the item on the queue.
|
||||||
# End for()
|
discard env.fetchStoragePart.lruFetch reqItem.storageRoot
|
||||||
|
|
||||||
return err()
|
when extraTraceMessages:
|
||||||
|
trace logTxt "fetched partial",
|
||||||
|
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||||
|
accKey, iv=rc.value
|
||||||
|
return @[reqItem] # done
|
||||||
|
|
||||||
# Rotate queue item
|
when extraTraceMessages:
|
||||||
discard env.fetchStoragePart.lruFetch result.value.storageRoot
|
trace logTxt "rejected partial", accepted,
|
||||||
|
processed=slots.processed, unprocessed=slots.unprocessed, accKey
|
||||||
|
# End for()
|
||||||
|
|
||||||
|
proc storageQueueUnlinkPartialItem*(
|
||||||
|
env: SnapPivotRef; # Current pivot environment
|
||||||
|
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||||
|
): Result[StoQuSlotsKVP,void] =
|
||||||
|
## Fetch an item from the partial list. This item will be removed from the
|
||||||
|
## list and ca be re-queued via `storageQueueAppend()`.
|
||||||
|
for kvp in env.fetchStoragePart.nextPairs:
|
||||||
|
# Extract range and return single item request queue
|
||||||
|
let
|
||||||
|
accKey = kvp.data.accKey
|
||||||
|
accepted = accKey notin ignore
|
||||||
|
if accepted:
|
||||||
|
env.parkedStorage.incl accKey # Temporarily parked
|
||||||
|
env.fetchStoragePart.del kvp.key # Last one not needed
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "unlink partial item", processed=kvp.data.slots.processed,
|
||||||
|
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||||
|
return ok(kvp) # done
|
||||||
|
|
||||||
|
when extraTraceMessages:
|
||||||
|
trace logTxt "unlink partial skip", accepted,
|
||||||
|
processed=kvp.data.slots.processed,
|
||||||
|
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||||
|
# End for()
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
|
|
|
@ -72,10 +72,10 @@ proc `$`(node: NodeSpecs): string =
|
||||||
node.partialPath.toHex
|
node.partialPath.toHex
|
||||||
|
|
||||||
proc `$`(rs: NodeTagRangeSet): string =
|
proc `$`(rs: NodeTagRangeSet): string =
|
||||||
rs.fullFactor.toPC(3)
|
rs.fullPC3
|
||||||
|
|
||||||
proc `$`(iv: NodeTagRange): string =
|
proc `$`(iv: NodeTagRange): string =
|
||||||
iv.fullFactor.toPC(3)
|
iv.fullPC3
|
||||||
|
|
||||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||||
|
|
|
@ -32,9 +32,6 @@ type
|
||||||
## there is only a partial list of slots to fetch, the queue entry is
|
## there is only a partial list of slots to fetch, the queue entry is
|
||||||
## stored left-most for easy access.
|
## stored left-most for easy access.
|
||||||
|
|
||||||
SnapSlotsQueuePair* = KeyedQueuePair[Hash256,SnapSlotsQueueItemRef]
|
|
||||||
## Key-value return code from `SnapSlotsQueue` handler
|
|
||||||
|
|
||||||
SnapSlotsQueueItemRef* = ref object
|
SnapSlotsQueueItemRef* = ref object
|
||||||
## Storage slots request data. This entry is similar to `AccountSlotsHeader`
|
## Storage slots request data. This entry is similar to `AccountSlotsHeader`
|
||||||
## where the optional `subRange` interval has been replaced by an interval
|
## where the optional `subRange` interval has been replaced by an interval
|
||||||
|
@ -71,7 +68,7 @@ type
|
||||||
nSlotLists*: uint64 ## Imported # of account storage tries
|
nSlotLists*: uint64 ## Imported # of account storage tries
|
||||||
|
|
||||||
# Mothballing, ready to be swapped into newer pivot record
|
# Mothballing, ready to be swapped into newer pivot record
|
||||||
storageAccounts*: SnapAccountsList ## Accounts with missing stortage slots
|
storageAccounts*: SnapAccountsList ## Accounts with missing storage slots
|
||||||
archived*: bool ## Not latest pivot, anymore
|
archived*: bool ## Not latest pivot, anymore
|
||||||
|
|
||||||
SnapPivotTable* = KeyedQueue[Hash256,SnapPivotRef]
|
SnapPivotTable* = KeyedQueue[Hash256,SnapPivotRef]
|
||||||
|
@ -142,7 +139,7 @@ proc pivotAccountsCoverage100PcRollOver*(ctx: SnapCtxRef) =
|
||||||
# Public helpers: SnapTodoRanges
|
# Public helpers: SnapTodoRanges
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
||||||
proc init*(q: var SnapTodoRanges) =
|
proc init*(q: var SnapTodoRanges; clear = false) =
|
||||||
## Populate node range sets with maximal range in the first range set. This
|
## Populate node range sets with maximal range in the first range set. This
|
||||||
## kind of pair or interval sets is managed as follows:
|
## kind of pair or interval sets is managed as follows:
|
||||||
## * As long as possible, fetch and merge back intervals on the first set.
|
## * As long as possible, fetch and merge back intervals on the first set.
|
||||||
|
@ -152,7 +149,8 @@ proc init*(q: var SnapTodoRanges) =
|
||||||
## is considered after the prioitised intervals are exhausted.
|
## is considered after the prioitised intervals are exhausted.
|
||||||
q[0] = NodeTagRangeSet.init()
|
q[0] = NodeTagRangeSet.init()
|
||||||
q[1] = NodeTagRangeSet.init()
|
q[1] = NodeTagRangeSet.init()
|
||||||
discard q[0].merge(low(NodeTag),high(NodeTag))
|
if not clear:
|
||||||
|
discard q[0].merge FullNodeTagRange
|
||||||
|
|
||||||
proc clear*(q: var SnapTodoRanges) =
|
proc clear*(q: var SnapTodoRanges) =
|
||||||
## Reset argument range sets empty.
|
## Reset argument range sets empty.
|
||||||
|
@ -167,8 +165,12 @@ proc merge*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
||||||
|
|
||||||
proc mergeSplit*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
proc mergeSplit*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
||||||
## Ditto w/priorities partially reversed
|
## Ditto w/priorities partially reversed
|
||||||
if 1 < iv.len:
|
if iv.len == 1:
|
||||||
|
discard q[0].reduce iv
|
||||||
|
discard q[1].merge iv
|
||||||
|
else:
|
||||||
let
|
let
|
||||||
|
# note that (`iv.len` == 0) => (`iv` == `FullNodeTagRange`)
|
||||||
midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
|
midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
|
||||||
iv1 = NodeTagRange.new(iv.minPt, midPt)
|
iv1 = NodeTagRange.new(iv.minPt, midPt)
|
||||||
iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
|
iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
|
||||||
|
@ -176,9 +178,6 @@ proc mergeSplit*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
||||||
discard q[1].merge iv1
|
discard q[1].merge iv1
|
||||||
discard q[0].merge iv2
|
discard q[0].merge iv2
|
||||||
discard q[1].reduce iv2
|
discard q[1].reduce iv2
|
||||||
else:
|
|
||||||
discard q[0].reduce iv
|
|
||||||
discard q[1].merge iv
|
|
||||||
|
|
||||||
|
|
||||||
proc reduce*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
proc reduce*(q: var SnapTodoRanges; iv: NodeTagRange) =
|
||||||
|
@ -194,8 +193,9 @@ iterator ivItems*(q: var SnapTodoRanges): NodeTagRange =
|
||||||
yield iv
|
yield iv
|
||||||
|
|
||||||
|
|
||||||
proc fetch*(q: var SnapTodoRanges; maxLen: UInt256): Result[NodeTagRange,void] =
|
proc fetch*(q: var SnapTodoRanges; maxLen = 0.u256): Result[NodeTagRange,void] =
|
||||||
## Fetch interval from node ranges with maximal size `maxLen`
|
## Fetch interval from node ranges with maximal size `maxLen`, where
|
||||||
|
## `0.u256` is interpreted as `2^256`.
|
||||||
|
|
||||||
# Swap batch queues if the first one is empty
|
# Swap batch queues if the first one is empty
|
||||||
if q[0].isEmpty:
|
if q[0].isEmpty:
|
||||||
|
@ -207,9 +207,17 @@ proc fetch*(q: var SnapTodoRanges; maxLen: UInt256): Result[NodeTagRange,void] =
|
||||||
return err()
|
return err()
|
||||||
|
|
||||||
let
|
let
|
||||||
val = rc.value
|
jv = rc.value
|
||||||
iv = if 0 < val.len and val.len <= maxLen: val # val.len==0 => 2^256
|
iv = block:
|
||||||
else: NodeTagRange.new(val.minPt, val.minPt + (maxLen - 1.u256))
|
if maxLen == 0 or (0 < jv.len and jv.len <= maxLen):
|
||||||
|
jv
|
||||||
|
else:
|
||||||
|
# Note that either:
|
||||||
|
# (`jv.len` == 0) => (`jv` == `FullNodeTagRange`) => `jv.minPt` == 0
|
||||||
|
# or
|
||||||
|
# (`maxLen` < `jv.len`) => (`jv.minPt`+`maxLen` <= `jv.maxPt`)
|
||||||
|
NodeTagRange.new(jv.minPt, jv.minPt + maxLen)
|
||||||
|
|
||||||
discard q[0].reduce(iv)
|
discard q[0].reduce(iv)
|
||||||
ok(iv)
|
ok(iv)
|
||||||
|
|
||||||
|
|
|
@ -13,19 +13,19 @@
|
||||||
##
|
##
|
||||||
## Public descriptors
|
## Public descriptors
|
||||||
|
|
||||||
|
{.push raises: [].}
|
||||||
|
|
||||||
import
|
import
|
||||||
#std/options,
|
#std/options,
|
||||||
eth/[common, p2p],
|
eth/[common, p2p],
|
||||||
../core/chain,
|
../core/chain,
|
||||||
../db/db_chain,
|
../db/db_chain,
|
||||||
./handlers
|
./handlers/eth
|
||||||
|
|
||||||
export
|
export
|
||||||
chain,
|
chain,
|
||||||
db_chain
|
db_chain
|
||||||
|
|
||||||
{.push raises: [].}
|
|
||||||
|
|
||||||
type
|
type
|
||||||
BuddyRunState* = enum
|
BuddyRunState* = enum
|
||||||
Running = 0 ## Running, default state
|
Running = 0 ## Running, default state
|
||||||
|
@ -121,6 +121,13 @@ proc `stopped=`*(ctrl: BuddyCtrlRef; value: bool) =
|
||||||
else:
|
else:
|
||||||
discard
|
discard
|
||||||
|
|
||||||
|
proc `forceRun=`*(ctrl: BuddyCtrlRef; value: bool) =
|
||||||
|
## Setter, gets out of `Zombie` jail/locked state with `true argument.
|
||||||
|
if value:
|
||||||
|
ctrl.runState = Running
|
||||||
|
else:
|
||||||
|
ctrl.stopped = true
|
||||||
|
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
# End
|
# End
|
||||||
# ------------------------------------------------------------------------------
|
# ------------------------------------------------------------------------------
|
||||||
|
|
Loading…
Reference in New Issue