# nimbus-eth1
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

import
  std/[algorithm, sequtils, tables],
  chronicles,
  eth/[common, p2p, rlp, trie/nibbles],
  stew/[byteutils, interval_set],
  ../../range_desc,
  "."/[hexary_desc, hexary_error, hexary_import, hexary_interpolate,
       hexary_inspect, hexary_paths, snapdb_desc, snapdb_persistent]

{.push raises: [Defect].}

logScope:
  topics = "snap-db"

type
  SnapDbAccountsRef* = ref object of SnapDbBaseRef
    peer: Peer                    ## For log messages

  SnapAccountsGaps* = object
    innerGaps*: seq[NodeSpecs]
    dangling*: seq[NodeSpecs]

const
  extraTraceMessages = false or true

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

proc to(h: Hash256; T: type NodeKey): T =
  h.data.T

proc convertTo(data: openArray[byte]; T: type Hash256): T =
  discard result.data.NodeKey.init(data) # size error => zero

template noKeyError(info: static[string]; code: untyped) =
  try:
    code
  except KeyError as e:
    raiseAssert "Not possible (" & info & "): " & e.msg

template noRlpExceptionOops(info: static[string]; code: untyped) =
  try:
    code
  except RlpError:
    return err(RlpEncoding)
  except KeyError as e:
    raiseAssert "Not possible (" & info & "): " & e.msg
  except Defect as e:
    raise e
  except Exception as e:
    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg

# ------------------------------------------------------------------------------
# Private functions
# ------------------------------------------------------------------------------

proc persistentAccounts(
    db: HexaryTreeDbRef;          ## Current table
    ps: SnapDbAccountsRef;        ## For persistent database
      ): Result[void,HexaryError]
      {.gcsafe, raises: [Defect,OSError,KeyError].} =
  ## Store accounts trie table on database
  if ps.rockDb.isNil:
    let rc = db.persistentAccountsPut(ps.kvDb)
    if rc.isErr: return rc
  else:
    let rc = db.persistentAccountsPut(ps.rockDb)
    if rc.isErr: return rc
  ok()

proc collectAccounts(
    peer: Peer,                   ## for log messages
    base: NodeTag;
    acc: seq[PackedAccount];
      ): Result[seq[RLeafSpecs],HexaryError]
      {.gcsafe, raises: [Defect, RlpError].} =
  ## Repack account records into a `seq[RLeafSpecs]` queue. The argument data
  ## `acc` is as received with the snap message `AccountRange`.
  ##
  ## The returned list contains leaf node information for populating a repair
  ## table. The accounts, together with some hexary trie records for proofs,
  ## can be used for validating the argument account data.
  var rcAcc: seq[RLeafSpecs]

  if 0 < acc.len:
    let pathTag0 = acc[0].accKey.to(NodeTag)

    # Verify lower bound
    if pathTag0 < base:
      let error = LowerBoundAfterFirstEntry
      trace "collectAccounts()", peer, base, accounts=acc.len, error
      return err(error)

    # Add first account
    rcAcc.add RLeafSpecs(pathTag: pathTag0, payload: acc[0].accBlob)

    # Verify & add other accounts
    for n in 1 ..< acc.len:
      let nodeTag = acc[n].accKey.to(NodeTag)

      if nodeTag <= rcAcc[^1].pathTag:
        let error = AccountsNotSrictlyIncreasing
        trace "collectAccounts()", peer, item=n, base, accounts=acc.len, error
        return err(error)

      rcAcc.add RLeafSpecs(pathTag: nodeTag, payload: acc[n].accBlob)

  ok(rcAcc)

# ------------------------------------------------------------------------------
# Public constructor
# ------------------------------------------------------------------------------

proc init*(
    T: type SnapDbAccountsRef;
    pv: SnapDbRef;
    root: Hash256;
    peer: Peer = nil
      ): T =
  ## Constructor, starts a new accounts session.
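  ##
  ## A minimal usage sketch (illustrative only, assuming `pv` is an
  ## initialised `SnapDbRef` and `stateRoot` a known state root):
  ## ::
  ##   let ps = SnapDbAccountsRef.init(pv, stateRoot, peer)
  ##   # re-use the session for another state root via `ps.dup(otherRoot)`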
  new result
  result.init(pv, root.to(NodeKey))
  result.peer = peer

proc dup*(
    ps: SnapDbAccountsRef;
    root: Hash256;
    peer: Peer;
      ): SnapDbAccountsRef =
  ## Resume an accounts session with different `root` key and `peer`.
  new result
  result[].shallowCopy(ps[])
  result.root = root.to(NodeKey)
  result.peer = peer

proc dup*(
    ps: SnapDbAccountsRef;
    root: Hash256;
      ): SnapDbAccountsRef =
  ## Variant of `dup()` without the `peer` argument.
  new result
  result[].shallowCopy(ps[])
  result.root = root.to(NodeKey)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn =
  ## Return `HexaryGetFn` closure.
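  ##
  ## A usage sketch (illustrative only): the returned closure reads from the
  ## persistent database and may be passed to the `hexary_paths` helpers,
  ## e.g. for some account key `accKey`:
  ## ::
  ##   let getFn = ps.getAccountFn
  ##   let leaf = accKey.hexaryPath(ps.root, getFn).leafData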
  let getFn = ps.kvDb.persistentAccountsGetFn()
  return proc(key: openArray[byte]): Blob = getFn(key)

proc getAccountFn*(pv: SnapDbRef): HexaryGetFn =
  ## Variant of `getAccountFn()`
  let getFn = pv.kvDb.persistentAccountsGetFn()
  return proc(key: openArray[byte]): Blob = getFn(key)

proc importAccounts*(
    ps: SnapDbAccountsRef;        ## Re-usable session descriptor
    base: NodeTag;                ## Before or at first account entry in `data`
    data: PackedAccountRange;     ## Re-packed `snap/1 ` reply data
    persistent = false;           ## Store data on disk
    noBaseBoundCheck = false;     ## Ignore left boundary proof check if `true`
      ): Result[SnapAccountsGaps,HexaryError] =
  ## Validate and import accounts (using proofs as received with the snap
  ## message `AccountRange`). This function accumulates data in a memory table
  ## which can be written to disk with the argument `persistent` set `true`.
  ## The memory table is held in the descriptor argument `ps`.
  ##
  ## On success, the function returns a list `innerGaps` of dangling node
  ## links from the argument `proof` list of nodes after populating with
  ## accounts. The following example may illustrate the case:
  ##
  ## Assume an accounts hexary trie
  ## ::
  ##   | 0 1 2 3 4 5 6 7 8 9 a b c d e f      -- nibble positions
  ##   | root -> (a, .. b, .. c, .. d, .. ,)  -- root branch node
  ##   |          |     |     |     |
  ##   |         ...    v     v     v
  ##   |              (x,X) (y,Y) (z,Z)
  ##
  ## with `a`,`b`,`c`,`d` node hashes, `x`,`y`,`z` partial paths and account
  ## hashes `3&x`,`7&y`,`b&z` for account values `X`,`Y`,`Z`. All other
  ## links in the *root branch node* are assumed nil.
  ##
  ## Passing to this function
  ## * base: `3&x`
  ## * data.proof: *root branch node*
  ## * data.accounts: `(3&x,X)`, `(7&y,Y)`, `(b&z,Z)`
  ## a partial tree can be fully constructed and the boundary proofs succeed.
  ## The return value will be an empty list.
  ##
  ## Leaving out `(7&y,Y)`, the boundary proofs still succeed but the
  ## return value will be @[`(7&y,c)`].
  ##
  ## The left boundary proof might be omitted by passing `true` for the
  ## `noBaseBoundCheck` argument. In this case, the boundary check must be
  ## performed on the return code as
  ## * if `data.accounts` is empty, the return value must be an empty list
  ## * otherwise, all type `NodeSpecs` items `w` of the return code must
  ##   satisfy
  ##   ::
  ##     let leastAccountPath = data.accounts[0].accKey.to(NodeTag)
  ##     leastAccountPath <= w.partialPath.max(NodeKey).to(NodeTag)
  ##
  ## Besides the inner gaps, the function also returns the dangling nodes left
  ## over from the `proof` list.
  ##
  ## Note that the `peer` argument is used for log messages only.
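  ##
  ## A typical call sketch (illustrative only, assuming `base` and `data`
  ## were derived from a `snap/1` `AccountRange` reply):
  ## ::
  ##   let rc = ps.importAccounts(base, data, persistent=true)
  ##   if rc.isOk:
  ##     # entries in `rc.value.innerGaps` are candidates for healing
  ##     discard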
  var
    accounts: seq[RLeafSpecs]    # validated accounts to add to database
    gaps: SnapAccountsGaps       # return value
    proofStats: TrieNodeStat     # `proof` data dangling links
    innerSubTrie: seq[NodeSpecs] # internal, collect dangling links
  try:
    if 0 < data.proof.len:
      let rc = ps.mergeProofs(ps.peer, data.proof)
      if rc.isErr:
        return err(rc.error)
    block:
      let rc = ps.peer.collectAccounts(base, data.accounts)
      if rc.isErr:
        return err(rc.error)
      accounts = rc.value

    # Inspect trie for dangling nodes from proof data (if any.)
    if 0 < data.proof.len:
      proofStats = ps.hexaDb.hexaryInspectTrie(ps.root, @[])

    if 0 < accounts.len:
      if 0 < data.proof.len:
        # Inspect trie for dangling nodes. This is not a big deal here as the
        # proof data is typically small.
        let topTag = accounts[^1].pathTag
        for w in proofStats.dangling:
          let iv = w.partialPath.pathEnvelope
          if iv.maxPt < base or topTag < iv.minPt:
            # Dangling link with partial path envelope outside accounts range
            gaps.dangling.add w
          else:
            # Overlapping partial path envelope.
            innerSubTrie.add w

      # Build partial or full hexary trie
      let rc = ps.hexaDb.hexaryInterpolate(
        ps.root, accounts, bootstrap = (data.proof.len == 0))
      if rc.isErr:
        return err(rc.error)

      # Collect missing inner sub-trees in the reconstructed partial hexary
      # trie (if any).
      let bottomTag = accounts[0].pathTag
      for w in innerSubTrie:
        if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)):
          if not noBaseBoundCheck:
            # Verify that `base` is to the left of the first account and there
            # is nothing in between.
            #
            # Without `proof` data available there can only be a complete
            # set/list of accounts so there are no dangling nodes in the first
            # place. But there must be `proof` data for an empty list.
            if w.partialPath.pathEnvelope.maxPt < bottomTag:
              return err(LowerBoundProofError)
          # Otherwise register left over entry
          gaps.innerGaps.add w

      if persistent:
        let rc = ps.hexaDb.persistentAccounts(ps)
        if rc.isErr:
          return err(rc.error)

    elif data.proof.len == 0:
      # There must be a proof for an empty argument list.
      return err(LowerBoundProofError)

    else:
      if not noBaseBoundCheck:
        for w in proofStats.dangling:
          if base <= w.partialPath.pathEnvelope.maxPt:
            return err(LowerBoundProofError)
      gaps.dangling = proofStats.dangling

  except RlpError:
    return err(RlpEncoding)
  except KeyError as e:
    raiseAssert "Not possible @ importAccounts(KeyError): " & e.msg
  except OSError as e:
    error "Import Accounts exception", peer=ps.peer, name=($e.name), msg=e.msg
    return err(OSErrorException)
  except Exception as e:
    raiseAssert "Not possible @ importAccounts(" & $e.name & "):" & e.msg

  #when extraTraceMessages:
  #  trace "Accounts imported", peer=ps.peer, root=ps.root.ByteArray32.toHex,
  #    proof=data.proof.len, base, accounts=data.accounts.len,
  #    top=accounts[^1].pathTag, innerGapsLen=gaps.innerGaps.len,
  #    danglingLen=gaps.dangling.len

  ok(gaps)

proc importAccounts*(
    pv: SnapDbRef;                ## Base descriptor on `ChainDBRef`
    peer: Peer;                   ## For log messages
    root: Hash256;                ## State root
    base: NodeTag;                ## Before or at first account entry in `data`
    data: PackedAccountRange;     ## Re-packed `snap/1 ` reply data
    noBaseBoundCheck = false;     ## Ignore left bound proof check if `true`
      ): Result[SnapAccountsGaps,HexaryError] =
  ## Variant of `importAccounts()` for persistent storage only.
  SnapDbAccountsRef.init(
    pv, root, peer).importAccounts(
      base, data, persistent=true, noBaseBoundCheck)

proc importRawAccountsNodes*(
    ps: SnapDbAccountsRef;        ## Re-usable session descriptor
    nodes: openArray[NodeSpecs];  ## List of `(key,data)` records
    reportNodes = {Leaf};         ## Additional node types to report
    persistent = false;           ## Store data on disk
      ): seq[HexaryNodeReport] =
  ## Store data nodes given as argument `nodes` on the persistent database.
  ##
  ## If there was an error when processing a particular argument `nodes` item,
  ## it will be reported with the return value providing argument slot/index,
  ## node type, and error code.
  ##
  ## If there was an error storing persistent data, the last report item will
  ## have an error code, only.
  ##
  ## Additional node items might be reported if the node type is in the
  ## argument set `reportNodes`. These reported items will have no error
  ## code set (i.e. `NothingSerious`.)
  ##
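  ## A usage sketch (illustrative only), e.g. when storing nodes fetched for
  ## healing:
  ## ::
  ##   for rep in ps.importRawAccountsNodes(nodes, persistent=true):
  ##     if rep.error != NothingSerious:
  ##       discard # inspect `rep.slot` and `rep.error` for the faulty entry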
  let
    peer = ps.peer
    db = HexaryTreeDbRef.init(ps)
    nItems = nodes.len
  var
    nErrors = 0
    slot: Option[int]
  try:
    # Import nodes
    for n,node in nodes:
      if 0 < node.data.len: # otherwise ignore empty placeholder
        slot = some(n)
        var rep = db.hexaryImport(node)
        if rep.error != NothingSerious:
          rep.slot = slot
          result.add rep
          nErrors.inc
          trace "Error importing account nodes", peer, inx=n, nItems,
            error=rep.error, nErrors
        elif rep.kind.isSome and rep.kind.unsafeGet in reportNodes:
          rep.slot = slot
          result.add rep

    # Store to disk
    if persistent and 0 < db.tab.len:
      slot = none(int)
      let rc = db.persistentAccounts(ps)
      if rc.isErr:
        result.add HexaryNodeReport(slot: slot, error: rc.error)

  except RlpError:
    result.add HexaryNodeReport(slot: slot, error: RlpEncoding)
    nErrors.inc
    trace "Error importing account nodes", peer, slot, nItems,
      error=RlpEncoding, nErrors
  except KeyError as e:
    raiseAssert "Not possible @ importRawAccountNodes: " & e.msg
  except OSError as e:
    result.add HexaryNodeReport(slot: slot, error: OSErrorException)
    nErrors.inc
    error "Import account nodes exception", peer, slot, nItems,
      name=($e.name), msg=e.msg, nErrors

  when extraTraceMessages:
    if nErrors == 0:
      trace "Raw account nodes imported", peer, slot, nItems, report=result.len

proc importRawAccountsNodes*(
    pv: SnapDbRef;                ## Base descriptor on `ChainDBRef`
    peer: Peer,                   ## For log messages only
    nodes: openArray[NodeSpecs];  ## List of `(key,data)` records
    reportNodes = {Leaf};         ## Additional node types to report
      ): seq[HexaryNodeReport] =
  ## Variant of `importRawAccountsNodes()` for persistent storage.
  SnapDbAccountsRef.init(
    pv, Hash256(), peer).importRawAccountsNodes(
      nodes, reportNodes, persistent=true)

proc getAccountsNodeKey*(
    ps: SnapDbAccountsRef;        ## Re-usable session descriptor
    path: Blob;                   ## Partial node path
    persistent = false;           ## Read data from disk
      ): Result[NodeKey,HexaryError] =
  ## For a partial node path argument `path`, return the raw node key.
  var rc: Result[NodeKey,void]
  noRlpExceptionOops("getAccountsNodeKey()"):
    if persistent:
      rc = ps.getAccountFn.hexaryInspectPath(ps.root, path)
    else:
      rc = ps.hexaDb.hexaryInspectPath(ps.root, path)
  if rc.isOk:
    return ok(rc.value)
  err(NodeNotFound)

proc getAccountsNodeKey*(
    pv: SnapDbRef;                ## Base descriptor on `ChainDBRef`
    root: Hash256;                ## State root
    path: Blob;                   ## Partial node path
      ): Result[NodeKey,HexaryError] =
  ## Variant of `getAccountsNodeKey()` for persistent storage.
  SnapDbAccountsRef.init(
    pv, root, Peer()).getAccountsNodeKey(path, persistent=true)

proc getAccountsData*(
    ps: SnapDbAccountsRef;        ## Re-usable session descriptor
    path: NodeKey;                ## Account to visit
    persistent = false;           ## Read data from disk
      ): Result[Account,HexaryError] =
  ## Fetch account data.
  ##
  ## Caveat: There is no unit test yet for the non-persistent version
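  ##
  ## A lookup sketch (illustrative only), reading via the persistent database
  ## for some account key `accKey`:
  ## ::
  ##   let rc = ps.getAccountsData(accKey, persistent=true)
  ##   if rc.isOk:
  ##     let balance = rc.value.balance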
  var acc: Account

  noRlpExceptionOops("getAccountData()"):
    var leaf: Blob
    if persistent:
      leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData
    else:
      leaf = path.hexaryPath(ps.root.to(RepairKey),ps.hexaDb).leafData

    if leaf.len == 0:
      return err(AccountNotFound)
    acc = rlp.decode(leaf,Account)

  return ok(acc)

proc getAccountsData*(
    pv: SnapDbRef;                ## Base descriptor on `ChainDBRef`
    root: Hash256;                ## State root
    path: NodeKey;                ## Account to visit
      ): Result[Account,HexaryError] =
  ## Variant of `getAccountsData()` for persistent storage.
  SnapDbAccountsRef.init(
    pv, root, Peer()).getAccountsData(path, persistent=true)

# ------------------------------------------------------------------------------
# Public functions: additional helpers
# ------------------------------------------------------------------------------

proc sortMerge*(base: openArray[NodeTag]): NodeTag =
  ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
  ## so that there is no overlap which would be rejected by `merge()`.
  ##
  ## This function selects a `NodeTag` from a list.
  result = high(NodeTag)
  for w in base:
    if w < result:
      result = w

proc sortMerge*(acc: openArray[seq[PackedAccount]]): seq[PackedAccount] =
  ## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
  ## so that there is no overlap which would be rejected by `merge()`.
  ##
  ## This function flattens and sorts the argument account lists.
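  ##
  ## A merge sketch (illustrative only) for two separately fetched ranges
  ## `(base1,accList1)` and `(base2,accList2)`:
  ## ::
  ##   let
  ##     base = sortMerge([base1, base2])
  ##     accounts = sortMerge([accList1, accList2])
  ##   # `(base, accounts)` can now be imported as a single range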
  noKeyError("sortMergeAccounts"):
    var accounts: Table[NodeTag,PackedAccount]
    for accList in acc:
      for item in accList:
        accounts[item.accKey.to(NodeTag)] = item
    result = toSeq(accounts.keys).sorted(cmp).mapIt(accounts[it])

proc getAccountsChainDb*(
    ps: SnapDbAccountsRef;
    accKey: NodeKey;
      ): Result[Account,HexaryError] =
  ## Fetch account via `ChainDBRef`
  ps.getAccountsData(accKey, persistent = true)

proc nextAccountsChainDbKey*(
    ps: SnapDbAccountsRef;
    accKey: NodeKey;
      ): Result[NodeKey,HexaryError] =
  ## Fetch the account path on the `ChainDBRef`, the one following the
  ## argument account key.
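  ##
  ## An iteration sketch (illustrative only) walking accounts in increasing
  ## key order, starting at some `accKey`:
  ## ::
  ##   var key = accKey
  ##   while true:
  ##     let rc = ps.nextAccountsChainDbKey(key)
  ##     if rc.isErr: break
  ##     key = rc.value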
  noRlpExceptionOops("getChainDbAccount()"):
    let path = accKey
      .hexaryPath(ps.root, ps.getAccountFn)
      .next(ps.getAccountFn)
      .getNibbles
    if 64 == path.len:
      return ok(path.getBytes.convertTo(Hash256).to(NodeKey))

  err(AccountNotFound)

proc prevAccountsChainDbKey*(
    ps: SnapDbAccountsRef;
    accKey: NodeKey;
      ): Result[NodeKey,HexaryError] =
  ## Fetch the account path on the `ChainDBRef`, the one preceding the
  ## argument account key.
  noRlpExceptionOops("getChainDbAccount()"):
    let path = accKey
      .hexaryPath(ps.root, ps.getAccountFn)
      .prev(ps.getAccountFn)
      .getNibbles
    if 64 == path.len:
      return ok(path.getBytes.convertTo(Hash256).to(NodeKey))

  err(AccountNotFound)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------