Mirror of https://github.com/status-im/nimbus-eth1.git (synced 2025-02-23 17:28:27 +00:00)

remove snap sync from git working tree (#2265)

parent 483b2d8ef4
commit b72ebca2db
@ -1,133 +0,0 @@
# Nimbus
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

{.push raises: [].}

import
  chronicles,
  chronos,
  eth/p2p,
  ../core/chain,
  ./snap/[worker, worker_desc],
  "."/[protocol, sync_sched]

logScope:
  topics = "snap-sync"

type
  SnapSyncRef* = RunnerSyncRef[SnapCtxData,SnapBuddyData]

const
  extraTraceMessages = false # or true
    ## Enable additional logging noise

# ------------------------------------------------------------------------------
# Private logging helpers
# ------------------------------------------------------------------------------

template traceMsg(f, info: static[string]; args: varargs[untyped]) =
  trace "Snap scheduler " & f & "() " & info, args

template traceMsgCtx(f, info: static[string]; c: SnapCtxRef) =
  when extraTraceMessages:
    block:
      let
        poolMode {.inject.} = c.poolMode
        daemon {.inject.} = c.daemon
      f.traceMsg info, poolMode, daemon

template traceMsgBuddy(f, info: static[string]; b: SnapBuddyRef) =
  when extraTraceMessages:
    block:
      let
        peer {.inject.} = b.peer
        runState {.inject.} = b.ctrl.state
        multiOk {.inject.} = b.ctrl.multiOk
        poolMode {.inject.} = b.ctx.poolMode
        daemon {.inject.} = b.ctx.daemon
      f.traceMsg info, peer, runState, multiOk, poolMode, daemon
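
# Example of the scheduler trace output produced via the templates above when
# `extraTraceMessages` is switched on (illustrative only; the exact line
# layout depends on the configured chronicles sink), e.g. for `runStart()`:
#
#   TRC ... Snap scheduler runStart() begin  peer=... runState=... multiOk=... poolMode=... daemon=...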


template tracerFrameCtx(f: static[string]; c: SnapCtxRef; code: untyped) =
  f.traceMsgCtx "begin", c
  code
  f.traceMsgCtx "end", c

template tracerFrameBuddy(f: static[string]; b: SnapBuddyRef; code: untyped) =
  f.traceMsgBuddy "begin", b
  code
  f.traceMsgBuddy "end", b

# ------------------------------------------------------------------------------
# Virtual methods/interface, `mixin` functions
# ------------------------------------------------------------------------------

proc runSetup(ctx: SnapCtxRef): bool =
  tracerFrameCtx("runSetup", ctx):
    result = worker.setup(ctx)

proc runRelease(ctx: SnapCtxRef) =
  tracerFrameCtx("runRelease", ctx):
    worker.release(ctx)

proc runDaemon(ctx: SnapCtxRef) {.async.} =
  tracerFrameCtx("runDaemon", ctx):
    await worker.runDaemon(ctx)

proc runStart(buddy: SnapBuddyRef): bool =
  tracerFrameBuddy("runStart", buddy):
    result = worker.start(buddy)

proc runStop(buddy: SnapBuddyRef) =
  tracerFrameBuddy("runStop", buddy):
    worker.stop(buddy)

proc runPool(buddy: SnapBuddyRef; last: bool; laps: int): bool =
  tracerFrameBuddy("runPool", buddy):
    result = worker.runPool(buddy, last=last, laps=laps)

proc runSingle(buddy: SnapBuddyRef) {.async.} =
  tracerFrameBuddy("runSingle", buddy):
    await worker.runSingle(buddy)

proc runMulti(buddy: SnapBuddyRef) {.async.} =
  tracerFrameBuddy("runMulti", buddy):
    await worker.runMulti(buddy)

# ------------------------------------------------------------------------------
# Public functions
# ------------------------------------------------------------------------------

proc init*(
    T: type SnapSyncRef;
    ethNode: EthereumNode;
    chain: ChainRef;
    rng: ref HmacDrbgContext;
    maxPeers: int;
    enableTicker = false;
    exCtrlFile = none(string);
      ): T =
  new result
  result.initSync(ethNode, chain, maxPeers, exCtrlFile)
  result.ctx.chain = chain # explicitly override
  result.ctx.pool.rng = rng
  result.ctx.pool.enableTicker = enableTicker
  # Required to have been initialised via `addEthHandlerCapability()`
  doAssert not result.ctx.ethWireCtx.isNil

proc start*(ctx: SnapSyncRef) =
  doAssert ctx.startSync()

proc stop*(ctx: SnapSyncRef) =
  ctx.stopSync()
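
# A minimal usage sketch, assuming `node: EthereumNode`, `chain: ChainRef` and
# `rng: ref HmacDrbgContext` have been set up elsewhere and the `eth` wire
# handler was installed via `addEthHandlerCapability()` (see the `doAssert`
# in `init()` above):
#
#   let syncer = SnapSyncRef.init(node, chain, rng, maxPeers = 10)
#   syncer.start()
#   # ... sync runs in the background ...
#   syncer.stop()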

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -1,67 +0,0 @@
Test & debugging scenario with nimbus-eth1 client/server
========================================================


Start snap/1 server
-------------------

# Enter nimbus directory for the snap/1 protocol server.
cd server

# Tell nimbus to stop full sync after 2 mio blocks.
echo 2000000 > full-limit.txt

# Tell nimbus to use this predefined key ID
echo 123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef0 > full-id.key

./build/nimbus \
  --tcp-port:30319 --nat=None --sync-mode=full \
  --protocols=snap --discovery=none \
  --net-key=./full-id.key \
  --sync-ctrl-file=./full-limit.txt \
  --log-level:TRACE

# Wait for several hours until enough blocks have been downloaded so that
# snap sync data are available. The full 2 mio blocks are available if the
# log ticker shows something like
#
# INF 2023-03-17 [..] Sync statistics (suspended) topics="full-tick" [..] persistent=#2000080 [..]
#
# where the persistent=#2000080 field might vary


Start snap/1 client
-------------------

# Note: When the snap/1 server has enough blocks, the client can be started.

# Enter nimbus directory for the snap/1 protocol client
cd client

# Tell nimbus to use this pivot block number. This number must be smaller
# than the 2000000 written into the file full-limit.txt above.
echo 600000 > snap/snap-update.txt

# Tell nimbus to stop somewhere after 1000000 blocks have been downloaded
# with full sync follow-up after snap sync has completed (2nd line of the
# external setup file.)
echo 1000000 >> snap/snap-update.txt

# Tell nimbus to use this hard coded peer enode.
echo enode://192d7e7a302bd4ff27f48d7852621e0d3cb863a6dd67dd44e0314a25a3aa866837f0d2460b4444dc66e7b7a2cd56a2de1c31b2a2ba4e23549bf3ba3b0c4f2eb5@127.0.0.1:30319 > snap/full-servers.txt

./build/nimbus \
  --tcp-port:30102 --nat=None --sync-mode=snap \
  --protocols=none --discovery=none \
  --static-peers-file=./full-servers.txt \
  --sync-ctrl-file=./snap-update.txt \
  --log-level:TRACE


Modifications while the programs are syncing
--------------------------------------------

# Increasing the number in the files full/full-limit.txt or
# snap/snap-update.txt will be recognised while running. Decreasing
# or removing it will be ignored.
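
# Example (hypothetical new limit): while the server is still syncing, the
# full sync stop limit can be raised on the fly, e.g. from 2000000 to
# 2500000 blocks:
echo 2500000 > full-limit.txt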
@ -1,96 +0,0 @@
# Nimbus
# Copyright (c) 2021 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Sync mode pass multiplexer
## ==========================
##
## Pass state diagram:
## ::
##    <init> -> <snap-sync> -> <full-sync> ---+
##                                  ^         |
##                                  |         |
##                                  +---------+
##
{.push raises: [].}

import
  chronicles,
  chronos,
  ./range_desc,
  ./worker/pass,
  ./worker_desc

logScope:
  topics = "snap-worker"

# ------------------------------------------------------------------------------
# Private helpers
# ------------------------------------------------------------------------------

template ignoreException(info: static[string]; code: untyped) =
  try:
    code
  except CatchableError as e:
    error "Exception at " & info & ":", name=($e.name), msg=(e.msg)

# ------------------------------------------------------------------------------
# Public start/stop and admin functions
# ------------------------------------------------------------------------------

proc setup*(ctx: SnapCtxRef): bool =
  ## Global set up
  ctx.passInitSetup()
  ignoreException("setup"):
    ctx.passActor.setup(ctx)
  true

proc release*(ctx: SnapCtxRef) =
  ## Global clean up
  ignoreException("release"):
    ctx.passActor.release(ctx)
  ctx.passInitRelease()

proc start*(buddy: SnapBuddyRef): bool =
  ## Initialise worker peer
  ignoreException("start"):
    result = buddy.ctx.passActor.start(buddy)

proc stop*(buddy: SnapBuddyRef) =
  ## Clean up this peer
  ignoreException("stop"):
    buddy.ctx.passActor.stop(buddy)

# ------------------------------------------------------------------------------
# Public functions, sync handler multiplexers
# ------------------------------------------------------------------------------

proc runDaemon*(ctx: SnapCtxRef) {.async.} =
  ## Sync processing multiplexer
  ignoreException("runDaemon"):
    await ctx.passActor.daemon(ctx)

proc runSingle*(buddy: SnapBuddyRef) {.async.} =
  ## Sync processing multiplexer
  ignoreException("runSingle"):
    await buddy.ctx.passActor.single(buddy)

proc runPool*(buddy: SnapBuddyRef, last: bool; laps: int): bool =
  ## Sync processing multiplexer
  ignoreException("runPool"):
    result = buddy.ctx.passActor.pool(buddy, last, laps)

proc runMulti*(buddy: SnapBuddyRef) {.async.} =
  ## Sync processing multiplexer
  ignoreException("runMulti"):
    await buddy.ctx.passActor.multi(buddy)

# ------------------------------------------------------------------------------
# End
# ------------------------------------------------------------------------------
@ -1,677 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Find node paths in hexary tries.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[algorithm, sequtils, sets, strutils, tables, times],
|
||||
chronos,
|
||||
eth/[common, trie/nibbles],
|
||||
stew/byteutils,
|
||||
results,
|
||||
"../.."/[constants, range_desc],
|
||||
"."/[hexary_desc, hexary_error]
|
||||
|
||||
var
|
||||
disablePrettyKeys* = false ## Debugging, print raw keys if `true`
|
||||
|
||||
proc next*(path: XPath; getFn: HexaryGetFn; minDepth = 64): XPath
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
proc prev*(path: XPath; getFn: HexaryGetFn; minDepth = 64): XPath
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private pretty printing helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc asDateTime(m: Moment): DateTime =
|
||||
## Approximate UTC based `DateTime` for a `Moment`
|
||||
let
|
||||
utcNow = times.now().utc
|
||||
momNow = Moment.now()
|
||||
utcNow + initDuration(nanoseconds = (m - momNow).nanoseconds)
|
||||
|
||||
# --------------
|
||||
|
||||
proc toPfx(indent: int): string =
|
||||
"\n" & " ".repeat(indent)
|
||||
|
||||
proc ppImpl(s: string; hex = false): string =
|
||||
## For long strings print `begin..end` only
|
||||
if hex:
|
||||
let n = (s.len + 1) div 2
|
||||
(if s.len < 20: s else: s[0 .. 5] & ".." & s[s.len-8 .. s.len-1]) &
|
||||
"[" & (if 0 < n: "#" & $n else: "") & "]"
|
||||
elif s.len <= 30:
|
||||
s
|
||||
else:
|
||||
(if (s.len and 1) == 0: s[0 ..< 8] else: "0" & s[0 ..< 7]) &
|
||||
"..(" & $s.len & ").." & s[s.len-16 ..< s.len]
|
||||
|
||||
proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string =
|
||||
if key.isZero:
|
||||
return "ø"
|
||||
if not key.isNodeKey:
|
||||
var num: uint64
|
||||
(addr num).copyMem(unsafeAddr key.ByteArray33[25], 8)
|
||||
return "%" & $num
|
||||
try:
|
||||
if not disablePrettyKeys and not db.keyPp.isNil:
|
||||
return db.keyPp(key)
|
||||
except CatchableError:
|
||||
discard
|
||||
key.ByteArray33.toSeq.toHex.toLowerAscii
|
||||
|
||||
proc ppImpl(key: NodeKey; db: HexaryTreeDbRef): string =
|
||||
key.to(RepairKey).ppImpl(db)
|
||||
|
||||
proc ppImpl(w: openArray[RepairKey]; db: HexaryTreeDbRef): string =
|
||||
w.mapIt(it.ppImpl(db)).join(",")
|
||||
|
||||
proc ppImpl(w: openArray[Blob]; db: HexaryTreeDbRef): string =
|
||||
var q: seq[RepairKey]
|
||||
for a in w:
|
||||
var key: RepairKey
|
||||
discard key.init(a)
|
||||
q.add key
|
||||
q.ppImpl(db)
|
||||
|
||||
proc ppStr(blob: Blob): string =
|
||||
if blob.len == 0: ""
|
||||
else: blob.toHex.toLowerAscii.ppImpl(hex = true)
|
||||
|
||||
proc ppImpl(n: RNodeRef; db: HexaryTreeDbRef): string =
|
||||
let so = n.state.ord
|
||||
case n.kind:
|
||||
of Leaf:
|
||||
["l","ł","L","R"][so] & "(" & $n.lPfx & "," & n.lData.ppStr & ")"
|
||||
of Extension:
|
||||
["e","€","E","R"][so] & "(" & $n.ePfx & "," & n.eLink.ppImpl(db) & ")"
|
||||
of Branch:
|
||||
["b","þ","B","R"][so] & "(" & n.bLink.ppImpl(db) & "," & n.bData.ppStr & ")"
|
||||
|
||||
proc ppImpl(n: XNodeObj; db: HexaryTreeDbRef): string =
|
||||
case n.kind:
|
||||
of Leaf:
|
||||
"l(" & $n.lPfx & "," & n.lData.ppStr & ")"
|
||||
of Extension:
|
||||
var key: RepairKey
|
||||
discard key.init(n.eLink)
|
||||
"e(" & $n.ePfx & "," & key.ppImpl(db) & ")"
|
||||
of Branch:
|
||||
"b(" & n.bLink[0..15].ppImpl(db) & "," & n.bLink[16].ppStr & ")"
|
||||
|
||||
func hex(x: int8): string =
|
||||
result.add char((x and 0x0F'i8) + '0'.int8)
|
||||
result = result.toLowerAscii
|
||||
|
||||
proc ppImpl(w: RPathStep; db: HexaryTreeDbRef): string =
|
||||
let
|
||||
nibble = if 0 <= w.nibble: w.nibble.hex else: "ø"
|
||||
key = w.key.ppImpl(db)
|
||||
"(" & key & "," & nibble & "," & w.node.ppImpl(db) & ")"
|
||||
|
||||
proc ppImpl(w: XPathStep; db: HexaryTreeDbRef): string =
|
||||
let nibble = if 0 <= w.nibble: w.nibble.hex else: "ø"
|
||||
var key: RepairKey
|
||||
discard key.init(w.key)
|
||||
"(" & key.ppImpl(db) & "," & $nibble & "," & w.node.ppImpl(db) & ")"
|
||||
|
||||
proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] =
|
||||
## Dump the entries from a generic repair tree. This function assumes
|
||||
## that mapped keys are printed `$###` if a node is locked or static, and
|
||||
## some substitute for the first letter `$` otherwise (if they are mutable.)
|
||||
proc toKey(s: string): uint64 =
|
||||
try:
|
||||
result = s[1 ..< s.len].parseUint
|
||||
except ValueError as e:
|
||||
raiseAssert "Ooops ppImpl(s=" & s & "): name=" & $e.name & " msg=" & e.msg
|
||||
if s[0] != '$':
|
||||
result = result or (1u64 shl 63)
|
||||
proc cmpIt(x, y: (uint64,string)): int =
|
||||
cmp(x[0],y[0])
|
||||
|
||||
var accu: seq[(uint64,string)]
|
||||
if root.ByteArray32 != ByteArray32.default:
|
||||
accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")]
|
||||
for key,node in db.tab.pairs:
|
||||
accu.add (
|
||||
key.ppImpl(db).toKey,
|
||||
"(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")")
|
||||
|
||||
accu.sorted(cmpIt).mapIt(it[1])
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getNibblesImpl(path: XPath; start = 0): NibblesSeq =
|
||||
## Re-build the key path
|
||||
for n in start ..< path.path.len:
|
||||
let it = path.path[n]
|
||||
case it.node.kind:
|
||||
of Branch:
|
||||
result = result & @[it.nibble.byte].initNibbleRange.slice(1)
|
||||
of Extension:
|
||||
result = result & it.node.ePfx
|
||||
of Leaf:
|
||||
result = result & it.node.lPfx
|
||||
result = result & path.tail
|
||||
|
||||
proc getLeafData(path: XPath): Blob =
|
||||
## Return the leaf data from a successful `XPath` computation (if any.)
|
||||
## Note that this function also exists as `hexary_paths.leafData()` but
|
||||
## the import of this file is avoided.
|
||||
if path.tail.len == 0 and 0 < path.path.len:
|
||||
let node = path.path[^1].node
|
||||
case node.kind:
|
||||
of Branch:
|
||||
return node.bLink[16]
|
||||
of Leaf:
|
||||
return node.lData
|
||||
of Extension:
|
||||
discard
|
||||
|
||||
proc toBranchNode(
|
||||
rlp: Rlp
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError].} =
|
||||
var rlp = rlp
|
||||
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
|
||||
|
||||
proc toLeafNode(
|
||||
rlp: Rlp;
|
||||
pSegm: NibblesSeq
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError].} =
|
||||
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
|
||||
|
||||
proc toExtensionNode(
|
||||
rlp: Rlp;
|
||||
pSegm: NibblesSeq
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError].} =
|
||||
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
|
||||
|
||||
|
||||
proc to(node: XNodeObj; T: type RNodeRef): T =
|
||||
case node.kind:
|
||||
of Leaf:
|
||||
result = T(
|
||||
kind: Leaf,
|
||||
lData: node.lData,
|
||||
lPfx: node.lPfx)
|
||||
of Extension:
|
||||
result = T(
|
||||
kind: Extension,
|
||||
eLink: node.eLink.convertTo(RepairKey),
|
||||
ePfx: node.ePfx)
|
||||
of Branch:
|
||||
result = T(
|
||||
kind: Branch,
|
||||
bData: node.bLink[16])
|
||||
for n in 0 .. 15:
|
||||
result.bLink[n] = node.bLink[n].convertTo(RepairKey)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pathLeast(
|
||||
path: XPath;
|
||||
key: Blob;
|
||||
getFn: HexaryGetFn;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## For the partial path given, extend by branch nodes with least node
|
||||
## indices.
|
||||
result = path
|
||||
result.tail = EmptyNibbleSeq
|
||||
result.depth = result.getNibblesImpl.len
|
||||
|
||||
var
|
||||
key = key
|
||||
value = key.getFn()
|
||||
if value.len == 0:
|
||||
return
|
||||
|
||||
while true:
|
||||
block loopContinue:
|
||||
let nodeRlp = rlpFromBytes value
|
||||
case nodeRlp.listLen:
|
||||
of 2:
|
||||
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||
|
||||
# Leaf node
|
||||
if isLeaf:
|
||||
let node = nodeRlp.toLeafNode(pathSegment)
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
result.depth += pathSegment.len
|
||||
return # done ok
|
||||
|
||||
let node = nodeRlp.toExtensionNode(pathSegment)
|
||||
if 0 < node.eLink.len:
|
||||
value = node.eLink.getFn()
|
||||
if 0 < value.len:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
result.depth += pathSegment.len
|
||||
key = node.eLink
|
||||
break loopContinue
|
||||
of 17:
|
||||
# Branch node
|
||||
let node = nodeRlp.toBranchNode
|
||||
if node.bLink[16].len != 0 and 64 <= result.depth:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
return # done ok
|
||||
|
||||
for inx in 0 .. 15:
|
||||
let newKey = node.bLink[inx]
|
||||
if 0 < newKey.len:
|
||||
value = newKey.getFn()
|
||||
if 0 < value.len:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: inx.int8)
|
||||
result.depth.inc
|
||||
key = newKey
|
||||
break loopContinue
|
||||
else:
|
||||
discard
|
||||
|
||||
# Recurse (iteratively)
|
||||
while true:
|
||||
block loopRecurse:
|
||||
# Modify last branch node and try again
|
||||
if result.path[^1].node.kind == Branch:
|
||||
for inx in result.path[^1].nibble+1 .. 15:
|
||||
let newKey = result.path[^1].node.bLink[inx]
|
||||
if 0 < newKey.len:
|
||||
value = newKey.getFn()
|
||||
if 0 < value.len:
|
||||
result.path[^1].nibble = inx.int8
|
||||
key = newKey
|
||||
break loopContinue
|
||||
# Failed, step back and try predecessor branch.
|
||||
while path.path.len < result.path.len:
|
||||
case result.path[^1].node.kind:
|
||||
of Branch:
|
||||
result.depth.dec
|
||||
result.path.setLen(result.path.len - 1)
|
||||
break loopRecurse
|
||||
of Extension:
|
||||
result.depth -= result.path[^1].node.ePfx.len
|
||||
result.path.setLen(result.path.len - 1)
|
||||
of Leaf:
|
||||
return # Ooops
|
||||
return # Failed
|
||||
# Notreached
|
||||
# End while
|
||||
# Notreached
|
||||
|
||||
|
||||
proc pathMost(
|
||||
path: XPath;
|
||||
key: Blob;
|
||||
getFn: HexaryGetFn;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## For the partial path given, extend by branch nodes with greatest node
|
||||
## indices.
|
||||
result = path
|
||||
result.tail = EmptyNibbleSeq
|
||||
result.depth = result.getNibblesImpl.len
|
||||
|
||||
var
|
||||
key = key
|
||||
value = key.getFn()
|
||||
if value.len == 0:
|
||||
return
|
||||
|
||||
while true:
|
||||
block loopContinue:
|
||||
let nodeRlp = rlpFromBytes value
|
||||
case nodeRlp.listLen:
|
||||
of 2:
|
||||
let (isLeaf,pathSegment) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||
|
||||
# Leaf node
|
||||
if isLeaf:
|
||||
let node = nodeRlp.toLeafNode(pathSegment)
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
result.depth += pathSegment.len
|
||||
return # done ok
|
||||
|
||||
# Extension node
|
||||
let node = nodeRlp.toExtensionNode(pathSegment)
|
||||
if 0 < node.eLink.len:
|
||||
value = node.eLink.getFn()
|
||||
if 0 < value.len:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
result.depth += pathSegment.len
|
||||
key = node.eLink
|
||||
break loopContinue
|
||||
of 17:
|
||||
# Branch node
|
||||
let node = nodeRlp.toBranchNode
|
||||
if node.bLink[16].len != 0 and 64 <= result.depth:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: -1)
|
||||
return # done ok
|
||||
|
||||
for inx in 15.countDown(0):
|
||||
let newKey = node.bLink[inx]
|
||||
if 0 < newKey.len:
|
||||
value = newKey.getFn()
|
||||
if 0 < value.len:
|
||||
result.path.add XPathStep(key: key, node: node, nibble: inx.int8)
|
||||
result.depth.inc
|
||||
key = newKey
|
||||
break loopContinue
|
||||
else:
|
||||
discard
|
||||
|
||||
# Recurse (iteratively)
|
||||
while true:
|
||||
block loopRecurse:
|
||||
# Modify last branch node and try again
|
||||
if result.path[^1].node.kind == Branch:
|
||||
for inx in (result.path[^1].nibble-1).countDown(0):
|
||||
let newKey = result.path[^1].node.bLink[inx]
|
||||
if 0 < newKey.len:
|
||||
value = newKey.getFn()
|
||||
if 0 < value.len:
|
||||
result.path[^1].nibble = inx.int8
|
||||
key = newKey
|
||||
break loopContinue
|
||||
# Failed, step back and try predecessor branch.
|
||||
while path.path.len < result.path.len:
|
||||
case result.path[^1].node.kind:
|
||||
of Branch:
|
||||
result.depth.dec
|
||||
result.path.setLen(result.path.len - 1)
|
||||
break loopRecurse
|
||||
of Extension:
|
||||
result.depth -= result.path[^1].node.ePfx.len
|
||||
result.path.setLen(result.path.len - 1)
|
||||
of Leaf:
|
||||
return # Ooops
|
||||
return # Failed
|
||||
# Notreached
|
||||
# End while
|
||||
# Notreached
|
||||
|
||||
# ---------------
|
||||
|
||||
proc fillFromLeft(
|
||||
db: HexaryTreeDbRef; # Target in-memory database
|
||||
rootKey: NodeKey; # State root for persistent source database
|
||||
getFn: HexaryGetFn; # Source database abstraction
|
||||
maxLeafs = 5000; # Error if more than this many leaf nodes
|
||||
): Result[int,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Import persistent sub-tree into target database
|
||||
|
||||
# Find first least path
|
||||
var
|
||||
here = XPath(root: rootKey).pathLeast(rootKey.to(Blob), getFn)
|
||||
countSteps = 0
|
||||
|
||||
if 0 < here.path.len:
|
||||
while true:
|
||||
countSteps.inc
|
||||
|
||||
# Import records
|
||||
for step in here.path:
|
||||
db.tab[step.key.convertTo(RepairKey)] = step.node.to(RNodeRef)
|
||||
|
||||
# Get next path
|
||||
let topKey = here.path[^1].key
|
||||
here = here.next(getFn)
|
||||
|
||||
# Check for end condition
|
||||
if here.path.len == 0:
|
||||
break
|
||||
if topKey == here.path[^1].key:
|
||||
return err(GarbledNextLeaf) # Ooops
|
||||
if maxLeafs <= countSteps:
|
||||
return err(LeafMaxExceeded)
|
||||
|
||||
ok(countSteps)
|
||||
|
||||
proc fillFromRight(
|
||||
db: HexaryTreeDbRef; # Target in-memory database
|
||||
rootKey: NodeKey; # State root for persistent source database
|
||||
getFn: HexaryGetFn; # Source database abstraction
|
||||
maxLeafs = 5000; # Error if more than this many leaf nodes
|
||||
): Result[int,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Import persistent sub-tree into target database
|
||||
|
||||
# Find first greatest path
|
||||
var
|
||||
here = XPath(root: rootKey).pathMost(rootKey.to(Blob), getFn)
|
||||
countSteps = 0
|
||||
|
||||
if 0 < here.path.len:
|
||||
while true:
|
||||
countSteps.inc
|
||||
|
||||
# Import records
|
||||
for step in here.path:
|
||||
db.tab[step.key.convertTo(RepairKey)] = step.node.to(RNodeRef)
|
||||
|
||||
# Get next path
|
||||
let topKey = here.path[^1].key
|
||||
here = here.prev(getFn)
|
||||
|
||||
# Check for end condition
|
||||
if here.path.len == 0:
|
||||
break
|
||||
if topKey == here.path[^1].key:
|
||||
return err(GarbledNextLeaf) # Ooops
|
||||
if maxLeafs <= countSteps:
|
||||
return err(LeafMaxExceeded)
|
||||
|
||||
ok(countSteps)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, pretty printing
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pp*(s: string; hex = false): string =
|
||||
## For long strings print `begin..end` only
|
||||
s.ppImpl(hex)
|
||||
|
||||
proc pp*(w: NibblesSeq): string =
|
||||
$w
|
||||
|
||||
proc pp*(key: RepairKey): string =
|
||||
## Raw key, for referenced key dump use `key.pp(db)` below
|
||||
key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.tolowerAscii
|
||||
|
||||
proc pp*(key: NodeKey): string =
|
||||
## Raw key, for referenced key dump use `key.pp(db)` below
|
||||
key.ByteArray32.toSeq.mapIt(it.toHex(2)).join.tolowerAscii
|
||||
|
||||
proc pp*(key: NodeKey|RepairKey; db: HexaryTreeDbRef): string =
|
||||
key.ppImpl(db)
|
||||
|
||||
proc pp*(
|
||||
w: RNodeRef|XNodeObj|RPathStep|XPathStep;
|
||||
db: HexaryTreeDbRef;
|
||||
): string =
|
||||
w.ppImpl(db)
|
||||
|
||||
proc pp*(
|
||||
w: openArray[RPathStep|XPathStep];
|
||||
db:HexaryTreeDbRef;
|
||||
delim: string;
|
||||
): string =
|
||||
w.toSeq.mapIt(it.ppImpl(db)).join(delim)
|
||||
|
||||
proc pp*(
|
||||
w: openArray[RPathStep|XPathStep];
|
||||
db: HexaryTreeDbRef;
|
||||
indent = 4;
|
||||
): string =
|
||||
w.pp(db, indent.toPfx)
|
||||
|
||||
proc pp*(w: RPath|XPath; db: HexaryTreeDbRef; delim: string): string =
|
||||
result = "<" & w.root.pp(db) & ">"
|
||||
if 0 < w.path.len:
|
||||
result &= delim & w.path.pp(db, delim)
|
||||
result &= delim & "(" & $w.tail
|
||||
when typeof(w) is XPath:
|
||||
result &= "," & $w.depth
|
||||
result &= ")"
|
||||
|
||||
proc pp*(w: RPath|XPath; db: HexaryTreeDbRef; indent=4): string =
|
||||
w.pp(db, indent.toPfx)
|
||||
|
||||
|
||||
proc pp*(db: HexaryTreeDbRef; root: NodeKey; delim: string): string =
|
||||
## Dump the entries from a generic accounts trie. These are
|
||||
## key value pairs for
|
||||
## ::
|
||||
## Branch: ($1,b(<$2,$3,..,$17>,))
|
||||
## Extension: ($18,e(832b5e..06e697,$19))
|
||||
## Leaf: ($20,l(cc9b5d..1c3b4,f84401..f9e5129d[#70]))
|
||||
##
|
||||
## where keys are typically represented as `$<id>` or `¶<id>` or `ø`
|
||||
## depending on whether a key is final (`$<id>`), temporary (`¶<id>`)
|
||||
## or unset/missing (`ø`).
|
||||
##
|
||||
## The node types are indicated by a letter after the first key before
|
||||
## the round brackets
|
||||
## ::
|
||||
## Branch: 'b', 'þ', or 'B'
|
||||
## Extension: 'e', '€', or 'E'
|
||||
## Leaf: 'l', 'ł', or 'L'
|
||||
##
|
||||
## Here a small letter indicates a `Static` node which was from the
|
||||
## original `proofs` list, a capital letter indicates a `Mutable` node
|
||||
## added on the fly which might need some change, and the decorated
|
||||
## letters stand for `Locked` nodes which are like `Static` ones but
|
||||
## added later (typically these nodes are updated `Mutable` nodes.)
|
||||
##
|
||||
## Beware: dumping a large database is not recommended
|
||||
db.ppImpl(root).join(delim)
|
||||
|
||||
proc pp*(db: HexaryTreeDbRef; root: NodeKey; indent=4): string =
|
||||
## Dump the entries from a generic repair tree.
|
||||
db.pp(root, indent.toPfx)
|
||||
|
||||
proc pp*(db: HexaryTreeDbRef; root: Hash256; indent=4): string =
|
||||
## Dump the entries from a generic repair tree.
|
||||
db.pp(root.to(NodeKey), indent.toPfx)
|
||||
|
||||
proc pp*(m: Moment): string =
|
||||
## Prints a moment in time similar to *chronicles* time format.
|
||||
m.asDateTime.format "yyyy-MM-dd HH:mm:ss'.'fff'+00:00'"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, traversal over partial tree in persistent database
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc next*(
|
||||
path: XPath;
|
||||
getFn: HexaryGetFn;
|
||||
minDepth = 64;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Advance the argument `path` to the next leaf node (if any.) The
|
||||
## `minDepth` argument requires the result of `next()` to satisfy
|
||||
## `minDepth <= next().getNibbles.len`.
|
||||
var pLen = path.path.len
|
||||
|
||||
# Find the last branch in the path, increase link and step down
|
||||
while 0 < pLen:
|
||||
|
||||
# Find branch node
|
||||
pLen.dec
|
||||
|
||||
let it = path.path[pLen]
|
||||
if it.node.kind == Branch and it.nibble < 15:
|
||||
|
||||
# Find the next item to the right in the branch list
|
||||
for inx in (it.nibble + 1) .. 15:
|
||||
let link = it.node.bLink[inx]
|
||||
if link.len != 0:
|
||||
let
|
||||
branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8)
|
||||
walk = path.path[0 ..< pLen] & branch
|
||||
newPath = XPath(root: path.root, path: walk).pathLeast(link, getFn)
|
||||
if minDepth <= newPath.depth and 0 < newPath.getLeafData.len:
|
||||
return newPath
|
||||
|
||||
|
||||
proc prev*(
|
||||
path: XPath;
|
||||
getFn: HexaryGetFn;
|
||||
minDepth = 64;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Advance the argument `path` to the previous leaf node (if any.) The
|
||||
## `minDepth` argument requires the result of `prev()` to satisfy
## `minDepth <= prev().getNibbles.len`.
|
||||
var pLen = path.path.len
|
||||
|
||||
# Find the last branch in the path, decrease link and step down
|
||||
while 0 < pLen:
|
||||
|
||||
# Find branch node
|
||||
pLen.dec
|
||||
let it = path.path[pLen]
|
||||
if it.node.kind == Branch and 0 < it.nibble:
|
||||
|
||||
# Find the next item to the left in the branch list
|
||||
for inx in (it.nibble - 1).countDown(0):
|
||||
let link = it.node.bLink[inx]
|
||||
if link.len != 0:
|
||||
let
|
||||
branch = XPathStep(key: it.key, node: it.node, nibble: inx.int8)
|
||||
walk = path.path[0 ..< pLen] & branch
|
||||
newPath = XPath(root: path.root, path: walk).pathMost(link,getFn)
|
||||
if minDepth <= newPath.depth and 0 < newPath.getLeafData.len:
|
||||
return newPath
|
||||
|
||||
|
||||
proc fromPersistent*(
|
||||
db: HexaryTreeDbRef; # Target in-memory database
|
||||
rootKey: NodeKey; # State root for persistent source database
|
||||
getFn: HexaryGetFn; # Source database abstraction
|
||||
maxLeafs = 5000; # Error if more than this many leaf nodes
|
||||
reverse = false; # Fill left to right by default
|
||||
): Result[int,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Import persistent sub-tree into target database
|
||||
if reverse:
|
||||
db.fillFromLeft(rootKey, getFn, maxLeafs)
|
||||
else:
|
||||
db.fillFromRight(rootKey, getFn, maxLeafs)
|
||||
|
||||
proc fromPersistent*(
|
||||
rootKey: NodeKey; # State root for persistent source database
|
||||
getFn: HexaryGetFn; # Source database abstraction
|
||||
maxLeafs = 5000; # Error if more than this many leaf nodes
|
||||
reverse = false; # Fill left to right by default
|
||||
): Result[HexaryTreeDbRef,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `fromPersistent()` for an ad-hoc table
|
||||
let
|
||||
db = HexaryTreeDbRef()
|
||||
rc = db.fromPersistent(rootKey, getFn, maxLeafs, reverse)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
ok(db)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,297 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[hashes, sets, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
stew/endians2,
|
||||
stint,
|
||||
"../.."/[constants, range_desc],
|
||||
./hexary_error
|
||||
|
||||
type
|
||||
HexaryPpFn* =
|
||||
proc(key: RepairKey): string {.gcsafe, raises: [CatchableError].}
|
||||
## For testing/debugging: key pretty printer function
|
||||
|
||||
ByteArray33* = array[33,byte]
|
||||
## Used for 33 byte database keys, i.e. <marker> + <32-byte-key>
|
||||
|
||||
RepairKey* = distinct ByteArray33
|
||||
## Byte prefixed `NodeKey` for internal DB records
|
||||
|
||||
# Example trie from https://eth.wiki/en/fundamentals/patricia-tree
|
||||
#
|
||||
# lookup data:
|
||||
# "do": "verb"
|
||||
# "dog": "puppy"
|
||||
# "dodge": "coin"
|
||||
# "horse": "stallion"
|
||||
#
|
||||
# trie DB:
|
||||
# root: [16 A]
|
||||
# A: [* * * * B * * * [20+"orse" "stallion"] * * * * * * * *]
|
||||
# B: [00+"o" D]
|
||||
# D: [* * * * * * E * * * * * * * * * "verb"]
|
||||
# E: [17 [* * * * * * [35 "coin"] * * * * * * * * * "puppy"]]
|
||||
#
|
||||
# with first nibble of two-column rows:
|
||||
# hex bits | node type length
|
||||
# ---------+------------------
|
||||
# 0 0000 | extension even
|
||||
# 1 0001 | extension odd
|
||||
# 2 0010 | leaf even
|
||||
# 3 0011 | leaf odd
|
||||
#
|
||||
# and key path:
|
||||
# "do": 6 4 6 f
|
||||
# "dog": 6 4 6 f 6 7
|
||||
# "dodge": 6 4 6 f 6 7 6 5
|
||||
# "horse": 6 8 6 f 7 2 7 3 6 5
|
||||
|
||||
NodeKind* = enum
|
||||
Branch
|
||||
Extension
|
||||
Leaf
|
||||
|
||||
RNodeState* = enum
|
||||
Static = 0 ## Inserted as proof record
|
||||
Locked ## Like `Static`, only added on-the-fly
|
||||
Mutable ## Open for modification
|
||||
TmpRoot ## Mutable root node
|
||||
|
||||
RNodeRef* = ref object
|
||||
## Node for building a temporary hexary trie coined `repair tree`.
|
||||
state*: RNodeState ## `Static` if added from proof data set
|
||||
case kind*: NodeKind
|
||||
of Leaf:
|
||||
lPfx*: NibblesSeq ## Portion of path segment
|
||||
lData*: Blob
|
||||
of Extension:
|
||||
ePfx*: NibblesSeq ## Portion of path segment
|
||||
eLink*: RepairKey ## Single down link
|
||||
of Branch:
|
||||
bLink*: array[16,RepairKey] ## Down links
|
||||
#
|
||||
# Paraphrased comment from Andri's `stateless/readme.md` file in chapter
|
||||
# `Deviation from yellow paper`, (also found here
|
||||
# github.com/status-im/nimbus-eth1
|
||||
# /tree/master/stateless#deviation-from-yellow-paper)
|
||||
# [..] In the Yellow Paper, the 17th elem of the branch node can contain
|
||||
# a value. But it is always empty in a real Ethereum state trie. The
|
||||
# block witness spec also ignores this 17th elem when encoding or
|
||||
# decoding a branch node. This can happen because in an Ethereum secure
# hexary trie, every key has a uniform length of 32 bytes or 64 nibbles.
|
||||
# With the absence of the 17th element, a branch node will never contain
|
||||
# a leaf value.
|
||||
bData*: Blob
|
||||
|
||||
XNodeObj* = object
|
||||
## Simplified version of `RNodeRef` to be used as a node for `XPathStep`
|
||||
case kind*: NodeKind
|
||||
of Leaf:
|
||||
lPfx*: NibblesSeq ## Portion of path segment
|
||||
lData*: Blob
|
||||
of Extension:
|
||||
ePfx*: NibblesSeq ## Portion of path segment
|
||||
eLink*: Blob ## Single down link
|
||||
of Branch:
|
||||
bLink*: array[17,Blob] ## Down links followed by data
|
||||
|
||||
RPathStep* = object
|
||||
## For constructing a repair tree traversal path `RPath`
|
||||
key*: RepairKey ## Tree label, node hash
|
||||
node*: RNodeRef ## Refers to data record
|
||||
nibble*: int8 ## Branch node selector (if any)
|
||||
|
||||
RPath* = object
|
||||
root*: RepairKey ## Root node needed when `path.len == 0`
|
||||
path*: seq[RPathStep]
|
||||
tail*: NibblesSeq ## Stands for non completed leaf path
|
||||
|
||||
XPathStep* = object
|
||||
## Similar to `RPathStep` for an arbitrary (sort of transparent) trie
|
||||
key*: Blob ## Node hash implied by `node` data
|
||||
node*: XNodeObj
|
||||
nibble*: int8 ## Branch node selector (if any)
|
||||
|
||||
XPath* = object
|
||||
root*: NodeKey ## Root node needed when `path.len == 0`
|
||||
path*: seq[XPathStep]
|
||||
tail*: NibblesSeq ## Stands for non completed leaf path
|
||||
depth*: int ## May indicate path length (typically 64)
|
||||
|
||||
RLeafSpecs* = object
|
||||
## Temporarily stashed leaf data (as for an account.) Proper records
|
||||
## have non-empty payload. Records with empty payload are administrative
|
||||
## items, e.g. lower boundary records.
|
||||
pathTag*: NodeTag ## Equivalent to account hash
|
||||
nodeKey*: RepairKey ## Leaf hash into hexary repair table
|
||||
payload*: Blob ## Data payload
|
||||
|
||||
HexaryTreeDbRef* = ref object
|
||||
## Hexary trie plus helper structures
|
||||
tab*: Table[RepairKey,RNodeRef] ## key-value trie table, in-memory db
|
||||
repairKeyGen*: uint64 ## Unique tmp key generator
|
||||
keyPp*: HexaryPpFn ## For debugging, might go away
|
||||
|
||||
HexaryGetFn* = proc(key: openArray[byte]): Blob
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
## Persistent database `get()` function. For read-only cases, this
|
||||
## function can be seen as the persistent alternative to `tab[]` on
|
||||
## a `HexaryTreeDbRef` descriptor.
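##
## A minimal usage sketch, assuming an in-memory `Table` stands in for the
## persistent database (e.g. in a test); a real implementation would look
## the key up in the backing store instead (`fakeDb` and `getFn` below are
## hypothetical names):
## ::
##   var fakeDb: Table[seq[byte], Blob]
##   let getFn: HexaryGetFn = proc(key: openArray[byte]): Blob =
##     fakeDb.getOrDefault(@key)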
|
||||
|
||||
HexaryNodeReport* = object
|
||||
## Return code for single node operations
|
||||
slot*: Option[int] ## May refer to indexed argument slots
|
||||
kind*: Option[NodeKind] ## Node type (if any)
|
||||
dangling*: seq[NodeSpecs] ## Missing inner sub-tries
|
||||
error*: HexaryError ## Error code, or `HexaryError(0)`
|
||||
|
||||
static:
|
||||
# Not that there is any doubt about this ...
|
||||
doAssert NodeKey.default.ByteArray32.initNibbleRange.len == 64
|
||||
|
||||
proc isNodeKey*(a: RepairKey): bool {.gcsafe.}
|
||||
proc isZero*(a: RepairKey): bool {.gcsafe.}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc append(writer: var RlpWriter, node: RNodeRef) =
|
||||
## Mixin for RLP writer
|
||||
proc appendOk(writer: var RlpWriter; key: RepairKey): bool =
|
||||
if key.isZero:
|
||||
writer.append(EmptyBlob)
|
||||
elif key.isNodeKey:
|
||||
var hash: Hash256
|
||||
(addr hash.data[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
|
||||
writer.append(hash)
|
||||
else:
|
||||
return false
|
||||
true
|
||||
|
||||
case node.kind:
|
||||
of Branch:
|
||||
writer.startList(17)
|
||||
for n in 0 ..< 16:
|
||||
if not writer.appendOk(node.bLink[n]):
|
||||
return # empty `Blob`
|
||||
writer.append(node.bData)
|
||||
of Extension:
|
||||
writer.startList(2)
|
||||
writer.append(node.ePfx.hexPrefixEncode(isleaf = false))
|
||||
if not writer.appendOk(node.eLink):
|
||||
return # empty `Blob`
|
||||
of Leaf:
|
||||
writer.startList(2)
|
||||
writer.append(node.lPfx.hexPrefixEncode(isleaf = true))
|
||||
writer.append(node.lData)
|
||||
|
||||
|
||||
proc append(writer: var RlpWriter, node: XNodeObj) =
|
||||
## Mixin for RLP writer
|
||||
case node.kind:
|
||||
of Branch:
|
||||
writer.append(node.bLink)
|
||||
of Extension:
|
||||
writer.startList(2)
|
||||
writer.append(node.ePfx.hexPrefixEncode(isleaf = false))
|
||||
writer.append(node.eLink)
|
||||
of Leaf:
|
||||
writer.startList(2)
|
||||
writer.append(node.lPfx.hexPrefixEncode(isleaf = true))
|
||||
writer.append(node.lData)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor (or similar)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(key: var RepairKey; data: openArray[byte]): bool =
|
||||
key.reset
|
||||
if 0 < data.len and data.len <= 33:
|
||||
let trg = addr key.ByteArray33[33 - data.len]
|
||||
trg.copyMem(unsafeAddr data[0], data.len)
|
||||
return true
|
||||
|
||||
proc newRepairKey*(db: HexaryTreeDbRef): RepairKey =
|
||||
db.repairKeyGen.inc
|
||||
# Storing in proper endianness is handy for debugging (but not really important)
|
||||
when cpuEndian == bigEndian:
|
||||
var src = db.repairKeyGen.toBytesBE
|
||||
else:
|
||||
var src = db.repairKeyGen.toBytesLE
|
||||
(addr result.ByteArray33[25]).copyMem(addr src[0], 8)
|
||||
result.ByteArray33[0] = 1
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hash*(a: RepairKey): Hash =
|
||||
## Tables mixin
|
||||
a.ByteArray33.hash
|
||||
|
||||
proc `==`*(a, b: RepairKey): bool =
|
||||
## Tables mixin
|
||||
a.ByteArray33 == b.ByteArray33
|
||||
|
||||
proc to*(key: NodeKey; T: type NibblesSeq): T =
|
||||
key.ByteArray32.initNibbleRange
|
||||
|
||||
proc to*(key: NodeKey; T: type RepairKey): T =
|
||||
(addr result.ByteArray33[1]).copyMem(unsafeAddr key.ByteArray32[0], 32)
|
||||
|
||||
proc isZero*(a: RepairKey): bool =
|
||||
a == typeof(a).default
|
||||
|
||||
proc isZero*[T: NodeTag|NodeKey](a: T): bool =
|
||||
a == typeof(a).default
|
||||
|
||||
proc isNodeKey*(a: RepairKey): bool =
|
||||
a.ByteArray33[0] == 0
|
||||
|
||||
proc convertTo*(data: Blob; T: type NodeKey): T =
|
||||
## Probably lossy conversion, use `init()` for safe conversion
|
||||
discard result.init(data)
|
||||
|
||||
proc convertTo*(data: Blob; T: type NodeTag): T =
|
||||
## Ditto for node tag
|
||||
data.convertTo(NodeKey).to(NodeTag)
|
||||
|
||||
proc convertTo*(data: Blob; T: type RepairKey): T =
|
||||
## Probably lossy conversion, use `init()` for safe conversion
|
||||
discard result.init(data)
|
||||
|
||||
proc convertTo*(node: RNodeRef; T: type Blob): T =
|
||||
## Write the node as an RLP-encoded blob
|
||||
var writer = initRlpWriter()
|
||||
writer.append node
|
||||
writer.finish()
|
||||
|
||||
proc convertTo*(node: XNodeObj; T: type Blob): T =
|
||||
## Variant of `convertTo()` for `XNodeObj` nodes.
|
||||
var writer = initRlpWriter()
|
||||
writer.append node
|
||||
writer.finish()
|
||||
|
||||
proc convertTo*(nodeList: openArray[XNodeObj]; T: type Blob): T =
|
||||
## Variant of `convertTo()` for a list of `XNodeObj` nodes.
|
||||
var writer = initRlpList(nodeList.len)
|
||||
for w in nodeList:
|
||||
writer.append w
|
||||
writer.finish
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,624 +0,0 @@
# nimbus-eth1
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed under either of
#  * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
#    http://www.apache.org/licenses/LICENSE-2.0)
#  * MIT license ([LICENSE-MIT](LICENSE-MIT) or
#    http://opensource.org/licenses/MIT)
# at your option. This file may not be copied, modified, or distributed
# except according to those terms.

## Envelope tools for nodes and hex encoded *partial paths*
## ========================================================
##
## Envelope
## --------
## Given a hex encoded *partial path*, this is the maximum range of leaf node
## paths (of data type `NodeTag`) that start with the *partial path*. It is
## obtained by creating an interval (of type `NodeTagRange`) with end points
## starting with the *partial path* and extending it with *zero* nibbles for
## the left end, and *0xf* nibbles for the right end.
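##
## For example, a *partial path* consisting of the three nibbles ``1 2 3``
## has the envelope
## ::
##   [0x123000..000, 0x123fff..fff]
##
## i.e. the interval of all 64 nibble leaf paths that start with ``123``.
## This is what `hexaryEnvelope()` below computes, padding the partial path
## with zero bytes for the left end and `0xff` bytes for the right end.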
##
## Boundary proofs
## ---------------
## The *boundary proof* for a range `iv` of leaf node paths (e.g. account
## hashes) for a given *state root* is a set of nodes sufficient to construct
## the partial *Merkle Patricia trie* containing the leaves. If the given range
## `iv` is larger than the leftmost or rightmost leaf node paths, the *boundary
## proof* also implies that there is no other leaf path between the range
## boundary and the left or rightmost leaf path. There is no minimality
## requirement for a *boundary proof*.
##
## Envelope decomposition
## ----------------------
## The idea is to compute the difference between the envelope of a hex encoded
## *partial path* and some range of leaf node paths, and express the result as
## a list of envelopes (represented by either nodes or *partial paths*.)
##
## Prerequisites
## ^^^^^^^^^^^^^
## More formally, assume
##
## * ``partialPath`` is a hex encoded *partial path* (of type ``Blob``)
##
## * ``iv`` is a range of leaf node paths (of type ``NodeTagRange``)
##
## and assume further that
##
## * ``partialPath`` points to an allocated node
##
## * for `iv` there are left and right *boundary proofs* in the database
##   (e.g. as downloaded via the `snap/1` protocol.)
##
## The decomposition
## ^^^^^^^^^^^^^^^^^
## Then there is a (probably empty) set `W` of *partial paths* (represented by
## nodes or *partial paths*) where the envelope of each *partial path* in `W`
## has no common leaf path in `iv` (i.e. disjunct to the sub-range of `iv`
## where the boundaries are existing node keys.)
##
## Let this set `W` be maximal in the sense that for every *partial path* `p`
## which is prefixed by `partialPath` and whose envelope has no common leaf
## node in `iv`, there exists a *partial path* `w` in `W` that prefixes `p`. In
## other words the envelope of `p` is contained in the envelope of `w`.
##
## Formally:
##
## * if ``p = partialPath & p-ext`` and ``(envelope of p) * iv`` has no
##   allocated nodes in the hexary trie database
##
## * then there is a ``w = partialPath & w-ext`` in ``W`` with
##   ``p-ext = w-ext & some-ext``.
##

{.push raises: [].}

import
  std/[algorithm, sequtils, tables],
  eth/[common, trie/nibbles],
  stew/interval_set,
  ../../range_desc,
  "."/[hexary_desc, hexary_error, hexary_nearby, hexary_nodes_helper,
    hexary_paths]

# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc eq(a, b: XPathStep|RPathStep): bool =
|
||||
a.key == b.key and a.nibble == b.nibble and a.node == b.node
|
||||
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use
|
||||
discard result.init(key.ByteArray33[1 .. 32])
|
||||
|
||||
proc toNodeSpecs(nodeKey: RepairKey; partialPath: Blob): NodeSpecs =
|
||||
NodeSpecs(
|
||||
nodeKey: nodeKey.convertTo(NodeKey),
|
||||
partialPath: partialPath)
|
||||
|
||||
proc toNodeSpecs(nodeKey: Blob; partialPath: Blob): NodeSpecs =
|
||||
NodeSpecs(
|
||||
nodeKey: nodeKey.convertTo(NodeKey),
|
||||
partialPath: partialPath)
|
||||
|
||||
when false:
|
||||
template noKeyErrorOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Impossible KeyError (" & info & "): " & e.msg
|
||||
|
||||
template noRlpErrorOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError as e:
|
||||
raiseAssert "Impossible RlpError (" & info & "): " & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc doDecomposeLeft(
|
||||
envQ: RPath|XPath;
|
||||
ivQ: RPath|XPath;
|
||||
): Result[seq[NodeSpecs],HexaryError] =
|
||||
## Helper for `hexaryEnvelopeDecompose()` for handling left side of
|
||||
## envelope from partial path argument
|
||||
#
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# ivQ[x]==envQ[x] \ -- envelope left end of partial path
|
||||
# | \
|
||||
# ivQ[x+1] -- `iv`, not fully covering left of `env`
|
||||
# :
|
||||
#
|
||||
var collect: seq[NodeSpecs]
|
||||
block rightCurbEnvelope:
|
||||
for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
|
||||
if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]):
|
||||
#
|
||||
# At this point, the `node` entries of either `.path[n]` step are
|
||||
# the same. This is so because the predecessor steps were the same
|
||||
# or were the `rootKey` in case n == 0.
|
||||
#
|
||||
# But then (`node` entries being equal) the only way for the `.path[n]`
|
||||
# steps to differ is in the entry selector `nibble` for a branch node.
|
||||
#
|
||||
for m in n ..< ivQ.path.len:
|
||||
let
|
||||
pfx = ivQ.getNibbles(0, m) # common path segment
|
||||
top = ivQ.path[m].nibble # need nibbles smaller than top
|
||||
#
|
||||
# Incidentally for a non-`Branch` node, the value `top` becomes
|
||||
# `-1` and the `for`- loop will be ignored (which is correct)
|
||||
for nibble in 0 ..< top:
|
||||
let nodeKey = ivQ.path[m].node.bLink[nibble]
|
||||
if not nodeKey.isZeroLink:
|
||||
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||
break rightCurbEnvelope
|
||||
#
|
||||
# Fringe case, e.g. when `partialPath` is an empty prefix (aka `@[0]`)
|
||||
# and the database has a single leaf node `(a,some-value)` where the
|
||||
# `rootKey` is the hash of this node. In that case, `pMin == 0` and
|
||||
# `pMax == high(NodeTag)` and `iv == [a,a]`.
|
||||
#
|
||||
return err(DecomposeDegenerated)
|
||||
|
||||
ok(collect)
|
||||
|
||||
proc doDecomposeRight(
|
||||
envQ: RPath|XPath;
|
||||
ivQ: RPath|XPath;
|
||||
): Result[seq[NodeSpecs],HexaryError] =
|
||||
## Helper for `hexaryEnvelopeDecompose()` for handling right side of
|
||||
## envelope from partial path argument
|
||||
#
|
||||
# partialPath
|
||||
# / \
|
||||
# / \
|
||||
# / ivQ[x]==envQ[^1] -- envelope right end of partial path
|
||||
# / |
|
||||
# ivQ[x+1] -- `iv`, not fully covering right of `env`
|
||||
# :
|
||||
#
|
||||
var collect: seq[NodeSpecs]
|
||||
block leftCurbEnvelope:
|
||||
for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
|
||||
if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]):
|
||||
for m in n ..< ivQ.path.len:
|
||||
let
|
||||
pfx = ivQ.getNibbles(0, m) # common path segment
|
||||
base = ivQ.path[m].nibble # need nibbles greater/equal
|
||||
if 0 <= base:
|
||||
for nibble in base+1 .. 15:
|
||||
let nodeKey = ivQ.path[m].node.bLink[nibble]
|
||||
if not nodeKey.isZeroLink:
|
||||
collect.add nodeKey.toNodeSpecs hexPrefixEncode(
|
||||
pfx & @[nibble.byte].initNibbleRange.slice(1),isLeaf=false)
|
||||
break leftCurbEnvelope
|
||||
return err(DecomposeDegenerated)
|
||||
|
||||
ok(collect)
|
||||
|
||||
|
||||
proc decomposeLeftImpl(
|
||||
env: NodeTagRange; # Envelope for some partial path
|
||||
rootKey: NodeKey; # State root
|
||||
iv: NodeTagRange; # Proofed range of leaf paths
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
|
||||
var nodeSpex: seq[NodeSpecs]
|
||||
|
||||
# So ranges do overlap. The case that the `partialPath` envelope is fully
|
||||
# contained in `iv` results in `@[]` which is implicitly handled by
|
||||
# non-matching of the below if clause.
|
||||
if env.minPt < iv.minPt:
|
||||
let
|
||||
envQ = env.minPt.hexaryPath(rootKey, db)
|
||||
# Make sure that the min point is the nearest node to the right
|
||||
ivQ = block:
|
||||
let rc = iv.minPt.hexaryPath(rootKey, db).hexaryNearbyRight(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
rc.value
|
||||
block:
|
||||
let rc = envQ.doDecomposeLeft ivQ
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
nodeSpex &= rc.value
|
||||
|
||||
ok(nodeSpex)
|
||||
|
||||
|
||||
proc decomposeRightImpl(
|
||||
env: NodeTagRange; # Envelope for some partial path
|
||||
rootKey: NodeKey; # State root
|
||||
iv: NodeTagRange; # Proofed range of leaf paths
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
|
||||
var nodeSpex: seq[NodeSpecs]
|
||||
if iv.maxPt < env.maxPt:
|
||||
let
|
||||
envQ = env.maxPt.hexaryPath(rootKey, db)
|
||||
ivQ = block:
|
||||
let rc = iv.maxPt.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
rc.value
|
||||
block:
|
||||
let rc = envQ.doDecomposeRight ivQ
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
nodeSpex &= rc.value
|
||||
|
||||
ok(nodeSpex)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, envelope constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryEnvelope*(partialPath: Blob): NodeTagRange =
|
||||
## Convert partial path to range of all conceivable node keys starting with
|
||||
## the partial path argument `partialPath`.
|
||||
let pfx = partialPath.hexPrefixDecode[1]
|
||||
NodeTagRange.new(
|
||||
pfx.padPartialPath(0).to(NodeTag),
|
||||
pfx.padPartialPath(255).to(NodeTag))
|
||||
|
||||
proc hexaryEnvelope*(node: NodeSpecs): NodeTagRange =
|
||||
## variant of `hexaryEnvelope()`
|
||||
node.partialPath.hexaryEnvelope()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryEnvelopeUniq*(
|
||||
partialPaths: openArray[Blob];
|
||||
): seq[Blob]
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## Sort and simplify a list of partial paths by sorting envelopes while
|
||||
## removing nested entries.
|
||||
if partialPaths.len < 2:
|
||||
return partialPaths.toSeq
|
||||
|
||||
var tab: Table[NodeTag,(Blob,bool)]
|
||||
for w in partialPaths:
|
||||
let iv = w.hexaryEnvelope
|
||||
tab[iv.minPt] = (w,true) # begin entry
|
||||
tab[iv.maxPt] = (@[],false) # end entry
|
||||
|
||||
# When sorted, nested entries look like
|
||||
#
|
||||
# 123000000.. (w0, true)
|
||||
# 123400000.. (w1, true) <--- nested
|
||||
# 1234fffff.. (, false) <--- nested
|
||||
# 123ffffff.. (, false)
|
||||
# ...
|
||||
# 777000000.. (w2, true)
|
||||
#
|
||||
var level = 0
|
||||
for key in toSeq(tab.keys).sorted(cmp):
|
||||
let (w,begin) = tab[key]
|
||||
if begin:
|
||||
if level == 0:
|
||||
result.add w
|
||||
level.inc
|
||||
else:
|
||||
level.dec
|
||||
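# Illustrative sketch, not from the original module: the same begin/end
# sweep as above, demonstrated on plain integer intervals so it can be run
# stand-alone. The proc name and the `(int, int)` interval encoding are
# invented for this example.
import std/[algorithm, sequtils, tables]

proc dropNested(ivs: openArray[(int, int)]): seq[(int, int)] =
  ## Keep only the outermost intervals, dropping every interval that is
  ## fully nested inside another one.
  var tab: Table[int, ((int, int), bool)]
  for iv in ivs:
    tab[iv[0]] = (iv, true)                # begin entry
    tab[iv[1]] = ((0, 0), false)           # end entry
  var level = 0
  for key in toSeq(tab.keys).sorted(cmp):
    let (iv, begin) = tab[key]
    if begin:
      if level == 0:
        result.add iv                      # only top-level begins survive
      level.inc
    else:
      level.dec

doAssert dropNested([(1, 9), (2, 3), (20, 30)]) == @[(1, 9), (20, 30)]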
|
||||
proc hexaryEnvelopeUniq*(
|
||||
nodes: openArray[NodeSpecs];
|
||||
): seq[NodeSpecs]
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## Variant of `hexaryEnvelopeUniq` for sorting a `NodeSpecs` list by
|
||||
## partial paths.
|
||||
if nodes.len < 2:
|
||||
return nodes.toSeq
|
||||
|
||||
var tab: Table[NodeTag,(NodeSpecs,bool)]
|
||||
for w in nodes:
|
||||
let iv = w.partialPath.hexaryEnvelope
|
||||
tab[iv.minPt] = (w,true) # begin entry
|
||||
tab[iv.maxPt] = (NodeSpecs(),false) # end entry
|
||||
|
||||
var level = 0
|
||||
for key in toSeq(tab.keys).sorted(cmp):
|
||||
let (w,begin) = tab[key]
|
||||
if begin:
|
||||
if level == 0:
|
||||
result.add w
|
||||
level.inc
|
||||
else:
|
||||
level.dec
|
||||
|
||||
|
||||
proc hexaryEnvelopeTouchedBy*(
|
||||
rangeSet: NodeTagRangeSet; # Set of intervals (aka ranges)
|
||||
partialPath: Blob; # Partial path for some node
|
||||
): NodeTagRangeSet =
|
||||
## For the envelope interval of the `partialPath` argument, this function
|
||||
## returns the complete set of intervals from the argument set `rangeSet`
|
||||
## that have a common point with the envelope (i.e. they are non-disjunct to
|
||||
## the envelope.)
|
||||
##
|
||||
## Note that this function always returns a new set (which might be equal to
|
||||
## the argument set `rangeSet`.)
|
||||
let probe = partialPath.hexaryEnvelope
|
||||
|
||||
# `probe.len==0`(mod 2^256) => `probe==[0,high]` as `probe` cannot be empty
|
||||
if probe.len.isZero:
|
||||
return rangeSet.clone
|
||||
|
||||
result = NodeTagRangeSet.init() # return empty set unless coverage
|
||||
|
||||
if 0 < rangeSet.covered probe:
|
||||
# Find an interval `start` that starts before the `probe` interval.
|
||||
# Preferably, this interval is the rightmost one starting before `probe`.
|
||||
var startSearch = low(NodeTag)
|
||||
|
||||
# Try least interval starting within or to the right of `probe`.
|
||||
let rc = rangeSet.ge probe.minPt
|
||||
if rc.isOk:
|
||||
# Try predecessor
|
||||
let rx = rangeSet.le rc.value.minPt
|
||||
if rx.isOk:
|
||||
# Predecessor interval starts before `probe`, e.g.
|
||||
#
|
||||
# .. [..rx..] [..rc..] ..
|
||||
# [..probe..]
|
||||
#
|
||||
startSearch = rx.value.minPt
|
||||
else:
|
||||
# No predecessor, so `rc.value` is the very first interval, e.g.
|
||||
#
|
||||
# [..rc..] ..
|
||||
# [..probe..]
|
||||
#
|
||||
startSearch = rc.value.minPt
|
||||
else:
|
||||
# No interval starts in or after `probe`.
|
||||
#
|
||||
# So, if an interval ends before the right end of `probe`, it must
|
||||
# start before `probe`.
|
||||
let rx = rangeSet.le probe.maxPt
|
||||
if rx.isOk:
|
||||
#
|
||||
# .. [..rx..] ..
|
||||
# [..probe..]
|
||||
#
|
||||
startSearch = rx.value.minPt
|
||||
else:
|
||||
# Otherwise there is no interval preceding `probe`, so the zero
|
||||
# value for `start` will do the job, e.g.
|
||||
#
|
||||
# [.....rx......]
|
||||
# [..probe..]
|
||||
discard
|
||||
|
||||
# Collect intervals left-to-right for non-disjunct to `probe`
|
||||
for w in increasing[NodeTag,UInt256](rangeSet, startSearch):
|
||||
if (w * probe).isOk:
|
||||
discard result.merge w
|
||||
elif probe.maxPt < w.minPt:
|
||||
break # all the `w` following will be disjunct, too
|
||||
# End if
|
||||
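# Illustrative sketch with invented names: the collection loop above reduced
# to a list of plain integer intervals sorted by start point. Every interval
# intersecting the probe is kept, and the scan stops as soon as an interval
# lies entirely beyond the probe.
proc touchedByToy(ivs: openArray[(int, int)]; probe: (int, int)): seq[(int, int)] =
  for iv in ivs:                           # assumed sorted by start point
    if probe[1] < iv[0]:
      break                                # all following intervals are disjunct
    if iv[1] < probe[0]:
      continue                             # entirely to the left of the probe
    result.add iv                          # non-empty intersection with the probe

doAssert touchedByToy([(0, 4), (6, 9), (20, 30)], (8, 25)) == @[(6, 9), (20, 30)]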
|
||||
|
||||
proc hexaryEnvelopeTouchedBy*(
|
||||
rangeSet: NodeTagRangeSet; # Set of intervals (aka ranges)
|
||||
node: NodeSpecs; # Node w/hex encoded partial path
|
||||
): NodeTagRangeSet =
|
||||
## Variant of `hexaryEnvelopeTouchedBy()`
|
||||
rangeSet.hexaryEnvelopeTouchedBy(node.partialPath)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, complement sub-tries
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryEnvelopeDecompose*(
|
||||
partialPath: Blob; # Hex encoded partial path
|
||||
rootKey: NodeKey; # State root
|
||||
iv: NodeTagRange; # Proofed range of leaf paths
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## This function computes the decomposition of the argument `partialPath`
|
||||
## relative to the argument range `iv`.
|
||||
##
|
||||
## * Comparison with `hexaryInspect()`
|
||||
##
|
||||
## The function `hexaryInspect()` implements a width-first search for
|
||||
## dangling nodes starting at the state root (think of the cathode ray of
|
||||
## a CRT.) For the sake of comparison with `hexaryEnvelopeDecompose()`, the
|
||||
## search may be amended to ignore nodes whose envelope is fully contained
|
||||
## in some range `iv`. For a fully allocated hexary trie, there will be at
|
||||
## least one sub-trie of length *N* with leaves not in `iv`. So the number
|
||||
## of nodes visited is *O(16^N)* for some *N* at most 63 (note that *N*
|
||||
## itself is *O(log M)* where *M* is the number of leaf elements, and
|
||||
## *O(16^N)* = *O(M)*.)
|
||||
##
|
||||
## The function `hexaryEnvelopeDecompose()` takes the left or rightmost leaf
|
||||
## path from `iv`, calculates a chain length *N* of nodes from the state
|
||||
## root to the leaf, and for each node collects the links not pointing
|
||||
## inside the range `iv`. The number of nodes visited is *O(N)*.
|
||||
##
|
||||
## The results of both functions are not interchangeable, though. The first
|
||||
## function `hexaryInspect()` always returns dangling nodes if there are
|
||||
## any in which case the hexary trie is incomplete and there will be no way
|
||||
## to visit all nodes as they simply do not exist. But iteratively adding
|
||||
## nodes or sub-tries and re-running this algorithm will end up with having
|
||||
## all nodes visited.
|
||||
##
|
||||
## The other function `hexaryEnvelopeDecompose()` always returns the same
|
||||
## result where some nodes might be dangling and may be treated similar to
|
||||
## what was discussed in the previous paragraph. This function also reveals
|
||||
## allocated nodes which might be checked for whether they exist fully or
|
||||
## partially for another state root hexary trie.
|
||||
##
|
||||
## So both are sort of complementary where the function
|
||||
## `hexaryEnvelopeDecompose()` is a fast one and `hexaryInspect()` the
|
||||
## thorough one of last resort.
|
||||
##
|
||||
let env = partialPath.hexaryEnvelope
|
||||
if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
|
||||
return err(DecomposeDisjunct) # empty result
|
||||
|
||||
noRlpErrorOops("hexaryEnvelopeDecompose"):
|
||||
let left = block:
|
||||
let rc = env.decomposeLeftImpl(rootKey, iv, db)
|
||||
if rc.isErr:
|
||||
return rc
|
||||
rc.value
|
||||
let right = block:
|
||||
let rc = env.decomposeRightImpl(rootKey, iv, db)
|
||||
if rc.isErr:
|
||||
return rc
|
||||
rc.value
|
||||
return ok(left & right)
|
||||
# Notreached
|
||||
|
||||
|
||||
proc hexaryEnvelopeDecompose*(
|
||||
partialPath: Blob; # Hex encoded partial path
|
||||
ranges: NodeTagRangeSet; # To be complemented
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError] =
|
||||
## Variant of `hexaryEnvelopeDecompose()` for an argument set `ranges` of
|
||||
## intervals rather than a single one.
|
||||
##
|
||||
## Given that for the argument `partialPath` there is an allocated node,
|
||||
## and all intervals in the `ranges` argument are boundary proofed, then
|
||||
## this function compiles the complement of the union of the interval
|
||||
## elements `ranges` relative to the envelope of the argument `partialPath`.
|
||||
## The function expresses this complement as a list of envelopes of
|
||||
## sub-tries. In other words, it finds a list `L` with
|
||||
##
|
||||
## * ``L`` is a list of (existing but not necessarily allocated) nodes.
|
||||
##
|
||||
## * The union ``U(L)`` of envelopes of elements of ``L`` is a subset of the
|
||||
## envelope ``E(partialPath)`` of ``partialPath``.
|
||||
##
|
||||
## * ``U(L)`` has no common point with any interval of the set ``ranges``.
|
||||
##
|
||||
## * ``L`` is maximal in the sense that any node ``w`` which is prefixed by
|
||||
## a node from ``E(partialPath)`` and with an envelope ``E(w)`` without
|
||||
## common node for any interval of ``ranges`` is also prefixed by a node
|
||||
## from ``L``.
|
||||
##
|
||||
## * The envelopes of the nodes in ``L`` are disjunct (i.e. the size of `L`
|
||||
## is minimal.)
|
||||
##
|
||||
## The function fails if `E(partialPath)` is disjunct from any interval of
|
||||
## `ranges`. The function returns an empty list if `E(partialPath)` overlaps
|
||||
## with some interval from `ranges` but there exist no common nodes. Nodes
|
||||
## that cause *RLP* decoding errors are ignored and will get lost.
|
||||
##
|
||||
## Note: Two intervals over the set of nodes might not be disjunct but
|
||||
## nevertheless have no node in common simply for the fact that there
|
||||
## are no such nodes in the database (with a path in the intersection
|
||||
## of the two intervals.)
|
||||
##
|
||||
# Find all intervals from the set of `ranges` ranges that have a point
|
||||
# in common with `partialPath`.
|
||||
let touched = ranges.hexaryEnvelopeTouchedBy partialPath
|
||||
if touched.chunks == 0:
|
||||
return err(DecomposeDisjunct)
|
||||
|
||||
# Decompose the complement of the `node` envelope off `iv` into
|
||||
# envelopes/sub-tries.
|
||||
let
|
||||
startNode = NodeSpecs(partialPath: partialPath)
|
||||
var
|
||||
leftQueue: seq[NodeSpecs] # To be appended only in loop below
|
||||
rightQueue = @[startNode] # To be replaced/modified in loop below
|
||||
|
||||
for iv in touched.increasing:
|
||||
#
|
||||
# For the interval `iv` and the list `rightQueue`, the following holds:
|
||||
# * `iv` is larger (to the right) of its predecessor `iu` (if any)
|
||||
# * all nodes `w` of the list `rightQueue` are larger than `iu` (if any)
|
||||
#
|
||||
# So collect all intervals to the left `iv` and keep going with the
|
||||
# remainder to the right:
|
||||
# ::
|
||||
# before decomposing:
|
||||
# v---------v v---------v v--------v -- right queue envelopes
|
||||
# |-----------| -- iv
|
||||
#
|
||||
# after decomposing the right queue:
|
||||
# v---v -- left queue envelopes
|
||||
# v----v v--------v -- right queue envelopes
|
||||
# |-----------| -- iv
|
||||
#
|
||||
var delayed: seq[NodeSpecs]
|
||||
for n,w in rightQueue:
|
||||
|
||||
let env = w.hexaryEnvelope
|
||||
if env.maxPt < iv.minPt:
|
||||
leftQueue.add w # Envelope fully to the left of `iv`
|
||||
continue
|
||||
|
||||
if iv.maxPt < env.minPt:
|
||||
# All remaining entries are fully to the right of `iv`.
|
||||
delayed &= rightQueue[n ..< rightQueue.len]
|
||||
# Note that `w` != `startNode` because otherwise `touched` would
|
||||
# have been empty.
|
||||
break
|
||||
|
||||
try:
|
||||
block:
|
||||
let rc = env.decomposeLeftImpl(rootKey, iv, db)
|
||||
if rc.isOk:
|
||||
leftQueue &= rc.value # Queue left side smaller than `iv`
|
||||
block:
|
||||
let rc = env.decomposeRightImpl(rootKey, iv, db)
|
||||
if rc.isOk:
|
||||
delayed &= rc.value # Queue right side for next lap
|
||||
except CatchableError:
|
||||
# Cannot decompose `w`, so just drop it
|
||||
discard
|
||||
|
||||
# At this location in code, `delayed` can never contain `startNode` as it
|
||||
# is decomposed in the algorithm above.
|
||||
rightQueue = delayed
|
||||
|
||||
# End for() loop over `touched`
|
||||
|
||||
ok(leftQueue & rightQueue)
|
||||
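# Illustrative sketch, not from the original module: what the decomposition
# amounts to, reduced to plain integer intervals -- the complement of the
# union of `ranges` inside the envelope `env`. All names are invented; the
# real code works on trie nodes and returns node specs, not raw intervals.
import std/algorithm

proc complementIn(env: (int, int); ranges: seq[(int, int)]): seq[(int, int)] =
  ## Return the parts of `env` not covered by any interval in `ranges`.
  var cursor = env[0]
  for iv in ranges.sortedByIt(it[0]):
    if env[1] < iv[0]:
      break                                # interval starts past the envelope
    if iv[1] < cursor:
      continue                             # entirely to the left of the cursor
    if cursor < iv[0]:
      result.add((cursor, min(iv[0] - 1, env[1])))
    cursor = max(cursor, iv[1] + 1)
    if env[1] < cursor:
      return                               # envelope fully covered
  if cursor <= env[1]:
    result.add((cursor, env[1]))           # trailing uncovered part

doAssert complementIn((0, 99), @[(10, 19), (40, 59)]) == @[(0, 9), (20, 39), (60, 99)]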
|
||||
|
||||
proc hexaryEnvelopeDecompose*(
|
||||
node: NodeSpecs; # The envelope of which to be complemented
|
||||
ranges: NodeTagRangeSet; # To be complemented
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `hexaryEnvelopeDecompose()` for ranges and a `NodeSpecs`
|
||||
## argument rather than a partial path.
|
||||
node.partialPath.hexaryEnvelopeDecompose(ranges, rootKey, db)
|
||||
|
||||
proc hexaryEnvelopeDecompose*(
|
||||
ranges: NodeTagRangeSet; # To be complemented
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `hexaryEnvelopeDecompose()` for ranges and an implicit maximal
|
||||
## partial path envelope.
|
||||
|
||||
@[0.byte].hexaryEnvelopeDecompose(ranges, rootKey, db)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,88 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
type
|
||||
HexaryError* = enum
|
||||
NothingSerious = 0
|
||||
|
||||
AccountNotFound
|
||||
AccountsNotSrictlyIncreasing
|
||||
AccountRangesOverlap
|
||||
LowerBoundAfterFirstEntry
|
||||
LowerBoundProofError
|
||||
NodeNotFound
|
||||
RlpEncoding
|
||||
SlotsNotFound
|
||||
SlotsNotSrictlyIncreasing
|
||||
TrieLoopAlert
|
||||
TrieIsEmpty
|
||||
TrieIsLockedForPerusal
|
||||
TooManyChunksInAccountsQueue
|
||||
TooManyQueuedStorageSlots
|
||||
TooManyQueuedContracts
|
||||
NoAccountsYet
|
||||
|
||||
# debug
|
||||
LeafMaxExceeded
|
||||
GarbledNextLeaf
|
||||
|
||||
# snap handler
|
||||
DataSizeError
|
||||
|
||||
# range
|
||||
LeafNodeExpected
|
||||
FailedNextNode
|
||||
|
||||
# nearby/boundary proofs
|
||||
NearbyExtensionError
|
||||
NearbyBranchError
|
||||
NearbyGarbledNode
|
||||
NearbyNestingTooDeep
|
||||
NearbyUnexpectedNode
|
||||
NearbyFailed
|
||||
NearbyEmptyPath
|
||||
NearbyLeafExpected
|
||||
NearbyDanglingLink
|
||||
NearbyPathTail
|
||||
NearbyBeyondRange
|
||||
|
||||
# envelope
|
||||
DecomposeDegenerated
|
||||
DecomposeDisjunct
|
||||
|
||||
# import
|
||||
DifferentNodeValueExists
|
||||
ExpectedNodeKeyDiffers
|
||||
Rlp2Or17ListEntries
|
||||
RlpBlobExpected
|
||||
RlpBranchLinkExpected
|
||||
RlpExtPathEncoding
|
||||
RlpNonEmptyBlobExpected
|
||||
|
||||
# interpolate
|
||||
AccountRepairBlocked
|
||||
InternalDbInconsistency
|
||||
RightBoundaryProofFailed
|
||||
RootNodeMismatch
|
||||
RootNodeMissing
|
||||
|
||||
# bulk storage
|
||||
AddBulkItemFailed
|
||||
CannotOpenRocksDbBulkSession
|
||||
CommitBulkItemsFailed
|
||||
NoRocksDbBackend
|
||||
UnresolvedRepairNode
|
||||
OSErrorException
|
||||
IOErrorException
|
||||
ExceptionError
|
||||
StateRootNotFound
|
||||
|
||||
# End
|
||||
|
@ -1,222 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sets, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
#proc pp(q: openArray[byte]): string =
|
||||
# q.toSeq.mapIt(it.toHex(2)).join.toLowerAscii.pp(hex = true)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryImport*(
|
||||
db: HexaryTreeDbRef; ## Contains node table
|
||||
recData: Blob; ## Node to add
|
||||
unrefNodes: var HashSet[RepairKey]; ## Keep track of freestanding nodes
|
||||
nodeRefs: var HashSet[RepairKey]; ## Ditto
|
||||
): HexaryNodeReport
|
||||
{.gcsafe, raises: [RlpError, KeyError].} =
|
||||
## Decode a single trie item for adding to the table and add it to the
|
||||
## database. Branch and extension record links are collected.
|
||||
if recData.len == 0:
|
||||
return HexaryNodeReport(error: RlpNonEmptyBlobExpected)
|
||||
let
|
||||
nodeKey = recData.digestTo(NodeKey)
|
||||
repairKey = nodeKey.to(RepairKey) # for repair table
|
||||
var
|
||||
rlp = recData.rlpFromBytes
|
||||
blobs = newSeq[Blob](2) # temporary, cache
|
||||
links: array[16,RepairKey] # reconstruct branch node
|
||||
blob16: Blob # reconstruct branch node
|
||||
top = 0 # count entries
|
||||
rNode: RNodeRef # repair tree node
|
||||
|
||||
if not rlp.isList:
|
||||
# Otherwise `rlp.items` will raise a `Defect`
|
||||
return HexaryNodeReport(error: Rlp2Or17ListEntries)
|
||||
|
||||
# Collect lists of either 2 or 17 blob entries.
|
||||
for w in rlp.items:
|
||||
case top
|
||||
of 0, 1:
|
||||
if not w.isBlob:
|
||||
return HexaryNodeReport(error: RlpBlobExpected)
|
||||
blobs[top] = rlp.read(Blob)
|
||||
of 2 .. 15:
|
||||
var key: NodeKey
|
||||
if not key.init(rlp.read(Blob)):
|
||||
return HexaryNodeReport(error: RlpBranchLinkExpected)
|
||||
# Update ref pool
|
||||
links[top] = key.to(RepairKey)
|
||||
unrefNodes.excl links[top] # is referenced, now (if any)
|
||||
nodeRefs.incl links[top]
|
||||
of 16:
|
||||
if not w.isBlob:
|
||||
return HexaryNodeReport(error: RlpBlobExpected)
|
||||
blob16 = rlp.read(Blob)
|
||||
else:
|
||||
return HexaryNodeReport(error: Rlp2Or17ListEntries)
|
||||
top.inc
|
||||
|
||||
# Verify extension data
|
||||
case top
|
||||
of 2:
|
||||
if blobs[0].len == 0:
|
||||
return HexaryNodeReport(error: RlpNonEmptyBlobExpected)
|
||||
let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
|
||||
if isLeaf:
|
||||
rNode = RNodeRef(
|
||||
kind: Leaf,
|
||||
lPfx: pathSegment,
|
||||
lData: blobs[1])
|
||||
else:
|
||||
var key: NodeKey
|
||||
if not key.init(blobs[1]):
|
||||
return HexaryNodeReport(error: RlpExtPathEncoding)
|
||||
# Update ref pool
|
||||
rNode = RNodeRef(
|
||||
kind: Extension,
|
||||
ePfx: pathSegment,
|
||||
eLink: key.to(RepairKey))
|
||||
unrefNodes.excl rNode.eLink # is referenced, now (if any)
|
||||
nodeRefs.incl rNode.eLink
|
||||
of 17:
|
||||
for n in [0,1]:
|
||||
var key: NodeKey
|
||||
if not key.init(blobs[n]):
|
||||
return HexaryNodeReport(error: RlpBranchLinkExpected)
|
||||
# Update ref pool
|
||||
links[n] = key.to(RepairKey)
|
||||
unrefNodes.excl links[n] # is referenced, now (if any)
|
||||
nodeRefs.incl links[n]
|
||||
rNode = RNodeRef(
|
||||
kind: Branch,
|
||||
bLink: links,
|
||||
bData: blob16)
|
||||
else:
|
||||
discard
|
||||
|
||||
# Add to database
|
||||
if not db.tab.hasKey(repairKey):
|
||||
db.tab[repairKey] = rNode
|
||||
|
||||
# Update unreferenced nodes list
|
||||
if repairKey notin nodeRefs:
|
||||
unrefNodes.incl repairKey # keep track of stray nodes
|
||||
|
||||
elif db.tab[repairKey].convertTo(Blob) != recData:
|
||||
return HexaryNodeReport(error: DifferentNodeValueExists)
|
||||
|
||||
HexaryNodeReport(kind: some(rNode.kind))
|
||||
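# Stand-alone illustration (this is not the project's `hexPrefixDecode`):
# the standard Ethereum hex-prefix flag nibble encodes whether a path
# belongs to a leaf and whether its nibble count is odd, which is what the
# `(isLeaf, pathSegment)` decoding above relies on. Names are invented.
proc hpFlags(first: byte): (bool, bool) =
  ## Returns (isLeaf, isOddLength) taken from the high nibble of the first
  ## path byte: 0 = even extension, 1 = odd extension, 2 = even leaf,
  ## 3 = odd leaf.
  let flag = int(first shr 4)
  (2 <= flag, (flag and 1) == 1)

doAssert hpFlags(0x20'u8) == (true, false)   # even-length leaf path
doAssert hpFlags(0x11'u8) == (false, true)   # odd-length extension path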
|
||||
|
||||
proc hexaryImport*(
|
||||
db: HexaryTreeDbRef; ## Contains node table
|
||||
rec: NodeSpecs; ## Expected key and value data pair
|
||||
): HexaryNodeReport
|
||||
{.gcsafe, raises: [RlpError, KeyError].} =
|
||||
## Ditto without reference checks but with an expected node key argument.
|
||||
if rec.data.len == 0:
|
||||
return HexaryNodeReport(error: RlpNonEmptyBlobExpected)
|
||||
if rec.nodeKey != rec.data.digestTo(NodeKey):
|
||||
return HexaryNodeReport(error: ExpectedNodeKeyDiffers)
|
||||
|
||||
let
|
||||
repairKey = rec.nodeKey.to(RepairKey) # for repair table
|
||||
var
|
||||
rlp = rec.data.rlpFromBytes
|
||||
blobs = newSeq[Blob](2) # temporary, cache
|
||||
links: array[16,RepairKey] # reconstruct branch node
|
||||
blob16: Blob # reconstruct branch node
|
||||
top = 0 # count entries
|
||||
rNode: RNodeRef # repair tree node
|
||||
|
||||
if not rlp.isList:
|
||||
# Otherwise `rlp.items` will raise a `Defect`
|
||||
return HexaryNodeReport(error: Rlp2Or17ListEntries)
|
||||
|
||||
# Collect lists of either 2 or 17 blob entries.
|
||||
for w in rlp.items:
|
||||
case top
|
||||
of 0, 1:
|
||||
if not w.isBlob:
|
||||
return HexaryNodeReport(error: RlpBlobExpected)
|
||||
blobs[top] = rlp.read(Blob)
|
||||
of 2 .. 15:
|
||||
var key: NodeKey
|
||||
if not key.init(rlp.read(Blob)):
|
||||
return HexaryNodeReport(error: RlpBranchLinkExpected)
|
||||
# Update ref pool
|
||||
links[top] = key.to(RepairKey)
|
||||
of 16:
|
||||
if not w.isBlob:
|
||||
return HexaryNodeReport(error: RlpBlobExpected)
|
||||
blob16 = rlp.read(Blob)
|
||||
else:
|
||||
return HexaryNodeReport(error: Rlp2Or17ListEntries)
|
||||
top.inc
|
||||
|
||||
# Verify extension data
|
||||
case top
|
||||
of 2:
|
||||
if blobs[0].len == 0:
|
||||
return HexaryNodeReport(error: RlpNonEmptyBlobExpected)
|
||||
let (isLeaf, pathSegment) = hexPrefixDecode blobs[0]
|
||||
if isLeaf:
|
||||
rNode = RNodeRef(
|
||||
kind: Leaf,
|
||||
lPfx: pathSegment,
|
||||
lData: blobs[1])
|
||||
else:
|
||||
var key: NodeKey
|
||||
if not key.init(blobs[1]):
|
||||
return HexaryNodeReport(error: RlpExtPathEncoding)
|
||||
# Update ref pool
|
||||
rNode = RNodeRef(
|
||||
kind: Extension,
|
||||
ePfx: pathSegment,
|
||||
eLink: key.to(RepairKey))
|
||||
of 17:
|
||||
for n in [0,1]:
|
||||
var key: NodeKey
|
||||
if not key.init(blobs[n]):
|
||||
return HexaryNodeReport(error: RlpBranchLinkExpected)
|
||||
# Update ref pool
|
||||
links[n] = key.to(RepairKey)
|
||||
rNode = RNodeRef(
|
||||
kind: Branch,
|
||||
bLink: links,
|
||||
bData: blob16)
|
||||
else:
|
||||
discard
|
||||
|
||||
# Add to database
|
||||
if not db.tab.hasKey(repairKey):
|
||||
db.tab[repairKey] = rNode
|
||||
|
||||
elif db.tab[repairKey].convertTo(Blob) != rec.data:
|
||||
return HexaryNodeReport(error: DifferentNodeValueExists)
|
||||
|
||||
HexaryNodeReport(kind: some(rNode.kind))
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,318 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, strutils, tables],
|
||||
chronicles,
|
||||
eth/[common, trie/nibbles],
|
||||
stew/[byteutils],
|
||||
results,
|
||||
"../.."/[constants, range_desc],
|
||||
"."/[hexary_desc, hexary_nodes_helper, hexary_paths]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
type
|
||||
TrieNodeStatCtxRef* = ref object
|
||||
## Context to resume searching for dangling links
|
||||
case persistent*: bool
|
||||
of true:
|
||||
hddCtx*: seq[(NodeKey,NibblesSeq)]
|
||||
else:
|
||||
memCtx*: seq[(RepairKey,NibblesSeq)]
|
||||
|
||||
TrieNodeStat* = object
|
||||
## Trie inspection report
|
||||
dangling*: seq[NodeSpecs] ## Refers to nodes with incomplete refs
|
||||
count*: uint64 ## Number of nodes visited
|
||||
level*: uint8 ## Maximum nesting depth of dangling nodes
|
||||
stopped*: bool ## Potential loop detected if `true`
|
||||
resumeCtx*: TrieNodeStatCtxRef ## Context for resuming inspection
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
when extraTraceMessages:
|
||||
import stew/byteutils
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers, debugging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc ppDangling(a: seq[NodeSpecs]; maxItems = 30): string =
|
||||
proc ppBlob(w: Blob): string =
|
||||
w.toHex.toLowerAscii
|
||||
let
|
||||
q = a.mapIt(it.partialPath.ppBlob)[0 ..< min(maxItems,a.len)]
|
||||
andMore = if maxItems < a.len: ", ..[#" & $a.len & "].." else: ""
|
||||
"{" & q.join(",") & andMore & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use
|
||||
discard result.init(key.ByteArray33[1 .. 32])
|
||||
|
||||
proc convertTo(key: NodeKey; T: type NodeKey): T =
|
||||
## For simplifying generic functions
|
||||
key
|
||||
|
||||
proc convertTo(key: RepairKey; T: type RepairKey): T =
|
||||
## For simplifying generic functions
|
||||
key
|
||||
|
||||
proc isNodeKey(key: Blob): bool =
|
||||
## For simplifying generic functions
|
||||
key.len == 32 or key.len == 0
|
||||
|
||||
proc to(key: NodeKey; T: type NodeKey): T =
|
||||
## For simplifying generic functions
|
||||
key
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc processLink[Q](
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
stats: var TrieNodeStat; # Collecting results
|
||||
inspect: var Q; # Intermediate todo list
|
||||
trail: NibblesSeq; # Todo list argument
|
||||
child: RepairKey|Blob; # Todo list argument
|
||||
) {.gcsafe, raises: [CatchableError]} =
|
||||
## Helper for `inspectTrieImpl()`
|
||||
if not child.isZeroLink:
|
||||
if not child.isNodeKey:
|
||||
# Oops -- caught in the middle of a repair process? Just register
|
||||
# this node
|
||||
stats.dangling.add NodeSpecs(
|
||||
partialPath: trail.hexPrefixEncode(isLeaf = false))
|
||||
elif child.getNode(db).isOk:
|
||||
inspect.add (child.convertTo(typeof(inspect[0][0])), trail)
|
||||
else:
|
||||
stats.dangling.add NodeSpecs(
|
||||
partialPath: trail.hexPrefixEncode(isLeaf = false),
|
||||
nodeKey: child.convertTo(NodeKey))
|
||||
|
||||
proc inspectTrieImpl(
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
partialPaths: seq[Blob]; # Starting paths for search
|
||||
resumeCtx: TrieNodeStatCtxRef; # Context for resuming inspection
|
||||
suspendAfter: uint64; # To be resumed
|
||||
stopAtLevel: uint8; # Width-first depth level
|
||||
maxDangling: int; # Maximal number of dangling results
|
||||
): TrieNodeStat
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## ...
|
||||
when extraTraceMessages:
|
||||
let nPaths = partialPaths.len
|
||||
|
||||
if rootKey.getNode(db).isErr:
|
||||
when extraTraceMessages:
|
||||
trace "Hexary inspect: missing root", nPaths, maxDangling,
|
||||
rootKey=rootKey.convertTo(NodeKey)
|
||||
return TrieNodeStat()
|
||||
|
||||
var
|
||||
reVisit: seq[(typeof(rootKey),NibblesSeq)]
|
||||
again: seq[(typeof(rootKey),NibblesSeq)]
|
||||
resumeOk = false
|
||||
|
||||
# Initialise lists from previous session
|
||||
if not resumeCtx.isNil:
|
||||
when typeof(db) is HexaryTreeDbRef:
|
||||
if not resumeCtx.persistent and 0 < resumeCtx.memCtx.len:
|
||||
resumeOk = true
|
||||
reVisit = resumeCtx.memCtx
|
||||
else:
|
||||
if resumeCtx.persistent and 0 < resumeCtx.hddCtx.len:
|
||||
resumeOk = true
|
||||
reVisit = resumeCtx.hddCtx
|
||||
|
||||
if partialPaths.len == 0 and not resumeOk:
|
||||
reVisit.add (rootKey,EmptyNibbleSeq)
|
||||
else:
|
||||
# Add argument paths
|
||||
for w in partialPaths:
|
||||
let (isLeaf,nibbles) = hexPrefixDecode w
|
||||
if not isLeaf:
|
||||
let rc = nibbles.hexaryPathNodeKey(rootKey, db, missingOk=false)
|
||||
if rc.isOk:
|
||||
reVisit.add (rc.value.to(typeof(rootKey)), nibbles)
|
||||
|
||||
# Stopping on `suspendAfter` has precedence over `stopAtLevel`
|
||||
while 0 < reVisit.len and result.count <= suspendAfter:
|
||||
when extraTraceMessages:
|
||||
trace "Hexary inspect processing", nPaths, maxDangling,
|
||||
level=result.level, nReVisit=reVisit.len, nDangling=result.dangling.len
|
||||
|
||||
if stopAtLevel < result.level:
|
||||
result.stopped = true
|
||||
break
|
||||
|
||||
for n in 0 ..< reVisit.len:
|
||||
if suspendAfter < result.count or
|
||||
maxDangling <= result.dangling.len:
|
||||
# Swallow rest
|
||||
again &= reVisit[n ..< reVisit.len]
|
||||
break
|
||||
|
||||
let
|
||||
(rKey, parentTrail) = reVisit[n]
|
||||
rc = rKey.getNode(db)
|
||||
if rc.isErr:
|
||||
continue # ignore this node
|
||||
let node = rc.value
|
||||
|
||||
case node.kind:
|
||||
of Extension:
|
||||
let
|
||||
trail = parentTrail & node.ePfx
|
||||
child = node.eLink
|
||||
db.processLink(stats=result, inspect=again, trail, child)
|
||||
of Branch:
|
||||
for n in 0 ..< 16:
|
||||
let
|
||||
trail = parentTrail & @[n.byte].initNibbleRange.slice(1)
|
||||
child = node.bLink[n]
|
||||
db.processLink(stats=result, inspect=again, trail, child)
|
||||
of Leaf:
|
||||
# Ooops, forget node and key
|
||||
discard
|
||||
|
||||
result.count.inc
|
||||
# End `for`
|
||||
|
||||
result.level.inc
|
||||
swap(reVisit, again)
|
||||
again.setLen(0)
|
||||
# End while
|
||||
|
||||
# Collect left overs for resuming search
|
||||
if 0 < reVisit.len:
|
||||
when typeof(db) is HexaryTreeDbRef:
|
||||
result.resumeCtx = TrieNodeStatCtxRef(
|
||||
persistent: false,
|
||||
memCtx: reVisit)
|
||||
else:
|
||||
result.resumeCtx = TrieNodeStatCtxRef(
|
||||
persistent: true,
|
||||
hddCtx: reVisit)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace "Hexary inspect finished", nPaths, maxDangling,
|
||||
level=result.level, nResumeCtx=reVisit.len, nDangling=result.dangling.len,
|
||||
maxLevel=stopAtLevel, stopped=result.stopped
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc to*(resumeCtx: TrieNodeStatCtxRef; T: type seq[NodeSpecs]): T =
|
||||
## Convert resumption context to nodes that can be used otherwise. This
|
||||
## function might be useful for error recovery.
|
||||
##
|
||||
## Note: In a non-persistent case, temporary `RepairKey` type node specs
|
||||
## that cannot be converted to `NodeKey` type nodes are silently dropped.
|
||||
## This should be no problem as a hexary trie with `RepairKey` type node
|
||||
## refs must be repaired or discarded anyway.
|
||||
if resumeCtx.persistent:
|
||||
for (key,trail) in resumeCtx.hddCtx:
|
||||
result.add NodeSpecs(
|
||||
partialPath: trail.hexPrefixEncode(isLeaf = false),
|
||||
nodeKey: key)
|
||||
else:
|
||||
for (key,trail) in resumeCtx.memCtx:
|
||||
if key.isNodeKey:
|
||||
result.add NodeSpecs(
|
||||
partialPath: trail.hexPrefixEncode(isLeaf = false),
|
||||
nodeKey: key.convertTo(NodeKey))
|
||||
|
||||
|
||||
proc hexaryInspectTrie*(
|
||||
db: HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey; # State root
|
||||
partialPaths = EmptyBlobSeq; # Starting paths for search
|
||||
resumeCtx = TrieNodeStatCtxRef(nil); # Context for resuming inspection
|
||||
suspendAfter = high(uint64); # To be resumed
|
||||
stopAtLevel = 64u8; # Width-first depth level
|
||||
maxDangling = high(int); # Maximal number of dangling results
|
||||
): TrieNodeStat
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Starting with the argument list `partialPaths`, find all the non-leaf nodes in
|
||||
## the hexary trie which have at least one node key reference missing in
|
||||
## the trie database. The references for these nodes are collected and
|
||||
## returned.
|
||||
##
|
||||
## * Argument `partialPaths` list entries that do not refer to an existing
|
||||
## and allocated hexary trie node are silently ignored. So are entries
|
||||
## that do not refer to either a valid extension or a branch type node.
|
||||
##
|
||||
## * This function traverses the hexary trie in *width-first* mode
|
||||
## simultaneously for any entry of the argument `partialPaths` list. Apart
|
||||
## from completing the search, there are three conditions when the search
|
||||
## pauses to return the current state (via `resumeCtx`, see next bullet
|
||||
## point):
|
||||
## + The depth level of the running algorithm exceeds `stopAtLevel`.
|
||||
## + The number of visited nodes exceeds `suspendAfter`.
|
||||
## + The number of currently collected dangling nodes exceeds `maxDangling`.
|
||||
## If the function pauses because the current depth exceeds `stopAtLevel`
|
||||
## then the `stopped` flag of the result object will be set, as well.
|
||||
##
|
||||
## * When paused for some of the reasons listed above, the `resumeCtx` field
|
||||
## of the result object contains the current state so that the function
|
||||
## can resume searching from where it paused. An application using this
|
||||
## feature could look like:
|
||||
## ::
|
||||
## var ctx = TrieNodeStatCtxRef()
|
||||
## while not ctx.isNil:
|
||||
## let state = hexaryInspectTrie(db, root, paths, resumeCtx=ctx, 1024)
|
||||
## ...
|
||||
## ctx = state.resumeCtx
|
||||
## paths = EmptyBlobSeq
|
||||
##
|
||||
db.inspectTrieImpl(rootKey.to(RepairKey),
|
||||
partialPaths, resumeCtx, suspendAfter, stopAtLevel, maxDangling)
|
||||
|
||||
|
||||
proc hexaryInspectTrie*(
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
rootKey: NodeKey; # State root
|
||||
partialPaths = EmptyBlobSeq; # Starting paths for search
|
||||
resumeCtx: TrieNodeStatCtxRef = nil; # Context for resuming inspection
|
||||
suspendAfter = high(uint64); # To be resumed
|
||||
stopAtLevel = 64u8; # Width-first depth level
|
||||
maxDangling = high(int); # Maximal number of dangling results
|
||||
): TrieNodeStat
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryInspectTrie()` for persistent database.
|
||||
getFn.inspectTrieImpl(
|
||||
rootKey, partialPaths, resumeCtx, suspendAfter, stopAtLevel, maxDangling)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, debugging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pp*(a: TrieNodeStat; db: HexaryTreeDbRef; maxItems = 30): string =
|
||||
result = "(" & $a.level
|
||||
if a.stopped:
|
||||
result &= "stopped,"
|
||||
result &= $a.dangling.len & "," &
|
||||
a.dangling.ppDangling(maxItems) & ")"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,627 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## For a given path, add missing nodes to a hexary trie.
|
||||
##
|
||||
## The functionality of this module is temporary and proof-of-concept. For production
|
||||
## purposes, it should be replaced by the new facility of the upcoming
|
||||
## re-factored database layer.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[tables],
|
||||
eth/[common, trie/nibbles],
|
||||
results,
|
||||
"../.."/[constants, range_desc],
|
||||
"."/[hexary_desc, hexary_error, hexary_paths]
|
||||
|
||||
type
|
||||
RPathXStep = object
|
||||
## Extended `RPathStep` needed for `NodeKey` assignment
|
||||
pos*: int ## Some position into `seq[RPathStep]`
|
||||
step*: RPathStep ## Modified copy of an `RPathStep`
|
||||
canLock*: bool ## Can set `Locked` state
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
import std/[sequtils, strutils]
|
||||
|
||||
proc pp(w: RPathXStep; db: HexaryTreeDbRef): string =
|
||||
let y = if w.canLock: "lockOk" else: "noLock"
|
||||
"(" & $w.pos & "," & y & "," & w.step.pp(db) & ")"
|
||||
|
||||
proc pp(w: seq[RPathXStep]; db: HexaryTreeDbRef; indent = 4): string =
|
||||
let pfx = "\n" & " ".repeat(indent)
|
||||
w.mapIt(it.pp(db)).join(pfx)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc dup(node: RNodeRef): RNodeRef =
|
||||
new result
|
||||
result[] = node[]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private getters & setters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc xPfx(node: RNodeRef): NibblesSeq =
|
||||
case node.kind:
|
||||
of Leaf:
|
||||
return node.lPfx
|
||||
of Extension:
|
||||
return node.ePfx
|
||||
of Branch:
|
||||
doAssert node.kind != Branch # Ooops
|
||||
|
||||
proc `xPfx=`(node: RNodeRef, val: NibblesSeq) =
|
||||
case node.kind:
|
||||
of Leaf:
|
||||
node.lPfx = val
|
||||
of Extension:
|
||||
node.ePfx = val
|
||||
of Branch:
|
||||
doAssert node.kind != Branch # Ooops
|
||||
|
||||
#proc xData(node: RNodeRef): Blob =
|
||||
# case node.kind:
|
||||
# of Branch:
|
||||
# return node.bData
|
||||
# of Leaf:
|
||||
# return node.lData
|
||||
# of Extension:
|
||||
# doAssert node.kind != Extension # Ooops
|
||||
|
||||
proc `xData=`(node: RNodeRef; val: Blob) =
|
||||
case node.kind:
|
||||
of Branch:
|
||||
node.bData = val
|
||||
of Leaf:
|
||||
node.lData = val
|
||||
of Extension:
|
||||
doAssert node.kind != Extension # Ooops
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, repair tree action helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rTreeExtendLeaf(
|
||||
db: HexaryTreeDbRef;
|
||||
rPath: RPath;
|
||||
key: RepairKey
|
||||
): RPath =
|
||||
## Append a `Leaf` node to a `Branch` node (see `rTreeExtend()`.)
|
||||
if 0 < rPath.tail.len:
|
||||
let
|
||||
nibble = rPath.path[^1].nibble
|
||||
leaf = RNodeRef(
|
||||
state: Mutable,
|
||||
kind: Leaf,
|
||||
lPfx: rPath.tail)
|
||||
db.tab[key] = leaf
|
||||
if not key.isNodeKey:
|
||||
rPath.path[^1].node.bLink[nibble] = key
|
||||
return RPath(
|
||||
root: rPath.root,
|
||||
path: rPath.path & RPathStep(key: key, node: leaf, nibble: -1),
|
||||
tail: EmptyNibbleSeq)
|
||||
|
||||
proc rTreeExtendLeaf(
|
||||
db: HexaryTreeDbRef;
|
||||
rPath: RPath;
|
||||
key: RepairKey;
|
||||
node: RNodeRef;
|
||||
): RPath =
|
||||
## Register `node` and append/link a `Leaf` node to a `Branch` node (see
|
||||
## `rTreeExtend()`.)
|
||||
if 1 < rPath.tail.len and node.state in {Mutable,TmpRoot}:
|
||||
let
|
||||
nibble = rPath.tail[0].int8
|
||||
xStep = RPathStep(key: key, node: node, nibble: nibble)
|
||||
xPath = RPath(
|
||||
root: rPath.root,
|
||||
path: rPath.path & xStep,
|
||||
tail: rPath.tail.slice(1))
|
||||
return db.rTreeExtendLeaf(xPath, db.newRepairKey())
|
||||
|
||||
|
||||
proc rTreeSplitNode(
|
||||
db: HexaryTreeDbRef;
|
||||
rPath: RPath;
|
||||
key: RepairKey;
|
||||
node: RNodeRef;
|
||||
): RPath =
|
||||
## Replace `Leaf` or `Extension` node in tuple `(key,node)` by parts (see
|
||||
## `rTreeExtend()`):
|
||||
##
|
||||
## left(Extension) -> middle(Branch) -> right(Extension or Leaf)
|
||||
## ^ ^
|
||||
## | |
|
||||
## added-to-path added-to-path
|
||||
##
|
||||
## where either `left()` or `right()` extensions might be missing.
|
||||
##
|
||||
let
|
||||
nibbles = node.xPfx
|
||||
lLen = rPath.tail.sharedPrefixLen(nibbles)
|
||||
if nibbles.len == 0 or rPath.tail.len <= lLen:
|
||||
return # Ooops (^^^^^ otherwise `rPath` was not the longest)
|
||||
var
|
||||
mKey = key
|
||||
let
|
||||
mNibble = nibbles[lLen] # exists as `lLen < tail.len`
|
||||
rPfx = nibbles.slice(lLen + 1) # might be empty OK
|
||||
|
||||
result = rPath
|
||||
|
||||
# Insert node (if any): left(Extension)
|
||||
if 0 < lLen:
|
||||
let lNode = RNodeRef(
|
||||
state: Mutable,
|
||||
kind: Extension,
|
||||
ePfx: result.tail.slice(0,lLen),
|
||||
eLink: db.newRepairKey())
|
||||
db.tab[key] = lNode
|
||||
result.path.add RPathStep(key: key, node: lNode, nibble: -1)
|
||||
result.tail = result.tail.slice(lLen)
|
||||
mKey = lNode.eLink
|
||||
|
||||
# Insert node: middle(Branch)
|
||||
let mNode = RNodeRef(
|
||||
state: Mutable,
|
||||
kind: Branch)
|
||||
db.tab[mKey] = mNode
|
||||
result.path.add RPathStep(key: mKey, node: mNode, nibble: -1) # no nibble yet
|
||||
|
||||
# Insert node (if any): right(Extension) -- not to be registered in `rPath`
|
||||
if 0 < rPfx.len:
|
||||
let rKey = db.newRepairKey()
|
||||
# Re-use argument node
|
||||
mNode.bLink[mNibble] = rKey
|
||||
db.tab[rKey] = node
|
||||
node.xPfx = rPfx
|
||||
# Otherwise merge argument node
|
||||
elif node.kind == Extension:
|
||||
mNode.bLink[mNibble] = node.eLink
|
||||
else:
|
||||
# Oops, does it make sense, at all?
|
||||
mNode.bData = node.lData
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, repair tree actions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rTreeInterpolate(
|
||||
rPath: RPath;
|
||||
db: HexaryTreeDbRef;
|
||||
): RPath
|
||||
{.gcsafe, raises: [KeyError]} =
|
||||
## Extend path, add missing nodes to tree. The last node added will be
|
||||
## a `Leaf` node if this function succeeds.
|
||||
##
|
||||
## The function assumes that the `RPath` argument is the longest possible
|
||||
## as just constructed by `pathExtend()`
|
||||
if 0 < rPath.path.len and 0 < rPath.tail.len:
|
||||
let step = rPath.path[^1]
|
||||
case step.node.kind:
|
||||
of Branch:
|
||||
# Now, the slot must not be empty. An empty slot would lead to a
|
||||
# rejection of this record as last valid step, contrary to the
|
||||
# assumption `path` is the longest one.
|
||||
if step.nibble < 0:
|
||||
return # sanity check failed
|
||||
let key = step.node.bLink[step.nibble]
|
||||
if key.isZero:
|
||||
return # sanity check failed
|
||||
|
||||
# Case: unused slot => add leaf record
|
||||
if not db.tab.hasKey(key):
|
||||
return db.rTreeExtendLeaf(rPath, key)
|
||||
|
||||
# So a `child` node exists but it is something that could not be used to
|
||||
# extend the argument `path` which is assumed the longest possible one.
|
||||
let child = db.tab[key]
|
||||
case child.kind:
|
||||
of Branch:
|
||||
# So a `Leaf` node can be linked into the `child` branch
|
||||
return db.rTreeExtendLeaf(rPath, key, child)
|
||||
|
||||
# Need to split the right `grandChild` in `child -> grandChild`
|
||||
# into parts:
|
||||
#
|
||||
# left(Extension) -> middle(Branch)
|
||||
# | |
|
||||
# | +-----> right(Extension or Leaf) ...
|
||||
# +---------> new Leaf record
|
||||
#
|
||||
# where either `left()` or `right()` extensions might be missing
|
||||
of Extension, Leaf:
|
||||
var xPath = db.rTreeSplitNode(rPath, key, child)
|
||||
if 0 < xPath.path.len:
|
||||
# Append `Leaf` node
|
||||
xPath.path[^1].nibble = xPath.tail[0].int8
|
||||
xPath.tail = xPath.tail.slice(1)
|
||||
return db.rTreeExtendLeaf(xPath, db.newRepairKey())
|
||||
of Leaf:
|
||||
return # Oops
|
||||
of Extension:
|
||||
let key = step.node.eLink
|
||||
|
||||
var child: RNodeRef
|
||||
if db.tab.hasKey(key):
|
||||
child = db.tab[key]
|
||||
# `Extension` can only be followed by a `Branch` node
|
||||
if child.kind != Branch:
|
||||
return
|
||||
else:
|
||||
# Case: unused slot => add `Branch` and `Leaf` record
|
||||
child = RNodeRef(
|
||||
state: Mutable,
|
||||
kind: Branch)
|
||||
db.tab[key] = child
|
||||
|
||||
# So a `Leaf` node can be linked into the `child` branch
|
||||
return db.rTreeExtendLeaf(rPath, key, child)
|
||||
|
||||
proc rTreeInterpolate(
|
||||
rPath: RPath;
|
||||
db: HexaryTreeDbRef;
|
||||
payload: Blob;
|
||||
): RPath
|
||||
{.gcsafe, raises: [KeyError]} =
|
||||
## Variant of `rTreeExtend()` which completes a `Leaf` record.
|
||||
result = rPath.rTreeInterpolate(db)
|
||||
if 0 < result.path.len and result.tail.len == 0:
|
||||
let node = result.path[^1].node
|
||||
if node.kind != Extension and node.state in {Mutable,TmpRoot}:
|
||||
node.xData = payload
|
||||
|
||||
|
||||
proc rTreeUpdateKeys(
|
||||
rPath: RPath;
|
||||
db: HexaryTreeDbRef;
|
||||
): Result[void,bool]
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## The argument `rPath` is assumed to organise database nodes as
|
||||
##
|
||||
## root -> ... -> () -> () -> ... -> () -> () ...
|
||||
## |-------------| |------------| |------
|
||||
## static nodes locked nodes mutable nodes
|
||||
##
|
||||
## Where
|
||||
## * Static nodes are read-only nodes provided by the proof database
|
||||
## * Locked nodes are added read-only nodes that satisfy the proof condition
|
||||
## * Mutable nodes are incomplete nodes
|
||||
##
|
||||
## Then update nodes from the right end and set all the mutable nodes
|
||||
## locked if possible.
|
||||
##
|
||||
## On error, a boolean value is returned indicating whether there were some
|
||||
## significant changes made to the database, i.e. some nodes could be locked.
|
||||
var
|
||||
rTop = rPath.path.len
|
||||
stack: seq[RPathXStep]
|
||||
changed = false
|
||||
|
||||
if 0 < rTop and
|
||||
rPath.path[^1].node.state == Mutable and
|
||||
rPath.path[0].node.state != Mutable:
|
||||
|
||||
# Set `Leaf` entry
|
||||
let leafNode = rPath.path[^1].node.dup
|
||||
stack.add RPathXStep(
|
||||
pos: rTop - 1,
|
||||
canLock: true,
|
||||
step: RPathStep(
|
||||
node: leafNode,
|
||||
key: leafNode.convertTo(Blob).digestTo(NodeKey).to(RepairKey),
|
||||
nibble: -1))
|
||||
|
||||
while 1 < rTop:
|
||||
rTop.dec
|
||||
|
||||
# Update parent node (note that `2 <= rPath.path.len`)
|
||||
let
|
||||
thisKey = stack[^1].step.key
|
||||
preStep = rPath.path[rTop-1]
|
||||
preNibble = preStep.nibble
|
||||
|
||||
# End reached
|
||||
if preStep.node.state notin {Mutable,TmpRoot}:
|
||||
|
||||
# Verify the tail matches
|
||||
var key = RepairKey.default
|
||||
case preStep.node.kind:
|
||||
of Branch:
|
||||
key = preStep.node.bLink[preNibble]
|
||||
of Extension:
|
||||
key = preStep.node.eLink
|
||||
of Leaf:
|
||||
discard
|
||||
if key != thisKey:
|
||||
if not db.tab.hasKey(key) or
|
||||
db.tab[key].state notin {Mutable,TmpRoot}:
|
||||
return err(false) # no changes were made
|
||||
|
||||
# Ok, replace database records by stack entries
|
||||
var lockOk = true
|
||||
for n in countDown(stack.len-1,0):
|
||||
let item = stack[n]
|
||||
db.tab.del(rPath.path[item.pos].key)
|
||||
db.tab[item.step.key] = item.step.node
|
||||
if lockOk:
|
||||
if item.canLock:
|
||||
changed = true
|
||||
item.step.node.state = Locked
|
||||
else:
|
||||
lockOk = false
|
||||
if not lockOk:
|
||||
return err(changed)
|
||||
return ok() # Done ok()
|
||||
|
||||
stack.add RPathXStep(
|
||||
pos: rTop - 1,
|
||||
step: RPathStep(
|
||||
node: preStep.node.dup, # (!)
|
||||
nibble: preNibble,
|
||||
key: preStep.key))
|
||||
|
||||
case stack[^1].step.node.kind:
|
||||
of Branch:
|
||||
stack[^1].step.node.bLink[preNibble] = thisKey
|
||||
# Check whether all keys are proper, non-temporary keys
|
||||
stack[^1].canLock = true
|
||||
for n in 0 ..< 16:
|
||||
if not stack[^1].step.node.bLink[n].isNodeKey:
|
||||
stack[^1].canLock = false
|
||||
break
|
||||
of Extension:
|
||||
stack[^1].step.node.eLink = thisKey
|
||||
stack[^1].canLock = thisKey.isNodeKey
|
||||
of Leaf:
|
||||
return err(false) # no changes were made
|
||||
|
||||
# Must not overwrite a non-temporary key
|
||||
if stack[^1].canLock:
|
||||
stack[^1].step.key =
|
||||
stack[^1].step.node.convertTo(Blob).digestTo(NodeKey).to(RepairKey)
|
||||
|
||||
# End while 1 < rTop
|
||||
|
||||
if stack[0].step.node.state != Mutable:
|
||||
# Nothing that can be done, here
|
||||
return err(false) # no changes were made
|
||||
|
||||
# Ok, replace database records by stack entries
|
||||
block:
|
||||
var lockOk = true
|
||||
for n in countDown(stack.len-1,0):
|
||||
let item = stack[n]
|
||||
if item.step.node.state == TmpRoot:
|
||||
db.tab[rPath.path[item.pos].key] = item.step.node
|
||||
else:
|
||||
db.tab.del(rPath.path[item.pos].key)
|
||||
db.tab[item.step.key] = item.step.node
|
||||
if lockOk:
|
||||
if item.canLock:
|
||||
changed = true
|
||||
item.step.node.state = Locked
|
||||
else:
|
||||
lockOk = false
|
||||
if not lockOk:
|
||||
return err(changed)
|
||||
# Done ok()
|
||||
|
||||
ok()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions for proof-less (i.e. empty) databases
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rTreeBranchAppendleaf(
|
||||
db: HexaryTreeDbRef;
|
||||
bNode: RNodeRef;
|
||||
leaf: RLeafSpecs;
|
||||
): bool =
|
||||
## Database prefill helper.
|
||||
let nibbles = leaf.pathTag.to(NodeKey).ByteArray32.initNibbleRange
|
||||
if bNode.bLink[nibbles[0]].isZero:
|
||||
let key = db.newRepairKey()
|
||||
bNode.bLink[nibbles[0]] = key
|
||||
db.tab[key] = RNodeRef(
|
||||
state: Mutable,
|
||||
kind: Leaf,
|
||||
lPfx: nibbles.slice(1),
|
||||
lData: leaf.payload)
|
||||
return true
|
||||
|
||||
proc rTreePrefill(
|
||||
db: HexaryTreeDbRef;
|
||||
rootKey: NodeKey;
|
||||
dbItems: var seq[RLeafSpecs];
|
||||
) =
|
||||
## Fill missing root node.
|
||||
let nibbles = dbItems[^1].pathTag.to(NodeKey).ByteArray32.initNibbleRange
|
||||
if dbItems.len == 1:
|
||||
db.tab[rootKey.to(RepairKey)] = RNodeRef(
|
||||
state: TmpRoot,
|
||||
kind: Leaf,
|
||||
lPfx: nibbles,
|
||||
lData: dbItems[^1].payload)
|
||||
else:
|
||||
# let key = db.newRepairKey() -- notused
|
||||
var node = RNodeRef(
|
||||
state: TmpRoot,
|
||||
kind: Branch)
|
||||
discard db.rTreeBranchAppendleaf(node, dbItems[^1])
|
||||
db.tab[rootKey.to(RepairKey)] = node
|
||||
|
||||
proc rTreeSquashRootNode(
|
||||
db: HexaryTreeDbRef;
|
||||
rootKey: NodeKey;
|
||||
): RNodeRef
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## Handle fringe case and return root node. This function assumes that the
|
||||
## root node has been installed, already. This function will check the root
|
||||
## node for a combination `Branch->Extension/Leaf` for a single child root
|
||||
## branch node and replace the pair by a single extension or leaf node. In
|
||||
## a similar fashion, a combination `Branch->Branch` for a single child root
|
||||
## is replaced by an `Extension->Branch` combination.
|
||||
let
|
||||
rootRKey = rootKey.to(RepairKey)
|
||||
node = db.tab[rootRKey]
|
||||
if node.kind == Branch:
|
||||
# Check whether there is more than one link, only
|
||||
var (nextKey, nibble) = (RepairKey.default, -1)
|
||||
for inx in 0 ..< 16:
|
||||
if not node.bLink[inx].isZero:
|
||||
if 0 <= nibble:
|
||||
return node # Nothing to do here
|
||||
(nextKey, nibble) = (node.bLink[inx], inx)
|
||||
if 0 <= nibble and db.tab.hasKey(nextKey):
|
||||
# Ok, exactly one link
|
||||
let
|
||||
nextNode = db.tab[nextKey]
|
||||
nibblePfx = @[nibble.byte].initNibbleRange.slice(1)
|
||||
if nextNode.kind == Branch:
|
||||
# Replace root node by an extension node
|
||||
let thisNode = RNodeRef(
|
||||
kind: Extension,
|
||||
ePfx: nibblePfx,
|
||||
eLink: nextKey)
|
||||
db.tab[rootRKey] = thisNode
|
||||
return thisNode
|
||||
else:
|
||||
# Nodes can be squashed: the child node replaces the root node
|
||||
nextNode.xPfx = nibblePfx & nextNode.xPfx
|
||||
db.tab.del(nextKey)
|
||||
db.tab[rootRKey] = nextNode
|
||||
return nextNode
|
||||
|
||||
return node
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryInterpolate*(
|
||||
db: HexaryTreeDbRef; # Database
|
||||
rootKey: NodeKey; # Root node hash
|
||||
dbItems: var seq[RLeafSpecs]; # List of path and leaf items
|
||||
bootstrap = false; # Can create root node on-the-fly
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## From the argument list `dbItems`, leaf nodes will be added to the hexary
|
||||
## trie while interpolating the path for the leaf nodes by adding missing
|
||||
## nodes. This action is typically not a full trie rebuild. Some partial node
|
||||
## entries might have been added already, which is typical for a boundary
|
||||
## proof that comes with the `snap/1` protocol.
|
||||
##
|
||||
## If successful, there will be a complete hexary trie available with the
|
||||
## `payload` fields of the `dbItems` argument list as leaf node values. The
|
||||
## argument list `dbItems` will have been updated by registering the node
|
||||
## keys of the leaf items.
|
||||
##
|
||||
## The algorithm employed here tries to minimise hashing hexary nodes for
|
||||
## the price of re-vising the same node again.
|
||||
##
|
||||
## When interpolating, a skeleton of the hexary trie is constructed first
|
||||
## using temporary keys instead of node hashes.
|
||||
##
|
||||
## In a second run, all these temporary keys are replaced by proper node
|
||||
## hashes so that each node will be hashed only once.
|
||||
##
|
||||
if dbItems.len == 0:
|
||||
return ok() # nothing to do
|
||||
|
||||
# Handle bootstrap, dangling `rootKey`. This mode adds some pseudo
|
||||
# proof-nodes in order to keep the algorithm going.
|
||||
var addedRootNode = false
|
||||
if not db.tab.hasKey(rootKey.to(RepairKey)):
|
||||
if not bootstrap:
|
||||
return err(RootNodeMissing)
|
||||
addedRootNode = true
|
||||
db.rTreePrefill(rootKey, dbItems)
|
||||
|
||||
# ---------------------------------------
|
||||
# Construct skeleton with temporary keys
|
||||
# ---------------------------------------
|
||||
|
||||
# Walk top down and insert/complete missing account access nodes
|
||||
for n in (dbItems.len-1).countDown(0):
|
||||
let dbItem = dbItems[n]
|
||||
if dbItem.payload.len != 0:
|
||||
var
|
||||
rPath = dbItem.pathTag.hexaryPath(rootKey, db)
|
||||
repairKey = dbItem.nodeKey
|
||||
if rPath.path.len == 0 and addedRootNode:
|
||||
let node = db.tab[rootKey.to(RepairKey)]
|
||||
if db.rTreeBranchAppendleaf(node, dbItem):
|
||||
rPath = dbItem.pathTag.hexaryPath(rootKey, db)
|
||||
if repairKey.isZero and 0 < rPath.path.len and rPath.tail.len == 0:
|
||||
repairKey = rPath.path[^1].key
|
||||
dbItems[n].nodeKey = repairKey
|
||||
if repairKey.isZero:
|
||||
let
|
||||
update = rPath.rTreeInterpolate(db, dbItem.payload)
|
||||
final = dbItem.pathTag.hexaryPath(rootKey, db)
|
||||
if update != final:
|
||||
return err(AccountRepairBlocked)
|
||||
dbItems[n].nodeKey = rPath.path[^1].key
|
||||
|
||||
# --------------------------------------------
|
||||
# Replace temporary keys by proper node hashes
|
||||
# --------------------------------------------
|
||||
|
||||
# Replace temporary repair keys by proper hash based node keys.
|
||||
var reVisit: seq[NodeTag]
|
||||
for n in countDown(dbItems.len-1,0):
|
||||
let dbItem = dbItems[n]
|
||||
if not dbItem.nodeKey.isZero:
|
||||
let rPath = dbItem.pathTag.hexaryPath(rootKey, db)
|
||||
if rPath.path[^1].node.state == Mutable:
|
||||
let rc = rPath.rTreeUpdateKeys(db)
|
||||
if rc.isErr:
|
||||
reVisit.add dbItem.pathTag
|
||||
|
||||
while 0 < reVisit.len:
|
||||
var
|
||||
again: seq[NodeTag]
|
||||
changed = false
|
||||
for n,nodeTag in reVisit:
|
||||
let rc = nodeTag.hexaryPath(rootKey, db).rTreeUpdateKeys(db)
|
||||
if rc.isErr:
|
||||
again.add nodeTag
|
||||
if rc.error:
|
||||
changed = true
|
||||
if reVisit.len <= again.len and not changed:
|
||||
if addedRootNode:
|
||||
return err(InternalDbInconsistency)
|
||||
return err(RightBoundaryProofFailed)
|
||||
reVisit = again
|
||||
|
||||
# Update root node (if any). If the root node was constructed from scratch,
|
||||
# it must be consistent.
|
||||
if addedRootNode:
|
||||
let node = db.rTreeSquashRootNode(rootKey)
|
||||
if rootKey != node.convertTo(Blob).digestTo(NodeKey):
|
||||
return err(RootNodeMismatch)
|
||||
node.state = Locked
|
||||
|
||||
ok()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,458 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
eth/[common, trie/nibbles],
|
||||
results,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, hexary_nodes_helper, hexary_paths]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc `<=`(a, b: NibblesSeq): bool =
|
||||
## Compare nibbles, different lengths are padded to the right with zeros
|
||||
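## Illustration of the zero padding rule (sketch only, the shorter operand is
## padded to the right):
## ::
##   @[0x01.byte].initNibbleRange.slice(1) <= @[0x10.byte].initNibbleRange   # true
##   @[0x11.byte].initNibbleRange <= @[0x01.byte].initNibbleRange.slice(1)   # false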
let abMin = min(a.len, b.len)
|
||||
for n in 0 ..< abMin:
|
||||
if a[n] < b[n]:
|
||||
return true
|
||||
if b[n] < a[n]:
|
||||
return false
|
||||
# otherwise a[n] == b[n]
|
||||
|
||||
# Assuming zero for missing entries
|
||||
if b.len < a.len:
|
||||
for n in abMin ..< a.len: # remaining nibbles of `a` must all be zero
|
||||
if 0 < a[n]:
|
||||
return false
|
||||
true
|
||||
|
||||
proc `<`(a, b: NibblesSeq): bool =
|
||||
not (b <= a)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc complete(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
key: RepairKey|NodeKey|Blob; # Start key
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
pathLenMax: int; # Beware of loops (if any)
|
||||
doLeast: static[bool]; # Direction: *least* or *most*
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Extend path using least or most nodes without recursion.
|
||||
var uPath = typeof(path)(root: path.root, path: path.path)
|
||||
|
||||
let firstNode = key.getNode(db)
|
||||
if firstNode.isErr:
|
||||
return Result[typeof(path),HexaryError].err(firstNode.error)
|
||||
var
|
||||
key = key
|
||||
node = firstNode.value
|
||||
|
||||
while uPath.path.len < pathLenMax:
|
||||
case node.kind:
|
||||
of Leaf:
|
||||
uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
return ok(uPath) # done
|
||||
|
||||
of Extension:
|
||||
let newKey = node.eLink
|
||||
if not newKey.isZeroLink:
|
||||
let newNode = newKey.getNode(db)
|
||||
if newNode.isOK:
|
||||
uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
key = newKey
|
||||
node = newNode.value
|
||||
continue
|
||||
return err(NearbyExtensionError) # Oops, no way
|
||||
|
||||
of Branch:
|
||||
let n = block:
|
||||
when doLeast:
|
||||
node.branchNibbleMin 0
|
||||
else:
|
||||
node.branchNibbleMax 15
|
||||
if 0 <= n:
|
||||
let
|
||||
newKey = node.bLink[n]
|
||||
newNode = newKey.getNode(db)
|
||||
if newNode.isOK:
|
||||
uPath.path.add typeof(path.path[0])(key: key, node: node, nibble: n)
|
||||
key = newKey
|
||||
node = newNode.value
|
||||
continue
|
||||
return err(NearbyBranchError) # Oops, no way
|
||||
|
||||
err(NearbyNestingTooDeep)
|
||||
|
||||
|
||||
proc zeroAdjust(
|
||||
path: XPath|RPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
doLeast: static[bool]; # Direction: *least* or *most*
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Adjust empty argument path to the first node entry to the right. This
|
||||
## applies if the argument path `path` is before the first entry in the
|
||||
## database. The result is a path which is aligned with the first entry.
|
||||
proc accept(p: typeof(path); pfx: NibblesSeq): bool =
|
||||
when doLeast:
|
||||
p.tail <= pfx
|
||||
else:
|
||||
pfx <= p.tail
|
||||
|
||||
proc branchBorderNibble(w: typeof(path.path[0].node); n: int8): int8 =
|
||||
when doLeast:
|
||||
w.branchNibbleMin n
|
||||
else:
|
||||
w.branchNibbleMax n
|
||||
|
||||
if path.path.len != 0:
|
||||
return Result[typeof(path),HexaryError].ok(path)
|
||||
|
||||
let root = path.root.getNode(db)
|
||||
if root.isOk:
|
||||
block fail:
|
||||
var pfx: NibblesSeq
|
||||
case root.value.kind:
|
||||
of Branch:
|
||||
# Find first non-dangling link and assign it
|
||||
if path.tail.len == 0:
|
||||
break fail
|
||||
let n = root.value.branchBorderNibble path.tail[0].int8
|
||||
if n < 0:
|
||||
# Before or after the database range
|
||||
return err(NearbyBeyondRange)
|
||||
pfx = @[n.byte].initNibbleRange.slice(1)
|
||||
|
||||
of Extension:
|
||||
let ePfx = root.value.ePfx
|
||||
# Must be followed by a branch node
|
||||
if path.tail.len < 2 or not path.accept(ePfx):
|
||||
break fail
|
||||
let node = root.value.eLink.getNode(db)
|
||||
if node.isErr:
|
||||
break fail
|
||||
let n = node.value.branchBorderNibble path.tail[1].int8
|
||||
if n < 0:
|
||||
# Before or after the database range
|
||||
return err(NearbyBeyondRange)
|
||||
pfx = ePfx & @[n.byte].initNibbleRange.slice(1)
|
||||
|
||||
of Leaf:
|
||||
pfx = root.value.lPfx
|
||||
if not path.accept(pfx):
|
||||
# Before or after the database range
|
||||
return err(NearbyBeyondRange)
|
||||
|
||||
let newPath = pfx.padPartialPath(0).hexaryPath(path.root, db)
|
||||
if 0 < newPath.path.len:
|
||||
return ok(newPath)
|
||||
|
||||
err(NearbyEmptyPath)
|
||||
|
||||
|
||||
proc finalise(
|
||||
path: XPath|RPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
moveRight: static[bool]; # Direction of next node
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Handle some pathological cases after main processing failed
|
||||
proc beyond(p: typeof(path); pfx: NibblesSeq): bool =
|
||||
when moveRight:
|
||||
pfx < p.tail
|
||||
else:
|
||||
p.tail < pfx
|
||||
|
||||
proc branchBorderNibble(w: typeof(path.path[0].node)): int8 =
|
||||
when moveRight:
|
||||
w.branchNibbleMax 15
|
||||
else:
|
||||
w.branchNibbleMin 0
|
||||
|
||||
# Just for completeness (this case should have been handled already)
|
||||
if path.path.len == 0:
|
||||
return Result[typeof(path),HexaryError].err(NearbyEmptyPath)
|
||||
|
||||
# Check whether the path is beyond the database range
|
||||
if 0 < path.tail.len: # nothing to compare against, otherwise
|
||||
let top = path.path[^1]
|
||||
# Note that only a `Branch` node has a non-negative nibble
|
||||
if 0 <= top.nibble and top.nibble == top.node.branchBorderNibble:
|
||||
# Check the following up node
|
||||
let rc = top.node.bLink[top.nibble].getNode(db)
|
||||
if rc.isErr:
|
||||
return err(NearbyDanglingLink)
|
||||
var pfx: NibblesSeq
|
||||
case rc.value.kind:
|
||||
of Leaf:
|
||||
pfx = rc.value.lPfx
|
||||
of Extension:
|
||||
pfx = rc.value.ePfx
|
||||
of Branch:
|
||||
pfx = @[rc.value.branchBorderNibble.byte].initNibbleRange.slice(1)
|
||||
if path.beyond pfx:
|
||||
return err(NearbyBeyondRange)
|
||||
|
||||
# Pathological cases
|
||||
# * finalise right: nfffff.. for n < f or
|
||||
# * finalise left: n00000.. for 0 < n
|
||||
if path.path[0].node.kind == Branch or
|
||||
(1 < path.path.len and path.path[1].node.kind == Branch):
|
||||
return err(NearbyFailed) # no more nodes
|
||||
|
||||
err(NearbyUnexpectedNode) # error
|
||||
|
||||
|
||||
proc nearbyNext(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
moveRight: static[bool]; # Direction of next node
|
||||
pathLenMax = 64; # Beware of loops (if any)
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Unified implementation of `hexaryNearbyRight()` and `hexaryNearbyLeft()`.
|
||||
proc accept(nibble: int8): bool =
|
||||
## Accept `nibble` unless on the boundary, dependent on `moveRight`
|
||||
when moveRight:
|
||||
nibble < 15
|
||||
else:
|
||||
0 < nibble
|
||||
|
||||
proc accept(p: typeof(path); pfx: NibblesSeq): bool =
|
||||
when moveRight:
|
||||
p.tail <= pfx
|
||||
else:
|
||||
pfx <= p.tail
|
||||
|
||||
proc branchNibbleNext(w: typeof(path.path[0].node); n: int8): int8 =
|
||||
when moveRight:
|
||||
w.branchNibbleMin(n + 1)
|
||||
else:
|
||||
w.branchNibbleMax(n - 1)
|
||||
|
||||
# Some easy cases
|
||||
var path = block:
|
||||
let rc = path.zeroAdjust(db, doLeast=moveRight)
|
||||
if rc.isErr:
|
||||
return Result[typeof(path),HexaryError].err(rc.error)
|
||||
rc.value
|
||||
|
||||
var
|
||||
uPath = path
|
||||
start = true
|
||||
while 0 < uPath.path.len:
|
||||
let top = uPath.path[^1]
|
||||
case top.node.kind:
|
||||
of Leaf:
|
||||
return ok(uPath)
|
||||
of Branch:
|
||||
if top.nibble < 0 or uPath.tail.len == 0:
|
||||
return err(NearbyUnexpectedNode)
|
||||
of Extension:
|
||||
uPath.tail = top.node.ePfx & uPath.tail
|
||||
uPath.path.setLen(uPath.path.len - 1)
|
||||
continue
|
||||
|
||||
var
|
||||
step = top
|
||||
let
|
||||
uPathLen = uPath.path.len # in case of backtracking
|
||||
uPathTail = uPath.tail # in case of backtracking
|
||||
|
||||
# Look ahead checking next node
|
||||
if start:
|
||||
let
|
||||
topLink = top.node.bLink[top.nibble]
|
||||
nextNode = block:
|
||||
if topLink.isZeroLink:
|
||||
return err(NearbyDanglingLink) # error
|
||||
let rc = topLink.getNode(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error) # error
|
||||
rc.value
|
||||
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
if uPath.accept(nextNode.lPfx):
|
||||
return uPath.complete(topLink, db, pathLenMax, doLeast=moveRight)
|
||||
of Extension:
|
||||
if uPath.accept(nextNode.ePfx):
|
||||
return uPath.complete(topLink, db, pathLenMax, doLeast=moveRight)
|
||||
of Branch:
|
||||
let nextNibble = uPath.tail[0].int8
|
||||
if start and accept(nextNibble):
|
||||
# Step down and complete with a branch link on the child node
|
||||
step = typeof(path.path[0])(
|
||||
key: topLink,
|
||||
node: nextNode,
|
||||
nibble: nextNibble)
|
||||
uPath.path &= step
|
||||
|
||||
# Find the next item to the right/left of the current top entry
|
||||
let n = step.node.branchNibbleNext step.nibble
|
||||
if 0 <= n:
|
||||
uPath.path[^1].nibble = n
|
||||
return uPath.complete(
|
||||
step.node.bLink[n], db, pathLenMax, doLeast=moveRight)
|
||||
|
||||
if start:
|
||||
# Retry without look ahead
|
||||
start = false
|
||||
|
||||
# Restore `uPath` (pop temporary extra step)
|
||||
if uPathLen < uPath.path.len:
|
||||
uPath.path.setLen(uPathLen)
|
||||
uPath.tail = uPathTail
|
||||
else:
|
||||
# Pop current `Branch` node on top and append nibble to `tail`
|
||||
uPath.tail = @[top.nibble.byte].initNibbleRange.slice(1) & uPath.tail
|
||||
uPath.path.setLen(uPath.path.len - 1)
|
||||
# End while
|
||||
|
||||
# Handle some pathological cases
|
||||
return path.finalise(db, moveRight)
|
||||
|
||||
|
||||
proc nearbyNext(
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
moveRight: static[bool]; # Direction of next node
|
||||
pathLenMax = 64; # Beware of loops (if any)
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `nearbyNext()`, convenience wrapper
|
||||
let rc = baseTag.hexaryPath(rootKey, db).nearbyNext(db, moveRight)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
|
||||
let path = rc.value
|
||||
if 0 < path.path.len and path.path[^1].node.kind == Leaf:
|
||||
let nibbles = path.getNibbles
|
||||
if nibbles.len == 64:
|
||||
return ok(nibbles.getBytes.convertTo(NodeTag))
|
||||
|
||||
err(NearbyLeafExpected)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, moving and right boundary proof
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Extends the maximally extended argument path `path` to the right (i.e.
|
||||
## with non-decreasing path value). This is similar to the
|
||||
## `hexary_path.next()` function, only that this algorithm does not
|
||||
## backtrack if there are dangling links in between and rather returns
|
||||
## an error.
|
||||
##
|
||||
## In the case that there is no more leaf node to the right of the argument
|
||||
## path, the particular error code `NearbyBeyondRange` is returned.
|
||||
##
|
||||
## This code is intended for verifying a left-bound proof, that is, for
## checking that there is no leaf node *right* of a boundary path value.
|
||||
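## A hedged usage sketch (assuming `db` is a populated `HexaryTreeDbRef`,
## `rootKey` the state root and `tag` some `NodeTag`; names are hypothetical):
## ::
##   let rc = tag.hexaryPath(rootKey, db).hexaryNearbyRight(db)
##   if rc.isOk:
##     discard rc.value            # path of the nearest leaf at or right of `tag`
##   elif rc.error == NearbyBeyondRange:
##     discard                     # no leaf right of `tag` => right boundary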
path.nearbyNext(db, moveRight=true)
|
||||
|
||||
proc hexaryNearbyRight*(
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
|
||||
## than `RPath` or `XPath` ones.
|
||||
baseTag.nearbyNext(rootKey, db, moveRight=true)
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Similar to `hexaryNearbyRight()`.
|
||||
##
|
||||
## This code is intended for verifying a right-bound proof, that is, for
## checking that there is no leaf node *left* of a boundary path value.
|
||||
path.nearbyNext(db, moveRight=false)
|
||||
|
||||
proc hexaryNearbyLeft*(
|
||||
baseTag: NodeTag; # Some node
|
||||
rootKey: NodeKey; # State root
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[NodeTag,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
|
||||
baseTag.nearbyNext(rootKey, db, moveRight=false)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryNearbyRightMissing*(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): Result[bool,HexaryError]
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## Returns `true` if the maximally extended argument path `path` is the
|
||||
## rightmost on the hexary trie database. It verifies that there is no more
|
||||
## leaf entry to the right of the argument `path`. This function is an
|
||||
## alternative to
|
||||
## ::
|
||||
## let rc = path.hexaryNearbyRight(db)
|
||||
## if rc.isOk:
|
||||
## # not at the end => false
|
||||
## ...
|
||||
## elif rc.error != NearbyBeyondRange:
|
||||
## # problem with database => error
|
||||
## ...
|
||||
## else:
|
||||
## # no more nodes => true
|
||||
## ...
|
||||
## and is intended mainly for debugging.
|
||||
if path.path.len == 0:
|
||||
return err(NearbyEmptyPath)
|
||||
if 0 < path.tail.len:
|
||||
return err(NearbyPathTail)
|
||||
|
||||
let top = path.path[^1]
|
||||
if top.node.kind != Branch or top.nibble < 0:
|
||||
return err(NearbyBranchError)
|
||||
|
||||
let nextNode = block:
|
||||
let topLink = top.node.bLink[top.nibble]
|
||||
if topLink.isZeroLink:
|
||||
return err(NearbyDanglingLink) # error
|
||||
let rc = topLink.getNode(db)
|
||||
if rc.isErr:
|
||||
return err(rc.error) # error
|
||||
rc.value
|
||||
|
||||
case nextNode.kind
|
||||
of Leaf:
|
||||
return ok(nextNode.lPfx < path.tail)
|
||||
of Extension:
|
||||
return ok(nextNode.ePfx < path.tail)
|
||||
of Branch:
|
||||
return ok(nextNode.branchNibbleMin(path.tail[0].int8) < 0)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,153 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Helpers to treat persistent and in-memory databases in a similar way
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
results,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc isZeroLink*(a: Blob): bool =
|
||||
## Persistent database has `Blob` as key
|
||||
a.len == 0
|
||||
|
||||
proc isZeroLink*(a: RepairKey): bool =
|
||||
## In-memory repair database has `RepairKey` as key
|
||||
a.isZero
|
||||
|
||||
proc `==`*(a, b: XNodeObj): bool =
|
||||
if a.kind == b.kind:
|
||||
case a.kind:
|
||||
of Leaf:
|
||||
return a.lPfx == b.lPfx and a.lData == b.lData
|
||||
of Extension:
|
||||
return a.ePfx == b.ePfx and a.eLink == b.eLink
|
||||
of Branch:
|
||||
return a.bLink == b.bLink
|
||||
|
||||
# ------------------
|
||||
|
||||
proc toBranchNode*(
|
||||
rlp: Rlp
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError]} =
|
||||
var rlp = rlp
|
||||
XNodeObj(kind: Branch, bLink: rlp.read(array[17,Blob]))
|
||||
|
||||
proc toLeafNode*(
|
||||
rlp: Rlp;
|
||||
pSegm: NibblesSeq
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError]} =
|
||||
XNodeObj(kind: Leaf, lPfx: pSegm, lData: rlp.listElem(1).toBytes)
|
||||
|
||||
proc toExtensionNode*(
|
||||
rlp: Rlp;
|
||||
pSegm: NibblesSeq
|
||||
): XNodeObj
|
||||
{.gcsafe, raises: [RlpError]} =
|
||||
XNodeObj(kind: Extension, ePfx: pSegm, eLink: rlp.listElem(1).toBytes)
|
||||
|
||||
# ------------------
|
||||
|
||||
proc getNode*(
|
||||
nodeKey: RepairKey; # Node key
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): Result[RNodeRef,HexaryError]
|
||||
{.gcsafe, raises: [KeyError].} =
|
||||
## Fetch the node referenced by the argument `nodeKey` (if any)
|
||||
if db.tab.hasKey(nodeKey):
|
||||
return ok(db.tab[nodeKey])
|
||||
err(NearbyDanglingLink)
|
||||
|
||||
proc getNode*(
|
||||
nodeKey: openArray[byte]; # Node key
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[XNodeObj,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `getNode()` for a persistent database
|
||||
let nodeData = nodeKey.getFn
|
||||
if 0 < nodeData.len:
|
||||
let nodeRlp = rlpFromBytes nodeData
|
||||
case nodeRlp.listLen:
|
||||
of 17:
|
||||
return ok(nodeRlp.toBranchNode)
|
||||
of 2:
|
||||
let (isLeaf,pfx) = hexPrefixDecode nodeRlp.listElem(0).toBytes
|
||||
if isLeaf:
|
||||
return ok(nodeRlp.toLeafNode pfx)
|
||||
else:
|
||||
return ok(nodeRlp.toExtensionNode pfx)
|
||||
else:
|
||||
return err(NearbyGarbledNode)
|
||||
err(NearbyDanglingLink)
|
||||
|
||||
proc getNode*(
|
||||
nodeKey: NodeKey; # Node key
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): Result[XNodeObj,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Variant of `getNode()` for `NodeKey` arguments
|
||||
nodeKey.ByteArray32.getNode(getFn)
|
||||
|
||||
# ------------------
|
||||
|
||||
proc branchNibbleMin*(node: XNodeObj|RNodeRef; minInx: int8): int8 =
|
||||
## Find the least index for an argument branch `node` link with index
|
||||
## greater or equal to the argument `minInx`.
|
||||
if node.kind == Branch:
|
||||
for n in minInx .. 15:
|
||||
if not node.bLink[n].isZeroLink:
|
||||
return n
|
||||
-1
|
||||
|
||||
proc branchNibbleMax*(node: XNodeObj|RNodeRef; maxInx: int8): int8 =
|
||||
## Find the greatest index for an argument branch `node` link with index
|
||||
## less or equal to the argument `maxInx`.
|
||||
if node.kind == Branch:
|
||||
for n in maxInx.countDown 0:
|
||||
if not node.bLink[n].isZeroLink:
|
||||
return n
|
||||
-1
|
||||
|
||||
# --------------------
|
||||
|
||||
proc padPartialPath*(pfx: NibblesSeq; dblNibble: byte): NodeKey =
|
||||
## Extend (or cut) the argument `pfx` nibbles sequence and generate a `NodeKey`.
|
||||
## This function must be handled with some care regarding a meaningful value
|
||||
## for the `dblNibble` argument. The values `0` or `255` are typically used
|
||||
## to create the minimum or maximum envelope value from the `pfx` argument.
|
||||
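## Illustrative sketch only (hypothetical prefix value):
## ::
##   let pfx = @[0x12.byte].initNibbleRange        # partial path "12"
##   let lowKey  = pfx.padPartialPath(0)           # 0x1200..00, envelope minimum
##   let highKey = pfx.padPartialPath(255)         # 0x12ff..ff, envelope maximum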
# Pad with zeroes
|
||||
var padded: NibblesSeq
|
||||
|
||||
let padLen = 64 - pfx.len
|
||||
if 0 <= padLen:
|
||||
padded = pfx & dblNibble.repeat(padLen div 2).initNibbleRange
|
||||
if (padLen and 1) == 1:
|
||||
padded = padded & @[dblNibble].initNibbleRange.slice(1)
|
||||
else:
|
||||
let nope = seq[byte].default.initNibbleRange
|
||||
padded = pfx.slice(0,64) & nope # nope forces re-alignment
|
||||
|
||||
let bytes = padded.getBytes
|
||||
(addr result.ByteArray32[0]).copyMem(unsafeAddr bytes[0], bytes.len)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,337 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Find node paths in hexary tries.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, sets, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
stew/[byteutils, interval_set],
|
||||
"../.."/[constants, range_desc],
|
||||
"."/[hexary_desc, hexary_nodes_helper]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
#proc pp(w: Blob; db: HexaryTreeDbRef): string =
|
||||
# w.convertTo(RepairKey).pp(db)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc to(a: RepairKey; T: type RepairKey): RepairKey =
|
||||
## Needed for generic function
|
||||
a
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use
|
||||
discard result.init(key.ByteArray33[1 .. 32])
|
||||
|
||||
proc getNibblesImpl(path: XPath|RPath; start = 0): NibblesSeq =
|
||||
## Re-build the key path
|
||||
for n in start ..< path.path.len:
|
||||
let it = path.path[n]
|
||||
case it.node.kind:
|
||||
of Branch:
|
||||
result = result & @[it.nibble.byte].initNibbleRange.slice(1)
|
||||
of Extension:
|
||||
result = result & it.node.ePfx
|
||||
of Leaf:
|
||||
result = result & it.node.lPfx
|
||||
result = result & path.tail
|
||||
|
||||
proc getNibblesImpl(path: XPath|RPath; start, maxLen: int): NibblesSeq =
|
||||
## Variant of `getNibblesImpl()` for partial rebuild
|
||||
for n in start ..< min(path.path.len, maxLen):
|
||||
let it = path.path[n]
|
||||
case it.node.kind:
|
||||
of Branch:
|
||||
result = result & @[it.nibble.byte].initNibbleRange.slice(1)
|
||||
of Extension:
|
||||
result = result & it.node.ePfx
|
||||
of Leaf:
|
||||
result = result & it.node.lPfx
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rootPathExtend(
|
||||
path: RPath|XPath; # Partially expanded path
|
||||
db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
|
||||
): auto
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## For the given path, extend to the longest possible `db` database
|
||||
## path following the argument `path.tail`.
|
||||
result = path
|
||||
|
||||
when typeof(path) is RPath:
|
||||
var key = path.root
|
||||
else:
|
||||
var key = path.root.to(Blob)
|
||||
|
||||
while true:
|
||||
let rc = key.getNode(db)
|
||||
if rc.isErr:
|
||||
break
|
||||
let node = rc.value
|
||||
|
||||
case node.kind:
|
||||
of Leaf:
|
||||
if result.tail.len == result.tail.sharedPrefixLen(node.lPfx):
|
||||
# Bingo, got full path
|
||||
result.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
result.tail = EmptyNibbleSeq
|
||||
return
|
||||
|
||||
of Branch:
|
||||
if result.tail.len == 0:
|
||||
result.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
return
|
||||
let nibble = result.tail[0].int8
|
||||
if node.bLink[nibble].isZeroLink:
|
||||
return
|
||||
result.path.add typeof(path.path[0])(key: key, node: node, nibble: nibble)
|
||||
result.tail = result.tail.slice(1)
|
||||
key = node.bLink[nibble]
|
||||
|
||||
of Extension:
|
||||
if result.tail.len == 0:
|
||||
result.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
result.tail = EmptyNibbleSeq # clean up internal indexing
|
||||
return
|
||||
if node.ePfx.len != result.tail.sharedPrefixLen(node.ePfx):
|
||||
return
|
||||
result.path.add typeof(path.path[0])(key: key, node: node, nibble: -1)
|
||||
result.tail = result.tail.slice(node.ePfx.len)
|
||||
key = node.eLink
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getNibbles*(path: XPath|RPath; start = 0): NibblesSeq =
|
||||
## Re-build the key path
|
||||
path.getNibblesImpl(start)
|
||||
|
||||
proc getNibbles*(path: XPath|RPath; start, maxLen: int): NibblesSeq =
|
||||
## Variant of `getNibbles()`
|
||||
path.getNibblesImpl(start, maxLen)
|
||||
|
||||
|
||||
proc getPartialPath*(path: XPath|RPath): Blob =
|
||||
## Convert to hex encoded partial path as used in `eth` or `snap` protocol
|
||||
## where full leaf paths of nibble length 64 are encoded as 32 byte `Blob`
|
||||
## and non-leaf partial paths are *compact encoded* (i.e. per the Ethereum
|
||||
## wire protocol.)
|
||||
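## For illustration (hex prefix encoding per the Ethereum wire protocol):
## ::
##   # leaf path of 64 nibbles       => raw 32 byte blob
##   # non-leaf partial path "1200"  => @[0x00.byte, 0x12, 0x00]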
let
|
||||
isLeaf = (0 < path.path.len and path.path[^1].node.kind == Leaf)
|
||||
nibbles = path.getNibbles
|
||||
if isLeaf and nibbles.len == 64:
|
||||
nibbles.getBytes
|
||||
else:
|
||||
nibbles.hexPrefixEncode(isLeaf)
|
||||
|
||||
|
||||
proc leafData*(path: XPath): Blob =
|
||||
## Return the leaf data from a successful `XPath` computation (if any.)
|
||||
if path.tail.len == 0 and 0 < path.path.len:
|
||||
let node = path.path[^1].node
|
||||
case node.kind:
|
||||
of Branch:
|
||||
return node.bLink[16]
|
||||
of Leaf:
|
||||
return node.lData
|
||||
of Extension:
|
||||
discard
|
||||
|
||||
proc leafData*(path: RPath): Blob =
|
||||
## Return the leaf data from a successful `RPath` computation (if any.)
|
||||
if path.tail.len == 0 and 0 < path.path.len:
|
||||
let node = path.path[^1].node
|
||||
case node.kind:
|
||||
of Branch:
|
||||
return node.bData
|
||||
of Leaf:
|
||||
return node.lData
|
||||
of Extension:
|
||||
discard
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, hexary path constructors
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: NibblesSeq; # partial path to resolve
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
): RPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Compute the longest possible repair tree `db` path matching the `nodeKey`
|
||||
## nibbles. The `nodeKey` path argument comes before the `db` one for
|
||||
## supporting a more functional notation.
|
||||
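## A rough usage sketch (hypothetical names, assuming an in-memory repair
## database `db`, a state root `rootKey` and an account path `accountKey`):
## ::
##   let rPath = accountKey.to(NibblesSeq).hexaryPath(rootKey, db)
##   let payload = rPath.leafData   # non-empty only if a leaf node was reached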
RPath(root: rootKey.to(RepairKey), tail: partialPath).rootPathExtend(db)
|
||||
|
||||
proc hexaryPath*(
|
||||
nodeKey: NodeKey;
|
||||
rootKey: NodeKey|RepairKey;
|
||||
db: HexaryTreeDbRef;
|
||||
): RPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a node key.
|
||||
nodeKey.to(NibblesSeq).hexaryPath(rootKey, db)
|
||||
|
||||
proc hexaryPath*(
|
||||
nodeTag: NodeTag;
|
||||
rootKey: NodeKey|RepairKey;
|
||||
db: HexaryTreeDbRef;
|
||||
): RPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a node tag.
|
||||
nodeTag.to(NodeKey).hexaryPath(rootKey, db)
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: Blob;
|
||||
rootKey: NodeKey|RepairKey;
|
||||
db: HexaryTreeDbRef;
|
||||
): RPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a hex encoded partial path.
|
||||
partialPath.hexPrefixDecode[1].hexaryPath(rootKey, db)
|
||||
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: NibblesSeq; # partial path to resolve
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Compute the longest possible path on an arbitrary hexary trie.
|
||||
XPath(root: rootKey, tail: partialPath).rootPathExtend(getFn)
|
||||
|
||||
proc hexaryPath*(
|
||||
nodeKey: NodeKey;
|
||||
rootKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a node key.
|
||||
nodeKey.to(NibblesSeq).hexaryPath(rootKey, getFn)
|
||||
|
||||
proc hexaryPath*(
|
||||
nodeTag: NodeTag;
|
||||
rootKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a node tag.
|
||||
nodeTag.to(NodeKey).hexaryPath(rootKey, getFn)
|
||||
|
||||
proc hexaryPath*(
|
||||
partialPath: Blob;
|
||||
rootKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
): XPath
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPath` for a hex encoded partial path.
|
||||
partialPath.hexPrefixDecode[1].hexaryPath(rootKey, getFn)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers, partial paths resolvers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: NibblesSeq; # Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Returns the `NodeKey` equivalent for the argument `partialPath` if this
|
||||
## node is available in the database. If the argument flag `missingOk` is
|
||||
## set `true` and the last node addressed by the argument path is missing,
|
||||
## its key is returned as well.
|
||||
let steps = partialPath.hexaryPath(rootKey, db)
|
||||
if 0 < steps.path.len and steps.tail.len == 0:
|
||||
let top = steps.path[^1]
|
||||
# If the path was fully exhausted and the node exists for a `Branch` node,
|
||||
# then the `nibble` is `-1`.
|
||||
if top.nibble < 0 and top.key.isNodeKey:
|
||||
return ok(top.key.convertTo(NodeKey))
|
||||
if missingOk:
|
||||
let link = top.node.bLink[top.nibble]
|
||||
if not link.isZero and link.isNodeKey:
|
||||
return ok(link.convertTo(NodeKey))
|
||||
err()
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: Blob; # Hex encoded partial path
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for hex encoded partial path.
|
||||
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, db, missingOk)
|
||||
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: NibblesSeq; # Hex encoded partial path
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for persistent database.
|
||||
let steps = partialPath.hexaryPath(rootKey, getFn)
|
||||
if 0 < steps.path.len and steps.tail.len == 0:
|
||||
let top = steps.path[^1]
|
||||
# If the path was fully exhausted and the node exists for a `Branch` node,
|
||||
# then the `nibble` is `-1`.
|
||||
if top.nibble < 0:
|
||||
return ok(top.key.convertTo(NodeKey))
|
||||
if missingOk:
|
||||
let link = top.node.bLink[top.nibble]
|
||||
if 0 < link.len:
|
||||
return ok(link.convertTo(NodeKey))
|
||||
err()
|
||||
|
||||
proc hexaryPathNodeKey*(
|
||||
partialPath: Blob; # Partial database path
|
||||
rootKey: NodeKey; # State root
|
||||
getFn: HexaryGetFn; # Database abstraction
|
||||
missingOk = false; # Also return key for missing node
|
||||
): Result[NodeKey,void]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Variant of `hexaryPathNodeKey()` for persistent database and
|
||||
## hex encoded partial path.
|
||||
partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, getFn, missingOk)
|
||||
|
||||
proc hexaryPathNodeKeys*(
|
||||
partialPaths: seq[Blob]; # Partial paths segments
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryTreeDbRef; # Database
|
||||
missingOk = false; # Also return key for missing node
|
||||
): HashSet[NodeKey]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Convert a list of path segments to a set of node keys
|
||||
partialPaths.toSeq
|
||||
.mapIt(it.hexaryPathNodeKey(rootKey, db, missingOk))
|
||||
.filterIt(it.isOk)
|
||||
.mapIt(it.value)
|
||||
.toHashSet
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,334 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[sequtils, sets, tables],
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p, trie/nibbles],
|
||||
stew/[byteutils, interval_set],
|
||||
../../../protocol,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
|
||||
|
||||
type
|
||||
RangeLeaf* = object
|
||||
key*: NodeKey ## Leaf node path
|
||||
data*: Blob ## Leaf node data
|
||||
|
||||
RangeProof* = object
|
||||
base*: NodeTag ## No node between `base` and `leafs[0]`
|
||||
leafs*: seq[RangeLeaf] ## List of consecutive leaf nodes
|
||||
leafsLast*: bool ## If no leaf exceeds `max(base,leafs[])`
|
||||
leafsSize*: int ## RLP encoded size of `leafs` on wire
|
||||
proof*: seq[SnapProof] ## Boundary proof
|
||||
proofSize*: int ## RLP encoded size of `proof` on wire
|
||||
|
||||
const
|
||||
proofNodeSizeMax = 532
|
||||
## Branch node with all branches `high(UInt256)` within RLP list
|
||||
|
||||
veryLongDuration = 60.weeks
|
||||
## Longer than any collection of data will probably take
|
||||
|
||||
proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) {.gcsafe.}
|
||||
proc hexaryRangeRlpSize*(blobLen: int): int {.gcsafe.}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use (if at all, unless debugging)
|
||||
(addr result.ByteArray32[0]).copyMem(unsafeAddr key.ByteArray33[1], 32)
|
||||
|
||||
proc rlpPairSize(aLen: int; bRlpLen: int): int =
|
||||
## Size calculation for an RLP encoded pair `[<a>,<rb>]` with a blob `a` of
## length `aLen` and an already RLP encoded part `rb` of length `bRlpLen`.
|
||||
let aRlpLen = hexaryRangeRlpSize(aLen)
|
||||
if bRlpLen < high(int) - aRlpLen:
|
||||
hexaryRangeRlpSize(aRlpLen + bRlpLen)
|
||||
else:
|
||||
high(int)
|
||||
|
||||
proc timeIsOver(stopAt: Moment): bool =
|
||||
## Helper (avoids `chronos` import when running generic function)
|
||||
stopAt <= chronos.Moment.now()
|
||||
|
||||
proc stopAt(timeout: chronos.Duration): Moment =
|
||||
## Helper (avoids `chronos` import when running generic function)
|
||||
chronos.Moment.now() + timeout
|
||||
|
||||
proc nonLeafPathNodes(
|
||||
nodeTag: NodeTag; # Left boundary
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): HashSet[SnapProof]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Helper for `updateProof()`
|
||||
nodeTag
|
||||
.hexaryPath(rootKey, db)
|
||||
.path
|
||||
.mapIt(it.node)
|
||||
.filterIt(it.kind != Leaf)
|
||||
.mapIt(it.convertTo(Blob).to(SnapProof))
|
||||
.toHashSet
|
||||
|
||||
proc allPathNodes(
|
||||
nodeTag: NodeTag; # Left boundary
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
): HashSet[SnapProof]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Helper for `updateProof()`
|
||||
nodeTag
|
||||
.hexaryPath(rootKey, db)
|
||||
.path
|
||||
.mapIt(it.node)
|
||||
.mapIt(it.convertTo(Blob).to(SnapProof))
|
||||
.toHashSet
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template collectLeafs(
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
iv: NodeTagRange; # Proofed range of leaf paths
|
||||
nSizeLimit: int; # List of RLP encoded data must be smaller
|
||||
stopAt: Moment; # limit search time
|
||||
): auto =
|
||||
## Collect trie database leafs prototype. This directive is provided as
|
||||
## `template` for avoiding varying exception annotations.
|
||||
var
|
||||
rc: Result[RangeProof,HexaryError]
|
||||
ttd = stopAt
|
||||
block body:
|
||||
let
|
||||
nodeMax = maxPt(iv) # `inject` is for debugging (if any)
|
||||
var
|
||||
nodeTag = minPt(iv)
|
||||
prevTag: NodeTag
|
||||
rls: RangeProof
|
||||
|
||||
# Set up base node, the nearest node before `iv.minPt`
|
||||
if 0.to(NodeTag) < nodeTag:
|
||||
let rx = nodeTag.hexaryNearbyLeft(rootKey, db)
|
||||
if rx.isOk:
|
||||
rls.base = rx.value
|
||||
elif rx.error != NearbyBeyondRange:
|
||||
rc = typeof(rc).err(rx.error)
|
||||
break body
|
||||
|
||||
# Fill leaf nodes (at least one) from interval range unless size reached
|
||||
while nodeTag <= nodeMax or rls.leafs.len == 0:
|
||||
# The following logic might be sub-optimal. A strict version of the
|
||||
# `next()` function that stops with an error at dangling links could
|
||||
# be faster if the leaf nodes are not too far apart on the hexary trie.
|
||||
let
|
||||
xPath = block:
|
||||
let rx = nodeTag.hexaryPath(rootKey,db).hexaryNearbyRight(db)
|
||||
if rx.isErr:
|
||||
if rx.error != NearbyBeyondRange:
|
||||
rc = typeof(rc).err(rx.error)
|
||||
else:
|
||||
rls.leafsLast = true
|
||||
rc = typeof(rc).ok(rls) # done ok, last node reached
|
||||
break body
|
||||
rx.value
|
||||
rightKey = getPartialPath(xPath).convertTo(NodeKey)
|
||||
rightTag = rightKey.to(NodeTag)
|
||||
|
||||
# Prevents semi-endless looping
|
||||
if rightTag <= prevTag and 0 < rls.leafs.len:
|
||||
# Oops, should have been tackled by `hexaryNearbyRight()`
|
||||
rc = typeof(rc).err(FailedNextNode)
|
||||
break body # stop here
|
||||
|
||||
let (pairLen,listLen) =
|
||||
hexaryRangeRlpLeafListSize(xPath.leafData.len, rls.leafsSize)
|
||||
|
||||
if listLen <= nSizeLimit:
|
||||
rls.leafsSize += pairLen
|
||||
else:
|
||||
break # collected enough
|
||||
|
||||
rls.leafs.add RangeLeaf(
|
||||
key: rightKey,
|
||||
data: xPath.leafData)
|
||||
|
||||
if timeIsOver(ttd):
|
||||
break # timeout
|
||||
|
||||
prevTag = nodeTag
|
||||
nodeTag = rightTag + 1.u256
|
||||
# End loop
|
||||
|
||||
# Count outer RLP wrapper
|
||||
if 0 < rls.leafs.len:
|
||||
rls.leafsSize = hexaryRangeRlpSize rls.leafsSize
|
||||
|
||||
rc = typeof(rc).ok(rls)
|
||||
# End body
|
||||
|
||||
rc
|
||||
|
||||
|
||||
template updateProof(
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey|RepairKey; # State root
|
||||
rls: RangeProof; # Set of collected leafs and a `base`
|
||||
): auto =
|
||||
## Complement leafs list by adding proof nodes. This directive is provided as
|
||||
## `template` for avoiding varying exception annotations.
|
||||
var rp = rls
|
||||
|
||||
if 0.to(NodeTag) < rp.base or not rp.leafsLast:
|
||||
var proof = allPathNodes(rls.base, rootKey, db)
|
||||
if 0 < rls.leafs.len:
|
||||
proof.incl nonLeafPathNodes(rls.leafs[^1].key.to(NodeTag), rootKey, db)
|
||||
|
||||
rp.proof = toSeq(proof)
|
||||
rp.proofSize = hexaryRangeRlpSize rp.proof.foldl(a + b.to(Blob).len, 0)
|
||||
|
||||
rp
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryRangeLeafsProof*(
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey; # State root
|
||||
iv: NodeTagRange; # Proofed range of leaf paths
|
||||
nSizeLimit = high(int); # List of RLP encoded data must be smaller
|
||||
timeout = veryLongDuration; # Limit retrieval time
|
||||
): Result[RangeProof,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Collect trie database leafs prototype and add proof.
|
||||
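## A hedged sketch of intended use (hypothetical size limit, assuming `getFn`
## is a `HexaryGetFn` over the persistent database and `rootKey` the state
## root):
## ::
##   let iv = NodeTagRange.new(low(NodeTag), high(NodeTag))
##   let rc = getFn.hexaryRangeLeafsProof(rootKey, iv, nSizeLimit = 1024 * 1024)
##   if rc.isOk:
##     let rp = rc.value           # `rp.leafs` and `rp.proof` for a snap/1 reply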
let rc = db.collectLeafs(rootKey, iv, nSizeLimit, stopAt(timeout))
|
||||
if rc.isErr:
|
||||
err(rc.error)
|
||||
else:
|
||||
ok(db.updateProof(rootKey, rc.value))
|
||||
|
||||
proc hexaryRangeLeafsProof*(
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey; # State root
|
||||
rp: RangeProof; # Set of collected leafs and a `base`
|
||||
): RangeProof
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Complement leafs list by adding proof nodes to the argument list
|
||||
## `rp.leafs`.
|
||||
db.updateProof(rootKey, rp)
|
||||
|
||||
|
||||
proc hexaryRangeInflate*(
|
||||
db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
|
||||
rootKey: NodeKey; # State root
|
||||
nodeKey: NodeTag; # Centre of inflated interval
|
||||
): NodeTagRange
|
||||
{.gcsafe, raises: [CatchableError]} =
|
||||
## Calculate the largest leaf range interval containing only the argument
|
||||
## `nodeKey`.
|
||||
##
|
||||
## If the database is fully allocated, then the returned interval ends right
|
||||
## before or after the next neighbour leaf node, or at the range type
|
||||
## boundaries `low(NodeTag)` or `high(NodeTag)`.
|
||||
##
|
||||
## If the database is partially allocated only and some of the neighbour
|
||||
## nodes are missing, the returned interval is not extended towards this
|
||||
## end.
|
||||
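## Sketch (illustrative only, assuming `db`, `rootKey` and an allocated leaf
## at path `tag`):
## ::
##   let iv = db.hexaryRangeInflate(rootKey, tag)
##   # `iv` contains `tag` and no other allocated leaf path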
var
|
||||
leftPt = nodeKey
|
||||
rightPt = nodeKey
|
||||
|
||||
if low(NodeTag) < nodeKey:
|
||||
let
|
||||
pt = nodeKey - 1.u256
|
||||
rc = pt.hexaryPath(rootKey,db).hexaryNearbyLeft(db)
|
||||
if rc.isOk:
|
||||
leftPt = rc.value.getPartialPath.convertTo(NodeKey).to(NodeTag) + 1.u256
|
||||
elif rc.error == NearbyBeyondRange:
|
||||
leftPt = low(NodeTag)
|
||||
|
||||
if nodeKey < high(NodeTag):
|
||||
let
|
||||
pt = nodeKey + 1.u256
|
||||
rc = pt.hexaryPath(rootKey,db).hexaryNearbyRight(db)
|
||||
if rc.isOk:
|
||||
rightPt = rc.value.getPartialPath.convertTo(NodeKey).to(NodeTag) - 1.u256
|
||||
elif rc.error == NearbyBeyondRange:
|
||||
rightPt = high(NodeTag)
|
||||
|
||||
NodeTagRange.new(leftPt, rightPt)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaryRangeRlpSize*(blobLen: int): int =
|
||||
## Returns the size of RLP encoded <blob> of argument length `blobLen`.
|
||||
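## Worked examples of the length rules below (return values of this function):
## ::
##   hexaryRangeRlpSize(1)    # => 2   (length prefix byte + 1 data byte)
##   hexaryRangeRlpSize(100)  # => 102 (prefix byte, one length byte, 100 bytes)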
if blobLen < 56:
|
||||
return blobLen + 1
|
||||
if blobLen < (1 shl (8 * 1)):
|
||||
return blobLen + 2
|
||||
if blobLen < (1 shl (8 * 2)):
|
||||
return blobLen + 3
|
||||
if blobLen < (1 shl (8 * 3)):
|
||||
return blobLen + 4
|
||||
|
||||
when sizeof(int) < 8:
|
||||
if blobLen < (1 shl (8 * 4)):
|
||||
return blobLen + 5
|
||||
if blobLen < (1 shl (8 * 5)):
|
||||
return blobLen + 6
|
||||
if blobLen < (1 shl (8 * 6)):
|
||||
return blobLen + 7
|
||||
if blobLen < (1 shl (8 * 7)):
|
||||
return blobLen + 8
|
||||
|
||||
if blobLen < high(int) - (1 + sizeof(int)):
|
||||
blobLen + 1 + sizeof(int)
|
||||
else:
|
||||
high(int)
|
||||
|
||||
proc hexaryRangeRlpLeafListSize*(blobLen: int; lstLen = 0): (int,int) =
|
||||
## Size calculation for an RLP encoded list `[[<key>,<blob>],a,b,..]`
|
||||
## where a,b,.. are from a sequence of the same format `[<keyA>,<blobA>]`,
|
||||
## `[<keyB>,<blobB>]`,... The size of the blob is the argument `blobLen`,
## and the total size of the sequence is `lstLen`.
|
||||
##
|
||||
## The function returns `(x,y)`, the size `x` of the RLP encoded pair
|
||||
## `[<key>,<blob>]` and the total size `y` of the complete RLP encoded list
|
||||
## `[[<key>,<blob>],a,b,..]`.
|
||||
let pairLen = blobLen.rlpPairSize(33)
|
||||
if lstLen == 0:
|
||||
(pairLen, hexaryRangeRlpSize(pairLen))
|
||||
elif lstLen < high(int) - lstLen:
|
||||
(pairLen, hexaryRangeRlpSize(pairLen + lstLen))
|
||||
else:
|
||||
(pairLen, high(int))
|
||||
|
||||
proc hexaryRangeRlpNodesListSizeMax*(n: int): int =
|
||||
## Maximal size needed to RLP encode `n` nodes (handy for calculating the
|
||||
## space needed to store proof nodes.)
|
||||
const nMax = high(int) div proofNodeSizeMax
|
||||
if n <= nMax:
|
||||
hexaryRangeRlpSize(n * proofNodeSizeMax)
|
||||
else:
|
||||
high(int)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,176 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2018-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or
|
||||
# distributed except according to those terms.
|
||||
|
||||
## Bulk import loader for rocksdb
|
||||
|
||||
import
|
||||
std/os, # std/[sequtils, strutils],
|
||||
eth/common/eth_types,
|
||||
rocksdb/lib/librocksdb,
|
||||
rocksdb,
|
||||
../../../../db/kvstore_rocksdb
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
type
|
||||
RockyBulkLoadRef* = ref object of RootObj
|
||||
db: RocksStoreRef
|
||||
dbOption: ptr rocksdb_options_t
|
||||
envOption: ptr rocksdb_envoptions_t
|
||||
importOption: ptr rocksdb_ingestexternalfileoptions_t
|
||||
writer: ptr rocksdb_sstfilewriter_t
|
||||
filePath: string
|
||||
csError: string
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type RockyBulkLoadRef;
|
||||
db: RocksStoreRef;
|
||||
envOption: ptr rocksdb_envoptions_t
|
||||
): T =
|
||||
## Create a new bulk load descriptor.
|
||||
result = T(
|
||||
db: db,
|
||||
envOption: envOption,
|
||||
importOption: rocksdb_ingestexternalfileoptions_create())
|
||||
|
||||
doAssert not result.importOption.isNil
|
||||
doAssert not envOption.isNil
|
||||
|
||||
proc init*(T: type RockyBulkLoadRef; db: RocksStoreRef): T =
|
||||
## Variant of `init()`
|
||||
RockyBulkLoadRef.init(db, rocksdb_envoptions_create())
|
||||
|
||||
proc clearCacheFile*(db: RocksStoreRef; fileName: string): bool
|
||||
{.gcsafe, raises: [OSError].} =
|
||||
## Remove left-over cache file from an incomplete previous session. The
|
||||
## return value `true` indicates that a cache file was detected.
|
||||
let filePath = fileName
|
||||
if filePath.fileExists:
|
||||
filePath.removeFile
|
||||
return true
|
||||
|
||||
proc destroy*(rbl: RockyBulkLoadRef) {.gcsafe, raises: [OSError].} =
|
||||
## Destructor, free memory resources and delete temporary file. This function
|
||||
## can always be called even though `finish()` will call `destroy()`
|
||||
## automatically if successful.
|
||||
##
|
||||
## Note that after calling `destroy()`, the `RockyBulkLoadRef` descriptor is
|
||||
## reset and must not be used anymore with any function (different from
|
||||
## `destroy()`.)
|
||||
##
|
||||
if not rbl.writer.isNil:
|
||||
rbl.writer.rocksdb_sstfilewriter_destroy()
|
||||
if not rbl.dbOption.isNil:
|
||||
rbl.dbOption.rocksdb_options_destroy()
|
||||
if not rbl.envOption.isNil:
|
||||
rbl.envOption.rocksdb_envoptions_destroy()
|
||||
if not rbl.importOption.isNil:
|
||||
rbl.importOption.rocksdb_ingestexternalfileoptions_destroy()
|
||||
if 0 < rbl.filePath.len:
|
||||
rbl.filePath.removeFile
|
||||
rbl[].reset
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, getters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc lastError*(rbl: RockyBulkLoadRef): string =
|
||||
## Get last error explainer
|
||||
rbl.csError
|
||||
|
||||
proc store*(rbl: RockyBulkLoadRef): RocksDbReadWriteRef =
|
||||
## Provide the descriptor for backend functions as defined in `rocksdb`.
|
||||
rbl.db.rocksDb()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc begin*(rbl: RockyBulkLoadRef; fileName: string): bool =
|
||||
## Begin a new bulk load session storing data into a temporary cache file
|
||||
## `fileName`. When finished, this file will be directly imported into the
|
||||
## database.
|
||||
|
||||
rbl.dbOption = rocksdb_options_create()
|
||||
rbl.writer = rocksdb_sstfilewriter_create(rbl.envOption, rbl.dbOption)
|
||||
if rbl.writer.isNil:
|
||||
rbl.csError = "Cannot create sst writer session"
|
||||
return false
|
||||
|
||||
rbl.csError = ""
|
||||
let filePath = fileName
|
||||
var csError: cstring
|
||||
rbl.writer.rocksdb_sstfilewriter_open(fileName, cast[cstringArray](csError.addr))
|
||||
if not csError.isNil:
|
||||
rbl.csError = $csError
|
||||
return false
|
||||
|
||||
rbl.filePath = filePath
|
||||
return true
|
||||
|
||||
proc add*(
|
||||
rbl: RockyBulkLoadRef;
|
||||
key: openArray[byte];
|
||||
val: openArray[byte]
|
||||
): bool =
|
||||
## Append a record to the SST file. Note that consecutive records must be
|
||||
## strictly increasing.
|
||||
##
|
||||
## This function is a wrapper around `rocksdb_sstfilewriter_add()` or
|
||||
## `rocksdb_sstfilewriter_put()` (strangely enough, there are two functions
|
||||
## with exactly the same implementation code.)
|
||||
var csError: cstring
|
||||
rbl.writer.rocksdb_sstfilewriter_add(
|
||||
cast[cstring](unsafeAddr key[0]), csize_t(key.len),
|
||||
cast[cstring](unsafeAddr val[0]), csize_t(val.len),
|
||||
cast[cstringArray](csError.addr))
|
||||
if csError.isNil:
|
||||
return true
|
||||
rbl.csError = $csError
|
||||
|
||||
proc finish*(
|
||||
rbl: RockyBulkLoadRef
|
||||
): Result[int64,void]
|
||||
{.gcsafe, raises: [OSError, IOError].} =
|
||||
## Commit collected and cached data to the database. This function implies
|
||||
## `destroy()` if successful. Otherwise `destroy()` must be called
|
||||
## explicitly, e.g. after error analysis.
|
||||
##
|
||||
## If successful, the return value is the size of the SST file used if
|
||||
## that value is available. Otherwise, `0` is returned.
|
||||
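## A rough end-to-end sketch (hypothetical file name, assuming `store` is an
## open `RocksStoreRef` and `sortedRecords` yields keys in strictly increasing
## order):
## ::
##   let rbl = RockyBulkLoadRef.init(store)
##   if rbl.begin("bulk-import.sst"):
##     for (key, val) in sortedRecords:
##       doAssert rbl.add(key, val)
##     let rc = rbl.finish()
##     if rc.isErr: rbl.destroy()  # inspect `rbl.lastError()` first, if needed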
var csError: cstring
|
||||
rbl.writer.rocksdb_sstfilewriter_finish(cast[cstringArray](csError.addr))
|
||||
|
||||
var filePath = rbl.filePath.cstring
|
||||
if csError.isNil:
|
||||
rbl.db.rocksDb.cPtr.rocksdb_ingest_external_file(
|
||||
cast[cstringArray](filePath.addr), 1,
|
||||
rbl.importOption,
|
||||
cast[cstringArray](csError.addr))
|
||||
|
||||
if csError.isNil:
|
||||
var
|
||||
size: int64
|
||||
f: File
|
||||
if f.open(rbl.filePath):
|
||||
size = f.getFileSize
|
||||
f.close
|
||||
rbl.destroy()
|
||||
return ok(size)
|
||||
|
||||
rbl.csError = $csError
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,445 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
chronicles,
|
||||
eth/[common, p2p, rlp, trie/nibbles],
|
||||
stew/[byteutils, interval_set],
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, hexary_envelope, hexary_import,
|
||||
hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc,
|
||||
snapdb_persistent]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
type
|
||||
SnapDbAccountsRef* = ref object of SnapDbBaseRef
|
||||
peer: Peer ## For log messages
|
||||
|
||||
SnapAccountsGaps* = object
|
||||
innerGaps*: seq[NodeSpecs]
|
||||
dangling*: seq[NodeSpecs]
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc to(h: Hash256; T: type NodeKey): T =
|
||||
h.data.T
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible -- " & info & ": " & e.msg
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
except CatchableError:
|
||||
return err(AccountNotFound)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc persistentAccounts(
|
||||
db: HexaryTreeDbRef; ## Current table
|
||||
ps: SnapDbAccountsRef; ## For persistent database
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## Store the accounts trie table on the database
|
||||
if ps.rockDb.isNil:
|
||||
let rc = db.persistentAccountsPut(ps.kvDb)
|
||||
if rc.isErr: return rc
|
||||
else:
|
||||
let rc = db.persistentAccountsPut(ps.rockDb)
|
||||
if rc.isErr: return rc
|
||||
ok()
|
||||
|
||||
|
||||
proc collectAccounts(
|
||||
peer: Peer, ## for log messages
|
||||
base: NodeTag;
|
||||
acc: seq[PackedAccount];
|
||||
): Result[seq[RLeafSpecs],HexaryError] =
|
||||
## Repack account records into a `seq[RLeafSpecs]` queue. The argument data
|
||||
## `acc` is as received with the snap message `AccountRange`.
|
||||
##
|
||||
## The returned list contains leaf node information for populating a repair
|
||||
## table. The accounts, together with some hexary trie records for proofs
|
||||
## can be used for validating the argument account data.
|
||||
var rcAcc: seq[RLeafSpecs]
|
||||
|
||||
if 0 < acc.len:
|
||||
let pathTag0 = acc[0].accKey.to(NodeTag)
|
||||
|
||||
# Verify lower bound
|
||||
if pathTag0 < base:
|
||||
let error = LowerBoundAfterFirstEntry
|
||||
trace "collectAccounts()", peer, base, accounts=acc.len, error
|
||||
return err(error)
|
||||
|
||||
# Add first account
|
||||
rcAcc.add RLeafSpecs(pathTag: pathTag0, payload: acc[0].accBlob)
|
||||
|
||||
# Verify & add other accounts
|
||||
for n in 1 ..< acc.len:
|
||||
let nodeTag = acc[n].accKey.to(NodeTag)
|
||||
|
||||
if nodeTag <= rcAcc[^1].pathTag:
|
||||
let error = AccountsNotSrictlyIncreasing
|
||||
trace "collectAccounts()", peer, item=n, base, accounts=acc.len, error
|
||||
return err(error)
|
||||
|
||||
rcAcc.add RLeafSpecs(pathTag: nodeTag, payload: acc[n].accBlob)
|
||||
|
||||
ok(rcAcc)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type SnapDbAccountsRef;
|
||||
pv: SnapDbRef;
|
||||
root: Hash256;
|
||||
peer: Peer = nil
|
||||
): T =
|
||||
## Constructor, starts a new accounts session.
|
||||
new result
|
||||
result.init(pv, root.to(NodeKey))
|
||||
result.peer = peer
|
||||
|
||||
proc dup*(
|
||||
ps: SnapDbAccountsRef;
|
||||
root: Hash256;
|
||||
peer: Peer;
|
||||
): SnapDbAccountsRef =
|
||||
## Resume an accounts session with different `root` key and `peer`.
|
||||
new result
|
||||
result.copyWithoutRoot(ps)
|
||||
result.root = root.to(NodeKey)
|
||||
result.peer = peer
|
||||
|
||||
proc dup*(
|
||||
ps: SnapDbAccountsRef;
|
||||
root: Hash256;
|
||||
): SnapDbAccountsRef =
|
||||
## Variant of `dup()` without the `peer` argument.
|
||||
new result
|
||||
result.copyWithoutRoot(ps)
|
||||
result.root = root.to(NodeKey)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn =
|
||||
## Return `HexaryGetFn` closure.
|
||||
let getFn = ps.kvDb.persistentAccountsGetFn()
|
||||
return proc(key: openArray[byte]): Blob = getFn(key)
|
||||
|
||||
proc getAccountFn*(pv: SnapDbRef): HexaryGetFn =
|
||||
## Variant of `getAccountFn()`
|
||||
let getFn = pv.kvDb.persistentAccountsGetFn()
|
||||
return proc(key: openArray[byte]): Blob = getFn(key)
|
||||
|
||||
proc importAccounts*(
|
||||
ps: SnapDbAccountsRef; ## Re-usable session descriptor
|
||||
base: NodeTag; ## Before or at first account entry in `data`
|
||||
data: PackedAccountRange; ## Re-packed `snap/1 ` reply data
|
||||
persistent = false; ## Store data on disk
|
||||
): Result[SnapAccountsGaps,HexaryError] =
|
||||
## Validate and import accounts (using proofs as received with the snap
|
||||
## message `AccountRange`). This function accumulates data in a memory table
|
||||
## which can be written to disk with the argument `persistent` set `true`.
|
||||
## The memory table is held in the descriptor argument `ps`.
|
||||
##
|
||||
## On success, the function returns a list `innerGaps` of dangling node
|
||||
## links from the argument `proof` list of nodes after the populating with
|
||||
## accounts. The following example may illustrate the case:
|
||||
##
|
||||
## Assume an accounts hexary trie
|
||||
## ::
|
||||
## | 0 1 2 3 4 5 6 7 8 9 a b c d e f -- nibble positions
|
||||
## | root -> (a, .. b, .. c, .. d, .. ,) -- root branch node
|
||||
## | | | | |
|
||||
## | ... v v v
|
||||
## | (x,X) (y,Y) (z,Z)
|
||||
##
|
||||
## with `a`,`b`,`c`,`d` node hashes, `x`,`y`,`z` partial paths and account
|
||||
## hashes `3&x`,`7&y`,`b&z` for account values `X`,`Y`,`Z`. All other
|
||||
## links in the *root branch node* are assumed nil.
|
||||
##
|
||||
## When passing to this function
|
||||
## * base: `3&x`
|
||||
## * data.proof: *root branch node*
|
||||
## * data.accounts: `(3&x,X)`, `(7&y,Y)`, `(b&z,Z)`
|
||||
## a partial tree can be fully constructed and boundary proofs succeed.
|
||||
## The return value will be an empty list.
|
||||
##
|
||||
## Leaving out `(7&y,Y)`, the boundary proofs still succeed but the
|
||||
## return value will be @[`(7&y,c)`].
|
||||
##
|
||||
## Besides the inner gaps, the function also returns the dangling nodes left
|
||||
## from the `proof` list.
|
||||
##
|
||||
## Note that the `peer` argument is for log messages, only.
|
||||
var
|
||||
accounts: seq[RLeafSpecs] # validated accounts to add to database
|
||||
gaps: SnapAccountsGaps # return value
|
||||
proofStats: TrieNodeStat # `proof` data dangling links
|
||||
innerSubTrie: seq[NodeSpecs] # internal, collect dangling links
|
||||
try:
|
||||
if 0 < data.proof.len:
|
||||
let rc = ps.hexaDb.mergeProofs(ps.root, data.proof, ps.peer)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
block:
|
||||
let rc = ps.peer.collectAccounts(base, data.accounts)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
accounts = rc.value
|
||||
|
||||
# Inspect trie for dangling nodes from proof data (if any.)
|
||||
if 0 < data.proof.len:
|
||||
proofStats = ps.hexaDb.hexaryInspectTrie(ps.root)
|
||||
|
||||
if 0 < accounts.len:
|
||||
if 0 < data.proof.len:
|
||||
# Inspect trie for dangling nodes. This is not a big deal here as the
|
||||
# proof data is typically small.
|
||||
let topTag = accounts[^1].pathTag
|
||||
for w in proofStats.dangling:
|
||||
let iv = w.partialPath.hexaryEnvelope
|
||||
if iv.maxPt < base or topTag < iv.minPt:
|
||||
# Dangling link with partial path envelope outside accounts range
|
||||
gaps.dangling.add w
|
||||
else:
|
||||
# Overlapping partial path envelope.
|
||||
innerSubTrie.add w
|
||||
|
||||
# Build partial or full hexary trie
|
||||
let rc = ps.hexaDb.hexaryInterpolate(
|
||||
ps.root, accounts, bootstrap = (data.proof.len == 0))
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
|
||||
# Collect missing inner sub-trees in the reconstructed partial hexary
|
||||
# trie (if any).
|
||||
let bottomTag = accounts[0].pathTag
|
||||
for w in innerSubTrie:
|
||||
if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)):
|
||||
# Verify that `base` is to the left of the first account and there
|
||||
# is nothing in between. If there is an envelope to the left of
|
||||
# the first account, then it might also cover a point before the
|
||||
# first account.
|
||||
#
|
||||
# Without `proof` data available there can only be a complete
|
||||
# set/list of accounts so there are no dangling nodes in the first
|
||||
# place. But there must be `proof` data for an empty list.
|
||||
if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
|
||||
return err(LowerBoundProofError)
|
||||
# Otherwise register left over entry, a gap in the accounts list
|
||||
gaps.innerGaps.add w
|
||||
|
||||
if persistent:
|
||||
let rc = ps.hexaDb.persistentAccounts(ps)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
|
||||
elif data.proof.len == 0:
|
||||
# There must be a proof for an empty argument list.
|
||||
return err(LowerBoundProofError)
|
||||
|
||||
else:
|
||||
for w in proofStats.dangling:
|
||||
if base <= w.partialPath.hexaryEnvelope.maxPt:
|
||||
return err(LowerBoundProofError)
|
||||
gaps.dangling = proofStats.dangling
|
||||
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible @ importAccounts(KeyError): " & e.msg
|
||||
except OSError as e:
|
||||
error "Import Accounts exception", peer=ps.peer, name=($e.name), msg=e.msg
|
||||
return err(OSErrorException)
|
||||
except CatchableError as e:
|
||||
raiseAssert "Not possible @ importAccounts(" & $e.name & "):" & e.msg
|
||||
|
||||
#when extraTraceMessages:
|
||||
# trace "Accounts imported", peer=ps.peer, root=ps.root.ByteArray32.toHex,
|
||||
# proof=data.proof.len, base, accounts=data.accounts.len,
|
||||
# top=accounts[^1].pathTag, innerGapsLen=gaps.innerGaps.len,
|
||||
# danglingLen=gaps.dangling.len
|
||||
|
||||
ok(gaps)
|
||||
|
||||
proc importAccounts*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer; ## For log messages
|
||||
root: Hash256; ## State root
|
||||
base: NodeTag; ## Before or at first account entry in `data`
|
||||
data: PackedAccountRange; ## Re-packed `snap/1 ` reply data
|
||||
): Result[SnapAccountsGaps,HexaryError] =
|
||||
## Variant of `importAccounts()` for persistent storage, only.
|
||||
SnapDbAccountsRef.init(
|
||||
pv, root, peer).importAccounts(base, data, persistent=true)
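
# Hedged usage sketch (illustration only, not part of the original module):
# feed one `AccountRange` reply into the persistent database and log the
# resulting gaps. The caller-supplied `peer`, `stateRoot`, `base` and `data`
# values are assumptions for illustration.
proc storeAccountRange(pv: SnapDbRef; peer: Peer; stateRoot: Hash256;
                       base: NodeTag; data: PackedAccountRange) =
  let rc = pv.importAccounts(peer, stateRoot, base, data)
  if rc.isOk:
    trace "Accounts range stored", peer,
      innerGaps=rc.value.innerGaps.len, dangling=rc.value.dangling.len
  else:
    trace "Accounts range rejected", peer, error=rc.error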
|
||||
|
||||
|
||||
proc importRawAccountsNodes*(
|
||||
ps: SnapDbAccountsRef; ## Re-usable session descriptor
|
||||
nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
|
||||
reportNodes = {Leaf}; ## Additional node types to report
|
||||
persistent = false; ## store data on disk
|
||||
): seq[HexaryNodeReport]
|
||||
{.gcsafe, raises: [IOError].} =
|
||||
## Store data nodes given as argument `nodes` on the persistent database.
|
||||
##
|
||||
## If there was an error when processing a particular argument `nodes` item,
|
||||
## it will be reported with the return value providing argument slot/index,
|
||||
## node type, and error code.
|
||||
##
|
||||
## If there was an error storing persistent data, the last report item will
|
||||
## have an error code, only.
|
||||
##
|
||||
## Additional node items might be reported if the node type is in the
|
||||
## argument set `reportNodes`. These reported items will have no error
|
||||
## code set (i.e. `HexaryError(0)`).
|
||||
##
|
||||
let
|
||||
peer = ps.peer
|
||||
db = HexaryTreeDbRef.init(ps)
|
||||
nItems = nodes.len
|
||||
var
|
||||
nErrors = 0
|
||||
slot: Option[int]
|
||||
try:
|
||||
# Import nodes
|
||||
for n,node in nodes:
|
||||
if 0 < node.data.len: # otherwise ignore empty placeholder
|
||||
slot = some(n)
|
||||
var rep = db.hexaryImport(node)
|
||||
if rep.error != HexaryError(0):
|
||||
rep.slot = slot
|
||||
result.add rep
|
||||
nErrors.inc
|
||||
trace "Error importing account nodes", peer, inx=n, nItems,
|
||||
error=rep.error, nErrors
|
||||
elif rep.kind.isSome and rep.kind.unsafeGet in reportNodes:
|
||||
rep.slot = slot
|
||||
result.add rep
|
||||
|
||||
# Store to disk
|
||||
if persistent and 0 < db.tab.len:
|
||||
slot = none(int)
|
||||
let rc = db.persistentAccounts(ps)
|
||||
if rc.isErr:
|
||||
result.add HexaryNodeReport(slot: slot, error: rc.error)
|
||||
|
||||
except RlpError:
|
||||
result.add HexaryNodeReport(slot: slot, error: RlpEncoding)
|
||||
nErrors.inc
|
||||
trace "Error importing account nodes", peer, slot, nItems,
|
||||
error=RlpEncoding, nErrors
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible @ importRawAccountNodes: " & e.msg
|
||||
except OSError as e:
|
||||
result.add HexaryNodeReport(slot: slot, error: OSErrorException)
|
||||
nErrors.inc
|
||||
error "Import account nodes exception", peer, slot, nItems,
|
||||
name=($e.name), msg=e.msg, nErrors
|
||||
|
||||
when extraTraceMessages:
|
||||
if nErrors == 0:
|
||||
trace "Raw account nodes imported", peer, slot, nItems, nReport=result.len
|
||||
|
||||
proc importRawAccountsNodes*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer, ## For log messages, only
|
||||
nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
|
||||
reportNodes = {Leaf}; ## Additional node types to report
|
||||
): seq[HexaryNodeReport]
|
||||
{.gcsafe, raises: [IOError].} =
|
||||
## Variant of `importRawAccountsNodes()` for persistent storage.
|
||||
SnapDbAccountsRef.init(
|
||||
pv, Hash256(), peer).importRawAccountsNodes(
|
||||
nodes, reportNodes, persistent=true)
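
# Hedged sketch (illustration only): heal freshly fetched trie nodes into the
# persistent accounts area and count the failed items; `pv`, `peer` and
# `nodes` are assumed to be supplied by the caller.
proc healAccountNodes(pv: SnapDbRef; peer: Peer;
                      nodes: seq[NodeSpecs]): int
    {.gcsafe, raises: [IOError].} =
  for rep in pv.importRawAccountsNodes(peer, nodes):
    if rep.error != HexaryError(0):
      result.inc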
|
||||
|
||||
proc getAccountsNodeKey*(
|
||||
ps: SnapDbAccountsRef; ## Re-usable session descriptor
|
||||
path: Blob; ## Partial node path
|
||||
persistent = false; ## Read data from disk
|
||||
): Result[NodeKey,HexaryError] =
|
||||
## For a partial node path argument `path`, return the raw node key.
|
||||
var rc: Result[NodeKey,void]
|
||||
noExceptionOops("getAccountsNodeKey()"):
|
||||
if persistent:
|
||||
rc = path.hexaryPathNodeKey(ps.root, ps.getAccountFn)
|
||||
else:
|
||||
rc = path.hexaryPathNodeKey(ps.root, ps.hexaDb)
|
||||
if rc.isOk:
|
||||
return ok(rc.value)
|
||||
err(NodeNotFound)
|
||||
|
||||
proc getAccountsNodeKey*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
root: Hash256; ## state root
|
||||
path: Blob; ## Partial node path
|
||||
): Result[NodeKey,HexaryError] =
|
||||
## Variant of `getAccountsNodeKey()` for persistent storage.
|
||||
SnapDbAccountsRef.init(
|
||||
pv, root, Peer()).getAccountsNodeKey(path, persistent=true)
|
||||
|
||||
|
||||
proc getAccountsData*(
|
||||
ps: SnapDbAccountsRef; ## Re-usable session descriptor
|
||||
path: NodeKey; ## Account to visit
|
||||
persistent = false; ## Read data from disk
|
||||
): Result[Account,HexaryError] =
|
||||
## Fetch account data.
|
||||
##
|
||||
## Caveat: There is no unit test yet for the non-persistent version
|
||||
var acc: Account
|
||||
|
||||
noExceptionOops("getAccountData()"):
|
||||
var leaf: Blob
|
||||
if persistent:
|
||||
leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData
|
||||
else:
|
||||
leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData
|
||||
|
||||
if leaf.len == 0:
|
||||
return err(AccountNotFound)
|
||||
acc = rlp.decode(leaf,Account)
|
||||
|
||||
return ok(acc)
|
||||
|
||||
proc getAccountsData*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
root: Hash256; ## State root
|
||||
path: NodeKey; ## Account to visit
|
||||
): Result[Account,HexaryError] =
|
||||
## Variant of `getAccountsData()` for persistent storage.
|
||||
SnapDbAccountsRef.init(
|
||||
pv, root, Peer()).getAccountsData(path, persistent=true)
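
# Hedged sketch (illustration only): look up a single account by its hashed
# key on the persistent database; `pv`, `stateRoot` and `accKey` are assumed
# to be supplied by the caller.
proc accountBalance(pv: SnapDbRef; stateRoot: Hash256;
                    accKey: NodeKey): UInt256 =
  ## Returns zero unless the account is found on the persistent database.
  let rc = pv.getAccountsData(stateRoot, accKey)
  if rc.isOk:
    result = rc.value.balance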
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,112 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
eth/[common, p2p],
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, snapdb_desc, snapdb_persistent]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
type
|
||||
SnapDbContractsRef* = ref object of SnapDbBaseRef
|
||||
peer: Peer ## For log messages
|
||||
|
||||
when false:
|
||||
const
|
||||
extraTraceMessages = false or true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
raiseAssert "Not possible -- " & info & ": " & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc persistentContracts(
|
||||
ps: SnapDbContractsRef; ## Base descriptor on `CoreDbRef`
|
||||
data: seq[(NodeKey,Blob)]; ## Contract code items
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## Store contract codes onto permanent database
|
||||
if ps.rockDb.isNil:
|
||||
let rc = data.persistentContractPut ps.kvDb
|
||||
if rc.isErr:
|
||||
return rc
|
||||
else:
|
||||
let rc = data.persistentContractPut ps.rockDb
|
||||
if rc.isErr:
|
||||
return rc
|
||||
ok()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type SnapDbContractsRef;
|
||||
pv: SnapDbRef;
|
||||
peer: Peer = nil
|
||||
): T =
|
||||
## Constructor, starts a new contracts session.
|
||||
new result
|
||||
result.init(pv, NodeKey.default)
|
||||
result.peer = peer
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getContractsFn*(desc: SnapDbBaseRef|SnapDbRef): HexaryGetFn =
|
||||
## Return `HexaryGetFn` closure.
|
||||
let getFn = desc.kvDb.persistentContractsGetFn()
|
||||
return proc(key: openArray[byte]): Blob = getFn(key)
|
||||
|
||||
|
||||
proc importContracts*(
|
||||
ps: SnapDbContractsRef; ## Re-usable session descriptor
|
||||
data: seq[(NodeKey,Blob)]; ## Contract code items
|
||||
): Result[void,HexaryError] =
|
||||
## Store contract codes onto permanent database
|
||||
try:
|
||||
result = ps.persistentContracts data
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible @ importAccounts(KeyError): " & e.msg
|
||||
except OSError as e:
|
||||
error "Import Accounts exception", peer=ps.peer, name=($e.name), msg=e.msg
|
||||
return err(OSErrorException)
|
||||
except CatchableError as e:
|
||||
raiseAssert "Not possible @ importAccounts(" & $e.name & "):" & e.msg
|
||||
|
||||
proc importContracts*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer; ## For log messages
|
||||
data: seq[(NodeKey,Blob)]; ## Contract code items
|
||||
): Result[void,HexaryError] =
|
||||
## Variant of `importContracts()` for persistent storage, only.
|
||||
SnapDbContractsRef.init(pv, peer).importContracts(data)
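
# Hedged sketch (illustration only): persist a batch of contract code blobs
# keyed by code hash via the `importContracts()` variant above; `pv`, `peer`
# and `codes` are assumed to be supplied by the caller.
proc storeContractCodes(pv: SnapDbRef; peer: Peer;
                        codes: seq[(NodeKey,Blob)]): bool =
  pv.importContracts(peer, codes).isOk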
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,180 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[algorithm, sequtils, tables],
|
||||
eth/[common, trie/nibbles],
|
||||
results,
|
||||
../../range_desc,
|
||||
"."/[hexary_debug, hexary_desc, hexary_error, hexary_paths, snapdb_desc]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template noPpError(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except ValueError as e:
|
||||
raiseAssert "Inconveivable (" & info & "): " & e.msg
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible (" & info & "): " & e.msg
|
||||
except CatchableError as e:
|
||||
raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc convertTo(data: openArray[byte]; T: type Hash256): T =
|
||||
discard result.data.NodeKey.init(data) # size error => zero
|
||||
|
||||
template noKeyError(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible (" & info & "): " & e.msg
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible -- " & info & ": " & e.msg
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
except CatchableError:
|
||||
return err(AccountNotFound)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, pretty printing
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pp*(a: RepairKey; ps: SnapDbBaseRef): string =
|
||||
if not ps.isNil:
|
||||
let toKey = ps.hexaDb.keyPp
|
||||
if not toKey.isNil:
|
||||
try:
|
||||
return a.toKey
|
||||
except CatchableError:
|
||||
discard
|
||||
$a.ByteArray33
|
||||
|
||||
proc pp*(a: NodeKey; ps: SnapDbBaseRef): string =
|
||||
if not ps.isNil:
|
||||
let toKey = ps.hexaDb.keyPp
|
||||
if not toKey.isNil:
|
||||
try:
|
||||
return a.to(RepairKey).toKey
|
||||
except CatchableError:
|
||||
discard
|
||||
$a.ByteArray32
|
||||
|
||||
proc pp*(a: NodeTag; ps: SnapDbBaseRef): string =
|
||||
a.to(NodeKey).pp(ps)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type HexaryTreeDbRef;
|
||||
): T =
|
||||
## Constructor variant. It provides a `HexaryTreeDbRef()` with a key cache
|
||||
## attached for pretty printing. So this one is mainly for debugging.
|
||||
HexaryTreeDbRef.init(SnapDbRef())
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc sortMerge*(base: openArray[NodeTag]): NodeTag =
|
||||
## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
|
||||
## so that there is no overlap which would be rejected by `merge()`.
|
||||
##
|
||||
## This function selects a `NodeTag` from a list.
|
||||
result = high(NodeTag)
|
||||
for w in base:
|
||||
if w < result:
|
||||
result = w
|
||||
|
||||
proc sortMerge*(acc: openArray[seq[PackedAccount]]): seq[PackedAccount] =
|
||||
## Helper for merging several `(NodeTag,seq[PackedAccount])` data sets
|
||||
## so that there is no overlap which would be rejected by `merge()`.
|
||||
##
|
||||
## This function flattens and sorts the argument account lists.
|
||||
noKeyError("sortMergeAccounts"):
|
||||
var accounts: Table[NodeTag,PackedAccount]
|
||||
for accList in acc:
|
||||
for item in accList:
|
||||
accounts[item.accKey.to(NodeTag)] = item
|
||||
result = toSeq(accounts.keys).sorted(cmp).mapIt(accounts[it])
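
# Hedged sketch (illustration only): combine two possibly overlapping
# `AccountRange` reply fragments into a single non-overlapping data set as
# accepted by `merge()`; the argument values are assumed to come from two
# different replies covering adjacent or overlapping ranges.
proc mergeTwoReplies(base0, base1: NodeTag;
                     list0, list1: seq[PackedAccount]
                     ): (NodeTag, seq[PackedAccount]) =
  (sortMerge([base0, base1]), sortMerge([list0, list1]))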
|
||||
|
||||
proc nextAccountsChainDbKey*(
|
||||
ps: SnapDbBaseRef;
|
||||
accKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
): Result[NodeKey,HexaryError] =
|
||||
## Fetch the account path on the `CoreDbRef`, the one next to the
|
||||
## argument account key.
|
||||
noExceptionOops("getChainDbAccount()"):
|
||||
let path = accKey
|
||||
.hexaryPath(ps.root, getFn) # ps.getAccountFn)
|
||||
.next(getFn) # ps.getAccountFn)
|
||||
.getNibbles
|
||||
if 64 == path.len:
|
||||
return ok(path.getBytes.convertTo(Hash256).to(NodeKey))
|
||||
|
||||
err(AccountNotFound)
|
||||
|
||||
proc prevAccountsChainDbKey*(
|
||||
ps: SnapDbBaseRef;
|
||||
accKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
): Result[NodeKey,HexaryError] =
|
||||
## Fetch the account path on the `CoreDbRef`, the one before the
|
||||
## argument account.
|
||||
noExceptionOops("getChainDbAccount()"):
|
||||
let path = accKey
|
||||
.hexaryPath(ps.root, getFn) # ps.getAccountFn)
|
||||
.prev(getFn) # ps.getAccountFn)
|
||||
.getNibbles
|
||||
if 64 == path.len:
|
||||
return ok(path.getBytes.convertTo(Hash256).to(NodeKey))
|
||||
|
||||
err(AccountNotFound)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# More debugging (and playing with the hexary database)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc assignPrettyKeys*(xDb: HexaryTreeDbRef; root: NodeKey) =
|
||||
## Prepare for pretty printing/debugging. If run early enough, this function
|
||||
## sets the root key to `"$"`, for instance.
|
||||
if not xDb.keyPp.isNil:
|
||||
noPpError("validate(1)"):
|
||||
# Make keys assigned in pretty order for printing
|
||||
let rootKey = root.to(RepairKey)
|
||||
discard xDb.keyPp rootKey
|
||||
var keysList = toSeq(xDb.tab.keys)
|
||||
if xDb.tab.hasKey(rootKey):
|
||||
keysList = @[rootKey] & keysList
|
||||
for key in keysList:
|
||||
let node = xDb.tab[key]
|
||||
discard xDb.keyPp key
|
||||
case node.kind:
|
||||
of Branch: (for w in node.bLink: discard xDb.keyPp w)
|
||||
of Extension: discard xDb.keyPp node.eLink
|
||||
of Leaf: discard
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,296 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
chronicles,
|
||||
eth/[common, p2p, trie/nibbles],
|
||||
../../../../db/core_db/persistent,
|
||||
../../../../db/[core_db, storage_types, kvstore_rocksdb],
|
||||
../../../protocol,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, hexary_import, hexary_nearby, hexary_paths,
|
||||
rocky_bulk_load]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
RockyBulkCache* = "accounts.sst"
|
||||
## Name of temporary file to accommodate SST records for `rocksdb`
|
||||
|
||||
type
|
||||
SnapDbRef* = ref object
|
||||
## Global, re-usable descriptor
|
||||
keyMap: Table[RepairKey,uint] ## For debugging only (will go away)
|
||||
db: CoreDbRef ## General database
|
||||
rocky: RocksStoreRef ## Set if rocksdb is available
|
||||
|
||||
SnapDbBaseRef* = ref object of RootRef
|
||||
## Session descriptor
|
||||
xDb: HexaryTreeDbRef ## Hexary database, memory based
|
||||
base: SnapDbRef ## Back reference to common parameters
|
||||
root*: NodeKey ## Session DB root node key
|
||||
|
||||
when extraTraceMessages:
|
||||
import hexary_debug
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private debugging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template noKeyError(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible (" & info & "): " & e.msg
|
||||
|
||||
proc keyPp(a: RepairKey; pv: SnapDbRef): string =
|
||||
if a.isZero:
|
||||
return "ø"
|
||||
if not pv.keyMap.hasKey(a):
|
||||
pv.keyMap[a] = pv.keyMap.len.uint + 1
|
||||
result = if a.isNodeKey: "$" else: "@"
|
||||
noKeyError("pp(RepairKey)"):
|
||||
result &= $pv.keyMap[a]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helper
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
proc clearRockyCacheFile(rocky: RocksStoreRef): bool =
|
||||
if not rocky.isNil:
|
||||
# A cache file might hang about from a previous crash
|
||||
try:
|
||||
discard rocky.clearCacheFile(RockyBulkCache)
|
||||
return true
|
||||
except OSError as e:
|
||||
error "Cannot clear rocksdb cache", exception=($e.name), msg=e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type SnapDbRef;
|
||||
db: CoreDbRef
|
||||
): T =
|
||||
## Main object constructor
|
||||
T(db: db, rocky: db.newKvt.backend.toRocksStoreRef)
|
||||
|
||||
proc init*(
|
||||
T: type HexaryTreeDbRef;
|
||||
pv: SnapDbRef;
|
||||
): T =
|
||||
## Constructor for inner hexary trie database
|
||||
let xDb = HexaryTreeDbRef()
|
||||
xDb.keyPp = proc(key: RepairKey): string = key.keyPp(pv) # will go away
|
||||
return xDb
|
||||
|
||||
proc init*(
|
||||
T: type HexaryTreeDbRef;
|
||||
ps: SnapDbBaseRef;
|
||||
): T =
|
||||
## Constructor variant
|
||||
HexaryTreeDbRef.init(ps.base)
|
||||
|
||||
# ---------------
|
||||
|
||||
proc init*(
|
||||
ps: SnapDbBaseRef;
|
||||
pv: SnapDbRef;
|
||||
root: NodeKey;
|
||||
) =
|
||||
## Session base constructor
|
||||
ps.base = pv
|
||||
ps.root = root
|
||||
ps.xDb = HexaryTreeDbRef.init(pv)
|
||||
|
||||
proc init*(
|
||||
T: type SnapDbBaseRef;
|
||||
ps: SnapDbBaseRef;
|
||||
root: NodeKey;
|
||||
): T =
|
||||
## Variant of session base constructor
|
||||
new result
|
||||
result.init(ps.base, root)
|
||||
|
||||
proc copyWithoutRoot*(a, b: SnapDbBaseRef) =
|
||||
a.xDb = b.xDb
|
||||
a.base = b.base
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public getters
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hexaDb*(ps: SnapDbBaseRef): HexaryTreeDbRef =
|
||||
## Getter, low level access to underlying session DB
|
||||
ps.xDb
|
||||
|
||||
proc rockDb*(ps: SnapDbBaseRef): RocksStoreRef =
|
||||
## Getter, low level access to underlying persistent rock DB interface
|
||||
ps.base.rocky
|
||||
|
||||
proc rockDb*(pv: SnapDbRef): RocksStoreRef =
|
||||
## Getter variant
|
||||
pv.rocky
|
||||
|
||||
proc kvDb*(ps: SnapDbBaseRef): CoreDbRef =
|
||||
## Getter, low level access to underlying persistent key-value DB
|
||||
ps.base.db
|
||||
|
||||
proc kvDb*(pv: SnapDbRef): CoreDbRef =
|
||||
## Getter, low level access to underlying persistent key-value DB
|
||||
pv.db
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, select sub-tables for persistent storage
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc toBlockHeaderKey*(a: Hash256): ByteArray33 =
|
||||
a.genericHashKey.data
|
||||
|
||||
proc toBlockNumberKey*(a: BlockNumber): ByteArray33 =
|
||||
static:
|
||||
doAssert 32 == sizeof BlockNumber # needed in `blockNumberToHashKey()`
|
||||
a.blockNumberToHashKey.data
|
||||
|
||||
proc toContractHashKey*(a: NodeKey): ByteArray33 =
|
||||
a.to(Hash256).contractHashKey.data
|
||||
|
||||
when false:
|
||||
proc toAccountsKey*(a: NodeKey): ByteArray33 =
|
||||
a.ByteArray32.snapSyncAccountKey.data
|
||||
|
||||
proc toStorageSlotsKey*(a: NodeKey): ByteArray33 =
|
||||
a.ByteArray32.snapSyncStorageSlotKey.data
|
||||
else:
|
||||
proc toAccountsKey*(a: NodeKey): ByteArray32 =
|
||||
a.ByteArray32
|
||||
|
||||
proc toStorageSlotsKey*(a: NodeKey): ByteArray32 =
|
||||
a.ByteArray32
|
||||
|
||||
proc toStateRootKey*(a: NodeKey): ByteArray33 =
|
||||
a.ByteArray32.snapSyncStateRootKey.data
|
||||
|
||||
template toOpenArray*(k: ByteArray32): openArray[byte] =
|
||||
k.toOpenArray(0, 31)
|
||||
|
||||
template toOpenArray*(k: ByteArray33): openArray[byte] =
|
||||
k.toOpenArray(0, 32)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc dbBackendRocksDb*(pv: SnapDbRef): bool =
|
||||
## Returns `true` if rocksdb features are available
|
||||
not pv.rocky.isNil
|
||||
|
||||
proc dbBackendRocksDb*(ps: SnapDbBaseRef): bool =
|
||||
## Returns `true` if rocksdb features are available
|
||||
not ps.base.rocky.isNil
|
||||
|
||||
proc mergeProofs*(
|
||||
xDb: HexaryTreeDbRef; ## Session database
|
||||
root: NodeKey; ## State root
|
||||
proof: seq[SnapProof]; ## Node records
|
||||
peer = Peer(); ## For log messages
|
||||
freeStandingOk = false; ## Remove freestanding nodes
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [RlpError,KeyError].} =
|
||||
## Import proof records (as received with snap message) into a hexary trie
|
||||
## of the repair table. These hexary trie records can be extended to a full
|
||||
## trie at a later stage and used for validating account data.
|
||||
var
|
||||
nodes: HashSet[RepairKey]
|
||||
refs = @[root.to(RepairKey)].toHashSet
|
||||
|
||||
for n,rlpRec in proof:
|
||||
let report = xDb.hexaryImport(rlpRec.to(Blob), nodes, refs)
|
||||
if report.error != HexaryError(0):
|
||||
let error = report.error
|
||||
trace "mergeProofs()", peer, item=n, proofs=proof.len, error
|
||||
return err(error)
|
||||
|
||||
# Remove free standing nodes (if any)
|
||||
if 0 < nodes.len:
|
||||
let rest = nodes - refs
|
||||
if 0 < rest.len:
|
||||
if freeStandingOk:
|
||||
trace "mergeProofs() detected unrelated nodes", peer, nodes=nodes.len
|
||||
discard
|
||||
else:
|
||||
# Delete unreferenced nodes
|
||||
for nodeKey in nodes:
|
||||
xDb.tab.del(nodeKey)
|
||||
trace "mergeProofs() ignoring unrelated nodes", peer, nodes=nodes.len
|
||||
|
||||
ok()
|
||||
|
||||
|
||||
proc verifyLowerBound*(
|
||||
xDb: HexaryTreeDbRef; ## Session database
|
||||
root: NodeKey; ## State root
|
||||
base: NodeTag; ## Before or at first account entry in `data`
|
||||
first: NodeTag; ## First account/storage key
|
||||
peer = Peer(); ## For log messages
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Verify that `base` is to the left of the first leaf entry and there is
|
||||
## nothing in between.
|
||||
var error: HexaryError
|
||||
|
||||
let rc = base.hexaryNearbyRight(root, xDb)
|
||||
if rc.isErr:
|
||||
error = rc.error
|
||||
elif first == rc.value:
|
||||
return ok()
|
||||
else:
|
||||
error = LowerBoundProofError
|
||||
|
||||
when extraTraceMessages:
|
||||
trace "verifyLowerBound()", peer, base=base.to(NodeKey).pp,
|
||||
first=first.to(NodeKey).pp, error
|
||||
err(error)
|
||||
|
||||
|
||||
proc verifyNoMoreRight*(
|
||||
xDb: HexaryTreeDbRef; ## Session database
|
||||
root: NodeKey; ## State root
|
||||
base: NodeTag; ## Before or at first account entry in `data`
|
||||
peer = Peer(); ## For log messages
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Verify that there are no more leaf entries to the right of and
|
||||
## including `base`.
|
||||
var error: HexaryError
|
||||
|
||||
let rc = base.hexaryNearbyRight(root, xDb)
|
||||
if rc.isOk:
|
||||
error = LowerBoundProofError
|
||||
elif rc.error != NearbyBeyondRange:
|
||||
error = rc.error
|
||||
else:
|
||||
return ok()
|
||||
|
||||
when extraTraceMessages:
|
||||
trace "verifyLeftmostBound()", peer, base, root, error
|
||||
err(error)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,398 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[algorithm, tables],
|
||||
chronicles,
|
||||
eth/[common, trie/db],
|
||||
results,
|
||||
../../../../db/[core_db, kvstore_rocksdb],
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
type
|
||||
AccountsGetFn* = proc(key: openArray[byte]): Blob
|
||||
{.gcsafe, raises:[].}
|
||||
## The `get()` function for the accounts trie
|
||||
|
||||
ContractsGetFn* = proc(key: openArray[byte]): Blob
|
||||
{.gcsafe, raises:[].}
|
||||
## The `get()` function for the contracts table
|
||||
|
||||
StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob
|
||||
{.gcsafe, raises: [].}
|
||||
## The `get()` function for the storage tries depends on the current
|
||||
## account
|
||||
|
||||
StateRootRegistry* = object
|
||||
## State root record. A table of such records is organised as
|
||||
## follows.
|
||||
## ::
|
||||
## zero -> (n/a) -------+
|
||||
## |
|
||||
## ... |
|
||||
## ^ |
|
||||
## | |
|
||||
## (data) |
|
||||
## ^ |
|
||||
## | |
|
||||
## (data) |
|
||||
## ^ |
|
||||
## | |
|
||||
## (data) <-----+
|
||||
##
|
||||
key*: NodeKey ## Top reference for base entry, back reference otherwise
|
||||
data*: Blob ## Some data
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enable additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers, logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Persistent db " & info
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeKey): T =
|
||||
## Might be lossy, check before use
|
||||
discard result.init(key.ByteArray33[1 .. 32])
|
||||
|
||||
proc convertTo(key: RepairKey; T: type NodeTag): T =
|
||||
## Might be lossy, check before use
|
||||
UInt256.fromBytesBE(key.ByteArray33[1 .. 32]).T
|
||||
|
||||
proc toAccountsKey(a: RepairKey): auto =
|
||||
a.convertTo(NodeKey).toAccountsKey
|
||||
|
||||
proc toStorageSlotsKey(a: RepairKey): auto =
|
||||
a.convertTo(NodeKey).toStorageSlotsKey
|
||||
|
||||
proc stateRootGet*(db: CoreDbRef; nodeKey: NodeKey): Blob =
|
||||
if db.isLegacy:
|
||||
return db.newKvt.backend.toLegacy.get(nodeKey.toStateRootKey.toOpenArray)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions: get
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc persistentAccountsGetFn*(db: CoreDbRef): AccountsGetFn =
|
||||
## Returns a `get()` function for retrieving accounts data
|
||||
return proc(key: openArray[byte]): Blob =
|
||||
var nodeKey: NodeKey
|
||||
if nodeKey.init(key):
|
||||
if db.isLegacy:
|
||||
return db.newKvt.backend.toLegacy.get(
|
||||
nodeKey.toAccountsKey.toOpenArray)
|
||||
|
||||
proc persistentContractsGetFn*(db: CoreDbRef): ContractsGetFn =
|
||||
## Returns a `get()` function for retrieving contracts data
|
||||
return proc(key: openArray[byte]): Blob =
|
||||
var nodeKey: NodeKey
|
||||
if nodeKey.init(key):
|
||||
if db.isLegacy:
|
||||
return db.newKvt.backend.toLegacy.get(
|
||||
nodeKey.toContractHashKey.toOpenArray)
|
||||
|
||||
proc persistentStorageSlotsGetFn*(db: CoreDbRef): StorageSlotsGetFn =
|
||||
## Returns a `get()` function for retrieving storage slots data
|
||||
return proc(accKey: NodeKey; key: openArray[byte]): Blob =
|
||||
var nodeKey: NodeKey
|
||||
if nodeKey.init(key):
|
||||
if db.isLegacy:
|
||||
return db.newKvt.backend.toLegacy.get(
|
||||
nodeKey.toStorageSlotsKey.toOpenArray)
|
||||
|
||||
proc persistentStateRootGet*(
|
||||
db: CoreDbRef;
|
||||
root: NodeKey;
|
||||
): Result[StateRootRegistry,HexaryError] =
|
||||
## Implements a `get()` function for returning state root registry data.
|
||||
let rlpBlob = db.stateRootGet(root)
|
||||
if 0 < rlpBlob.len:
|
||||
try:
|
||||
return ok(rlp.decode(rlpBlob, StateRootRegistry))
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
err(StateRootNotFound)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions: store/put
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc persistentBlockHeaderPut*(
|
||||
db: CoreDbRef;
|
||||
hdr: BlockHeader;
|
||||
) =
|
||||
## Store a single header. This function is intended to finalise snap sync
|
||||
## by storing a universal pivot header, not unlike genesis.
|
||||
let hashKey = hdr.blockHash
|
||||
db.kvt.put(hashKey.toBlockHeaderKey.toOpenArray, rlp.encode(hdr))
|
||||
db.kvt.put(hdr.blockNumber.toBlockNumberKey.toOpenArray, rlp.encode(hashKey))
|
||||
when extraTraceMessages:
|
||||
trace logTxt "stored block header", hashKey,
|
||||
blockNumber=hdr.blockNumber.toStr,
|
||||
dbVerify=(0 < db.get(hashKey.toBlockHeaderKey.toOpenArray).len)
|
||||
|
||||
proc persistentAccountsPut*(
|
||||
db: HexaryTreeDbRef;
|
||||
base: CoreDbRef;
|
||||
): Result[void,HexaryError] =
|
||||
## Bulk store using transactional `put()`
|
||||
let dbTx = base.beginTransaction
|
||||
defer: dbTx.commit
|
||||
|
||||
for (key,value) in db.tab.pairs:
|
||||
if not key.isNodeKey:
|
||||
let error = UnresolvedRepairNode
|
||||
when extraTraceMessages:
|
||||
trace logTxt "unresolved node in repair table", error
|
||||
return err(error)
|
||||
base.kvt.put(key.toAccountsKey.toOpenArray, value.convertTo(Blob))
|
||||
ok()
|
||||
|
||||
proc persistentStorageSlotsPut*(
|
||||
db: HexaryTreeDbRef;
|
||||
base: CoreDbRef;
|
||||
): Result[void,HexaryError] =
|
||||
## Bulk store using transactional `put()`
|
||||
let dbTx = base.beginTransaction
|
||||
defer: dbTx.commit
|
||||
|
||||
for (key,value) in db.tab.pairs:
|
||||
if not key.isNodeKey:
|
||||
let error = UnresolvedRepairNode
|
||||
when extraTraceMessages:
|
||||
trace logTxt "unresolved node in repair table", error
|
||||
return err(error)
|
||||
base.kvt.put(key.toStorageSlotsKey.toOpenArray, value.convertTo(Blob))
|
||||
ok()
|
||||
|
||||
proc persistentContractPut*(
|
||||
data: seq[(NodeKey,Blob)];
|
||||
base: CoreDbRef;
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [].} =
|
||||
## SST based bulk load on `rocksdb`.
|
||||
let dbTx = base.beginTransaction
|
||||
defer: dbTx.commit
|
||||
|
||||
for (key,val) in data:
|
||||
base.kvt.put(key.toContractHashKey.toOpenArray,val)
|
||||
ok()
|
||||
|
||||
|
||||
proc persistentStateRootPut*(
|
||||
db: CoreDbRef;
|
||||
root: NodeKey;
|
||||
data: Blob;
|
||||
) {.gcsafe, raises: [RlpError].} =
|
||||
## Save or update state root registry data.
|
||||
const
|
||||
zeroKey = NodeKey.default
|
||||
let
|
||||
rlpData = db.stateRootGet(root)
|
||||
|
||||
if rlpData.len == 0:
|
||||
var backKey: NodeKey
|
||||
|
||||
let baseBlob = db.stateRootGet(zeroKey)
|
||||
if 0 < baseBlob.len:
|
||||
backKey = rlp.decode(baseBlob, StateRootRegistry).key
|
||||
|
||||
# No need for a transaction frame. If the system crashes in between,
|
||||
# so be it :). All that can happen is storing redundant top entries.
|
||||
let
|
||||
rootEntryData = rlp.encode StateRootRegistry(key: backKey, data: data)
|
||||
zeroEntryData = rlp.encode StateRootRegistry(key: root)
|
||||
|
||||
# Store a new top entry
|
||||
db.kvt.put(root.toStateRootKey.toOpenArray, rootEntryData)
|
||||
|
||||
# Store updated base record pointing to top entry
|
||||
db.kvt.put(zeroKey.toStateRootKey.toOpenArray, zeroEntryData)
|
||||
|
||||
else:
|
||||
let record = rlp.decode(rlpData, StateRootRegistry)
|
||||
if record.data != data:
|
||||
|
||||
let rootEntryData =
|
||||
rlp.encode StateRootRegistry(key: record.key, data: data)
|
||||
|
||||
db.kvt.put(root.toStateRootKey.toOpenArray, rootEntryData)
|
||||
|
||||
|
||||
proc persistentAccountsPut*(
|
||||
db: HexaryTreeDbRef;
|
||||
rocky: RocksStoreRef
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## SST based bulk load on `rocksdb`.
|
||||
if rocky.isNil:
|
||||
return err(NoRocksDbBackend)
|
||||
let bulker = RockyBulkLoadRef.init(rocky)
|
||||
defer: bulker.destroy()
|
||||
if not bulker.begin(RockyBulkCache):
|
||||
let error = CannotOpenRocksDbBulkSession
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb session initiation failed",
|
||||
error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
#let keyList = toSeq(db.tab.keys)
|
||||
# .filterIt(it.isNodeKey)
|
||||
# .mapIt(it.convertTo(NodeTag))
|
||||
# .sorted(cmp)
|
||||
var
|
||||
keyList = newSeq[NodeTag](db.tab.len)
|
||||
inx = 0
|
||||
for repairKey in db.tab.keys:
|
||||
if repairKey.isNodeKey:
|
||||
keyList[inx] = repairKey.convertTo(NodeTag)
|
||||
inx.inc
|
||||
if inx < db.tab.len:
|
||||
return err(UnresolvedRepairNode)
|
||||
keyList.sort(cmp)
|
||||
|
||||
for n,nodeTag in keyList:
|
||||
let
|
||||
nodeKey = nodeTag.to(NodeKey)
|
||||
data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
|
||||
if not bulker.add(nodeKey.toAccountsKey.toOpenArray, data):
|
||||
let error = AddBulkItemFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb bulk stash failure",
|
||||
n, len=db.tab.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
if bulker.finish().isErr:
|
||||
let error = CommitBulkItemsFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb commit failure",
|
||||
len=db.tab.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
ok()
|
||||
|
||||
|
||||
proc persistentStorageSlotsPut*(
|
||||
db: HexaryTreeDbRef;
|
||||
rocky: RocksStoreRef
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## SST based bulk load on `rocksdb`.
|
||||
if rocky.isNil:
|
||||
return err(NoRocksDbBackend)
|
||||
let bulker = RockyBulkLoadRef.init(rocky)
|
||||
defer: bulker.destroy()
|
||||
if not bulker.begin(RockyBulkCache):
|
||||
let error = CannotOpenRocksDbBulkSession
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb session initiation failed",
|
||||
error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
#let keyList = toSeq(db.tab.keys)
|
||||
# .filterIt(it.isNodeKey)
|
||||
# .mapIt(it.convertTo(NodeTag))
|
||||
# .sorted(cmp)
|
||||
var
|
||||
keyList = newSeq[NodeTag](db.tab.len)
|
||||
inx = 0
|
||||
for repairKey in db.tab.keys:
|
||||
if repairKey.isNodeKey:
|
||||
keyList[inx] = repairKey.convertTo(NodeTag)
|
||||
inx.inc
|
||||
if inx < db.tab.len:
|
||||
return err(UnresolvedRepairNode)
|
||||
keyList.sort(cmp)
|
||||
|
||||
for n,nodeTag in keyList:
|
||||
let
|
||||
nodeKey = nodeTag.to(NodeKey)
|
||||
data = db.tab[nodeKey.to(RepairKey)].convertTo(Blob)
|
||||
if not bulker.add(nodeKey.toStorageSlotsKey.toOpenArray, data):
|
||||
let error = AddBulkItemFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb bulk stash failure",
|
||||
n, len=db.tab.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
if bulker.finish().isErr:
|
||||
let error = CommitBulkItemsFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb commit failure",
|
||||
len=db.tab.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
ok()
|
||||
|
||||
|
||||
proc persistentContractPut*(
|
||||
data: seq[(NodeKey,Blob)];
|
||||
rocky: RocksStoreRef
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## SST based bulk load on `rocksdb`.
|
||||
if rocky.isNil:
|
||||
return err(NoRocksDbBackend)
|
||||
let bulker = RockyBulkLoadRef.init(rocky)
|
||||
defer: bulker.destroy()
|
||||
if not bulker.begin(RockyBulkCache):
|
||||
let error = CannotOpenRocksDbBulkSession
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb session initiation failed",
|
||||
error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
var
|
||||
lookup: Table[NodeKey,Blob]
|
||||
keyList = newSeq[NodeTag](data.len)
|
||||
inx = 0
|
||||
for (key,val) in data:
|
||||
if not lookup.hasKey key:
|
||||
lookup[key] = val
|
||||
keyList[inx] = key.to(NodeTag)
|
||||
inx.inc
|
||||
if lookup.len < inx:
|
||||
keyList.setLen(inx)
|
||||
keyList.sort(cmp)
|
||||
|
||||
for n,nodeTag in keyList:
|
||||
let
|
||||
nodeKey = nodeTag.to(NodeKey)
|
||||
data = lookup[nodeKey]
|
||||
if not bulker.add(nodeKey.toContractHashKey.toOpenArray, data):
|
||||
let error = AddBulkItemFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb bulk load failure",
|
||||
n, dataLen=data.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
|
||||
if bulker.finish().isErr:
|
||||
let error = CommitBulkItemsFailed
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rocksdb commit failure",
|
||||
dataLen=data.len, error, info=bulker.lastError()
|
||||
return err(error)
|
||||
ok()
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
||||
|
@ -1,80 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
eth/[common, rlp],
|
||||
results,
|
||||
../../range_desc,
|
||||
"."/[hexary_error, snapdb_desc, snapdb_persistent]
|
||||
|
||||
type
|
||||
SnapDbPivotRegistry* = object
|
||||
predecessor*: NodeKey ## Predecessor key in chain, auto filled
|
||||
header*: BlockHeader ## Pivot state, containing state root
|
||||
nAccounts*: uint64 ## Imported # of accounts
|
||||
nSlotLists*: uint64 ## Imported # of account storage tries
|
||||
dangling*: seq[Blob] ## Dangling nodes in accounts trie
|
||||
processed*: seq[
|
||||
(NodeTag,NodeTag)] ## Processed account ranges
|
||||
slotAccounts*: seq[NodeKey] ## List of accounts with missing storage slots
|
||||
ctraAccounts*: seq[NodeKey] ## List of accounts with missing contracts
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template handleRlpException(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pivotSaveDB*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
data: SnapDbPivotRegistry; ## Registered data record
|
||||
): Result[int,HexaryError] =
|
||||
## Register pivot environment
|
||||
handleRlpException("pivotSaveDB()"):
|
||||
let rlpData = rlp.encode(data)
|
||||
pv.kvDb.persistentStateRootPut(data.header.stateRoot.to(NodeKey), rlpData)
|
||||
return ok(rlpData.len)
|
||||
# notreached
|
||||
|
||||
proc pivotRecoverDB*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
stateRoot: NodeKey; ## Check for a particular state root
|
||||
): Result[SnapDbPivotRegistry,HexaryError] =
|
||||
## Restore pivot environment for a particular state root.
|
||||
let rc = pv.kvDb.persistentStateRootGet(stateRoot)
|
||||
if rc.isOk:
|
||||
handleRlpException("rpivotRecoverDB()"):
|
||||
var r = rlp.decode(rc.value.data, SnapDbPivotRegistry)
|
||||
r.predecessor = rc.value.key
|
||||
return ok(r)
|
||||
err(StateRootNotFound)
|
||||
|
||||
proc pivotRecoverDB*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
): Result[SnapDbPivotRegistry,HexaryError] =
|
||||
## Restore pivot environment that was saved latest.
|
||||
let rc = pv.kvDb.persistentStateRootGet(NodeKey.default)
|
||||
if rc.isOk:
|
||||
return pv.pivotRecoverDB(rc.value.key)
|
||||
err(StateRootNotFound)
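
# Hedged sketch (illustration only): persist the current pivot and read the
# latest entry back via the `pivotSaveDB()`/`pivotRecoverDB()` pair above;
# the `header` argument is assumed to be the pivot block header of a running
# sync session.
proc pivotRoundTrip(pv: SnapDbRef; header: BlockHeader): bool =
  let saved = pv.pivotSaveDB(SnapDbPivotRegistry(header: header))
  if saved.isErr:
    return false
  let rc = pv.pivotRecoverDB()          # latest entry, found via the zero key
  return rc.isOk and rc.value.header.stateRoot == header.stateRoot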
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,513 +0,0 @@
|
||||
# nimbus-eth1
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
chronicles,
|
||||
eth/[common, p2p, rlp],
|
||||
stew/interval_set,
|
||||
../../../protocol,
|
||||
../../range_desc,
|
||||
"."/[hexary_desc, hexary_error, hexary_envelope, hexary_import,
|
||||
hexary_inspect, hexary_interpolate, hexary_paths, snapdb_desc,
|
||||
snapdb_persistent]
|
||||
|
||||
logScope:
|
||||
topics = "snap-db"
|
||||
|
||||
type
|
||||
SnapDbStorageSlotsRef* = ref object of SnapDbBaseRef
|
||||
peer: Peer ## For log messages
|
||||
accKey: NodeKey ## Accounts address hash (curr.unused)
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc to(h: Hash256; T: type NodeKey): T =
|
||||
h.data.T
|
||||
|
||||
#proc convertTo(data: openArray[byte]; T: type Hash256): T =
|
||||
# discard result.data.NodeKey.init(data) # size error => zero
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError:
|
||||
return err(RlpEncoding)
|
||||
except CatchableError:
|
||||
return err(SlotsNotFound)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc persistentStorageSlots(
|
||||
db: HexaryTreeDbRef; ## Current table
|
||||
ps: SnapDbStorageSlotsRef; ## For persistent database
|
||||
): Result[void,HexaryError]
|
||||
{.gcsafe, raises: [OSError,IOError,KeyError].} =
|
||||
## Store storage slots trie table on database
|
||||
if ps.rockDb.isNil:
|
||||
let rc = db.persistentStorageSlotsPut(ps.kvDb)
|
||||
if rc.isErr: return rc
|
||||
else:
|
||||
let rc = db.persistentStorageSlotsPut(ps.rockDb)
|
||||
if rc.isErr: return rc
|
||||
ok()
|
||||
|
||||
|
||||
proc collectStorageSlots(
|
||||
peer: Peer; ## for log messages
|
||||
base: NodeTag; ## before or at first account entry in `data`
|
||||
slotLists: seq[SnapStorage];
|
||||
): Result[seq[RLeafSpecs],HexaryError] =
|
||||
## Similar to `collectAccounts()`
|
||||
var rcSlots: seq[RLeafSpecs]
|
||||
|
||||
if slotLists.len != 0:
|
||||
let pathTag0 = slotLists[0].slotHash.to(NodeTag)
|
||||
|
||||
# Verify lower bound
|
||||
if pathTag0 < base:
|
||||
let error = LowerBoundAfterFirstEntry
|
||||
trace "collectStorageSlots()", peer, base, item=0,
|
||||
nSlotLists=slotLists.len, error
|
||||
return err(error)
|
||||
|
||||
# Add initial slot
|
||||
rcSlots.add RLeafSpecs(
|
||||
pathTag: slotLists[0].slotHash.to(NodeTag),
|
||||
payload: slotLists[0].slotData)
|
||||
|
||||
# Veify & add other accounts
|
||||
for n in 1 ..< slotLists.len:
|
||||
let nodeTag = slotLists[n].slotHash.to(NodeTag)
|
||||
|
||||
if nodeTag <= rcSlots[^1].pathTag:
|
||||
let error = SlotsNotSrictlyIncreasing
|
||||
trace "collectStorageSlots()", peer, item=n,
|
||||
nSlotLists=slotLists.len, error
|
||||
return err(error)
|
||||
|
||||
rcSlots.add RLeafSpecs(
|
||||
pathTag: nodeTag,
|
||||
payload: slotLists[n].slotData)
|
||||
|
||||
ok(rcSlots)
|
||||
|
||||
|
||||
proc importStorageSlots(
|
||||
ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor
|
||||
base: NodeTag; ## before or at first account entry in `data`
|
||||
data: AccountSlots; ## Account storage descriptor
|
||||
proof: seq[SnapProof]; ## Storage slots proof data
|
||||
): Result[seq[NodeSpecs],HexaryError]
|
||||
{.gcsafe, raises: [CatchableError].} =
|
||||
## Process storage slots for a particular storage root. See `importAccounts()`
|
||||
## for comments on the return value.
|
||||
let
|
||||
tmpDb = SnapDbBaseRef.init(ps, data.account.storageRoot.to(NodeKey))
|
||||
var
|
||||
slots: seq[RLeafSpecs] # validated slots to add to database
|
||||
dangling: seq[NodeSpecs] # return value
|
||||
proofStats: TrieNodeStat # `proof` data dangling links
|
||||
innerSubTrie: seq[NodeSpecs] # internal, collect dangling links
|
||||
if 0 < proof.len:
|
||||
let rc = tmpDb.hexaDb.mergeProofs(tmpDb.root, proof, ps.peer)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
block:
|
||||
let rc = ps.peer.collectStorageSlots(base, data.data)
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
slots = rc.value
|
||||
|
||||
if 0 < slots.len:
|
||||
if 0 < proof.len:
|
||||
# Inspect trie for dangling nodes. This is not a big deal here as the
|
||||
# proof data is typically small.
|
||||
let topTag = slots[^1].pathTag
|
||||
for w in proofStats.dangling:
|
||||
let iv = w.partialPath.hexaryEnvelope
|
||||
if iv.maxPt < base or topTag < iv.minPt:
|
||||
# Dangling link with partial path envelope outside accounts range
|
||||
discard
|
||||
else:
|
||||
# Overlapping partial path envelope.
|
||||
innerSubTrie.add w
|
||||
|
||||
# Build partial hexary trie
|
||||
let rc = tmpDb.hexaDb.hexaryInterpolate(
|
||||
tmpDb.root, slots, bootstrap = (proof.len == 0))
|
||||
if rc.isErr:
|
||||
return err(rc.error)
|
||||
|
||||
# Collect missing inner sub-trees in the reconstructed partial hexary
|
||||
# trie (if any).
|
||||
let bottomTag = slots[0].pathTag
|
||||
for w in innerSubTrie:
|
||||
if not ps.hexaDb.tab.hasKey(w.nodeKey.to(RepairKey)):
|
||||
# Verify that `base` is to the left of the first slot and there is
|
||||
# nothing in between.
|
||||
#
|
||||
# Without `proof` data available there can only be a complete
# set/list of storage slots, so there are no dangling nodes in the first
# place. But there must be `proof` data for an empty list.
|
||||
if w.partialPath.hexaryEnvelope.maxPt < bottomTag:
|
||||
return err(LowerBoundProofError)
|
||||
# Otherwise register left over entry
|
||||
dangling.add w
|
||||
|
||||
# Commit to main descriptor
|
||||
for k,v in tmpDb.hexaDb.tab.pairs:
|
||||
if not k.isNodeKey:
|
||||
return err(UnresolvedRepairNode)
|
||||
ps.hexaDb.tab[k] = v
|
||||
|
||||
elif proof.len == 0:
|
||||
# There must be a proof for an empty argument list.
|
||||
return err(LowerBoundProofError)
|
||||
|
||||
else:
|
||||
for w in proofStats.dangling:
|
||||
if base <= w.partialPath.hexaryEnvelope.maxPt:
|
||||
return err(LowerBoundProofError)
|
||||
dangling = proofStats.dangling
|
||||
|
||||
ok(dangling)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public constructor
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(
|
||||
T: type SnapDbStorageSlotsRef;
|
||||
pv: SnapDbRef;
|
||||
accKey: NodeKey;
|
||||
root: Hash256;
|
||||
peer: Peer = nil
|
||||
): T =
|
||||
## Constructor, starts a new accounts session.
|
||||
new result
|
||||
result.init(pv, root.to(NodeKey))
|
||||
result.peer = peer
|
||||
result.accKey = accKey
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getStorageSlotsFn*(
|
||||
ps: SnapDbStorageSlotsRef;
|
||||
): HexaryGetFn =
|
||||
## Return `HexaryGetFn` closure.
|
||||
let getFn = ps.kvDb.persistentStorageSlotsGetFn()
|
||||
return proc(key: openArray[byte]): Blob = getFn(ps.accKey, key)
|
||||
|
||||
proc getStorageSlotsFn*(
|
||||
pv: SnapDbRef;
|
||||
accKey: NodeKey;
|
||||
): HexaryGetFn =
|
||||
## Variant of `getStorageSlotsFn()` for captured `accKey` argument.
|
||||
let getFn = pv.kvDb.persistentStorageSlotsGetFn()
|
||||
return proc(key: openArray[byte]): Blob = getFn(accKey, key)
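
# A minimal sketch of using the returned closure with the hexary path helpers
# (hypothetical `pv`, `accKey`, `root`, and `path` values assumed):
#
#   let getFn = pv.getStorageSlotsFn(accKey)
#   let leaf = path.hexaryPath(root, getFn).leafData   # read-through to disk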
|
||||
|
||||
|
||||
proc importStorageSlots*(
|
||||
ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor
|
||||
data: AccountStorageRange; ## Account storage reply from `snap/1` protocol
|
||||
persistent = false; ## store data on disk
|
||||
): seq[HexaryNodeReport] =
|
||||
## Validate and import storage slots (using proofs as received with the snap
|
||||
## message `StorageRanges`). This function accumulates data in a memory table
|
||||
## which can be written to disk with the argument `persistent` set `true`. The
|
||||
## memory table is held in the descriptor argument `ps`.
|
||||
##
|
||||
## If there was an error when processing a particular `data` item, it will be
## reported with the return value providing the argument slot/index and error
## code.
|
||||
##
|
||||
## If there was an error storing persistent data, the last report item will
## have an error code only.
|
||||
##
|
||||
## TODO:
|
||||
## Reconsider how to handle the persistent storage trie, see
|
||||
## github.com/status-im/nim-eth/issues/9#issuecomment-814573755
|
||||
##
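## A minimal sketch of consuming the report list, assuming a hypothetical
## session descriptor `ps` and an `AccountStorageRange` reply `ranges`:
## ::
##   for rep in ps.importStorageSlots(ranges, persistent=true):
##     if rep.error != HexaryError(0):
##       echo "slot ", rep.slot, " error ", rep.error
##     elif 0 < rep.dangling.len:
##       echo "slot ", rep.slot, " dangling sub-tries ", rep.dangling.len
##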
|
||||
let
|
||||
peer = ps.peer
|
||||
nItems = data.storages.len
|
||||
sTop = nItems - 1
|
||||
var
|
||||
itemInx: Option[int]
|
||||
if 0 <= sTop:
|
||||
try:
|
||||
for n in 0 ..< sTop:
|
||||
# These ones always come without proof data => `NodeTag.default`
|
||||
itemInx = some(n)
|
||||
let rc = ps.importStorageSlots(
|
||||
NodeTag.default, data.storages[n], @[])
|
||||
if rc.isErr:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: rc.error)
|
||||
let p {.used.} = data.storages[n]
|
||||
trace "Storage slots item fails", peer, itemInx=n, nItems,
|
||||
accKey=p.account.accKey, stoRoot=p.account.storageRoot.to(NodeKey),
|
||||
nSlots=p.data.len, proofs=0, error=rc.error, nErrors=result.len
|
||||
|
||||
# Final one might come with proof data
|
||||
block:
|
||||
itemInx = some(sTop)
|
||||
let rc = ps.importStorageSlots(
|
||||
data.base, data.storages[sTop], data.proof)
|
||||
if rc.isErr:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: rc.error)
|
||||
let p {.used.} = data.storages[sTop]
|
||||
trace "Storage slots last item fails", peer, itemInx=sTop, nItems,
|
||||
accKey=p.account.accKey, stoRoot=p.account.storageRoot.to(NodeKey),
|
||||
nSlots=p.data.len, proofs=data.proof.len,
|
||||
error=rc.error, nErrors=result.len
|
||||
elif 0 < rc.value.len:
|
||||
result.add HexaryNodeReport(slot: itemInx, dangling: rc.value)
|
||||
|
||||
# Store to disk
|
||||
if persistent and 0 < ps.hexaDb.tab.len:
|
||||
itemInx = none(int)
|
||||
let rc = ps.hexaDb.persistentStorageSlots(ps)
|
||||
if rc.isErr:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: rc.error)
|
||||
|
||||
except RlpError:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: RlpEncoding)
|
||||
trace "Storage slot node error", peer, itemInx, nItems,
|
||||
nSlots=data.storages[sTop].data.len, proofs=data.proof.len,
|
||||
error=RlpEncoding, nErrors=result.len
|
||||
except OSError as e:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: OSErrorException)
|
||||
error "Import storage slots exception", peer, itemInx, nItems,
|
||||
name=($e.name), msg=e.msg, nErrors=result.len
|
||||
except IOError as e:
|
||||
result.add HexaryNodeReport(slot: itemInx, error: IOErrorException)
|
||||
error "Import storage slots exception", peer, itemInx, nItems,
|
||||
name=($e.name), msg=e.msg, nErrors=result.len
|
||||
except CatchableError as e:
|
||||
raiseAssert "Inconceivable @ importStorages: " & e.msg
|
||||
|
||||
#when extraTraceMessages:
|
||||
# if result.len == 0:
|
||||
# trace "Storage slots imported", peer, nItems,
|
||||
# nSlotLists=data.storages.len, proofs=data.proof.len
|
||||
|
||||
proc importStorageSlots*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer; ## For log messages, only
|
||||
data: AccountStorageRange; ## Account storage reply from `snap/1` protocol
|
||||
): seq[HexaryNodeReport] =
|
||||
## Variant of `importStorages()`
|
||||
SnapDbStorageSlotsRef.init(
|
||||
pv, Hash256().to(NodeKey), Hash256(), peer).importStorageSlots(
|
||||
data, persistent=true)
|
||||
|
||||
|
||||
proc importRawStorageSlotsNodes*(
|
||||
ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor
|
||||
nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
|
||||
reportNodes = {Leaf}; ## Additional node types to report
|
||||
persistent = false; ## store data on disk
|
||||
): seq[HexaryNodeReport] =
|
||||
## Store data nodes given as argument `nodes` on the persistent database.
|
||||
##
|
||||
## If there was an error when processing a particular `nodes` item, it will be
## reported with the return value providing the argument slot/index, node
## type, and error code.
|
||||
##
|
||||
## If there was an error storing persistent data, the last report item will
## have an error code only.
|
||||
##
|
||||
## Additional node items might be reported if the node type is in the
|
||||
## argument set `reportNodes`. These reported items will have no error
|
||||
## code set (i.e. `HexaryError(0)`.)
|
||||
##
|
||||
let
|
||||
peer = ps.peer
|
||||
db = HexaryTreeDbRef.init(ps)
|
||||
nItems = nodes.len
|
||||
var
|
||||
nErrors = 0
|
||||
slot: Option[int]
|
||||
try:
|
||||
# Import nodes
|
||||
for n,node in nodes:
|
||||
if 0 < node.data.len: # otherwise ignore empty placeholder
|
||||
slot = some(n)
|
||||
var rep = db.hexaryImport(node)
|
||||
if rep.error != HexaryError(0):
|
||||
rep.slot = slot
|
||||
result.add rep
|
||||
nErrors.inc
|
||||
trace "Error importing storage slots nodes", peer, inx=n, nItems,
|
||||
error=rep.error, nErrors
|
||||
elif rep.kind.isSome and rep.kind.unsafeGet in reportNodes:
|
||||
rep.slot = slot
|
||||
result.add rep
|
||||
|
||||
# Store to disk
|
||||
if persistent and 0 < db.tab.len:
|
||||
slot = none(int)
|
||||
let rc = db.persistentStorageSlots(ps)
|
||||
if rc.isErr:
|
||||
result.add HexaryNodeReport(slot: slot, error: rc.error)
|
||||
|
||||
except RlpError:
|
||||
result.add HexaryNodeReport(slot: slot, error: RlpEncoding)
|
||||
nErrors.inc
|
||||
trace "Error importing storage slots nodes", peer, slot, nItems,
|
||||
error=RlpEncoding, nErrors
|
||||
except KeyError as e:
|
||||
raiseAssert "Not possible @ importRawSorageSlotsNodes: " & e.msg
|
||||
except OSError as e:
|
||||
result.add HexaryNodeReport(slot: slot, error: OSErrorException)
|
||||
nErrors.inc
|
||||
error "Import storage slots nodes exception", peer, slot, nItems,
|
||||
name=($e.name), msg=e.msg, nErrors
|
||||
except IOError as e:
|
||||
result.add HexaryNodeReport(slot: slot, error: IOErrorException)
|
||||
nErrors.inc
|
||||
error "Import storage slots nodes exception", peer, slot, nItems,
|
||||
name=($e.name), msg=e.msg, nErrors
|
||||
|
||||
when extraTraceMessages:
|
||||
if nErrors == 0:
|
||||
trace "Raw storage slots nodes imported", peer, slot, nItems,
|
||||
report=result.len
|
||||
|
||||
proc importRawStorageSlotsNodes*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer, ## For log messages, only
|
||||
accKey: NodeKey; ## Account key
|
||||
nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
|
||||
reportNodes = {Leaf}; ## Additional node types to report
|
||||
): seq[HexaryNodeReport] =
|
||||
## Variant of `importRawNodes()` for persistent storage.
|
||||
SnapDbStorageSlotsRef.init(
|
||||
pv, accKey, Hash256(), peer).importRawStorageSlotsNodes(
|
||||
nodes, reportNodes, persistent=true)
|
||||
|
||||
|
||||
proc inspectStorageSlotsTrie*(
|
||||
ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor
|
||||
pathList = seq[Blob].default; ## Starting nodes for search
|
||||
resumeCtx: TrieNodeStatCtxRef = nil; ## Context for resuming inspection
|
||||
suspendAfter = high(uint64); ## To be resumed
|
||||
persistent = false; ## Read data from disk
|
||||
ignoreError = false; ## Always return partial results if any
|
||||
): Result[TrieNodeStat, HexaryError] =
|
||||
## Starting with the argument list `pathList`, find all the non-leaf nodes in
## the hexary trie which have at least one node key reference missing in
## the trie database. Argument `pathList` entries that do not refer to a
## valid node are silently ignored.
|
||||
##
|
||||
## Trie inspection can be automatically suspended after having visited
|
||||
## `suspendAfter` nodes to be resumed at the last state. An application of
|
||||
## this feature would look like
|
||||
## ::
|
||||
## var ctx = TrieNodeStatCtxRef()
|
||||
## while not ctx.isNil:
|
||||
## let rc = inspectStorageSlotsTrie(.., resumeCtx=ctx, suspendAfter=1024)
|
||||
## ...
|
||||
## ctx = rc.value.resumeCtx
|
||||
##
|
||||
let peer {.used.} = ps.peer
|
||||
var stats: TrieNodeStat
|
||||
noExceptionOops("inspectStorageSlotsTrie()"):
|
||||
if persistent:
|
||||
stats = ps.getStorageSlotsFn.hexaryInspectTrie(
|
||||
ps.root, pathList, resumeCtx, suspendAfter=suspendAfter)
|
||||
else:
|
||||
stats = ps.hexaDb.hexaryInspectTrie(
|
||||
ps.root, pathList, resumeCtx, suspendAfter=suspendAfter)
|
||||
|
||||
block checkForError:
|
||||
var error = TrieIsEmpty
|
||||
if stats.stopped:
|
||||
error = TrieLoopAlert
|
||||
trace "Inspect storage slots trie failed", peer, nPathList=pathList.len,
|
||||
nDangling=stats.dangling.len, stoppedAt=stats.level
|
||||
elif 0 < stats.level:
|
||||
break checkForError
|
||||
if ignoreError:
|
||||
return ok(stats)
|
||||
return err(error)
|
||||
|
||||
#when extraTraceMessages:
|
||||
# trace "Inspect storage slots trie ok", peer, nPathList=pathList.len,
|
||||
# nDangling=stats.dangling.len, level=stats.level
|
||||
|
||||
return ok(stats)
|
||||
|
||||
proc inspectStorageSlotsTrie*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer; ## For log messages, only
|
||||
accKey: NodeKey; ## Account key
|
||||
root: Hash256; ## state root
|
||||
pathList = seq[Blob].default; ## Starting paths for search
|
||||
resumeCtx: TrieNodeStatCtxRef = nil; ## Context for resuming inspection
|
||||
suspendAfter = high(uint64); ## To be resumed
|
||||
ignoreError = false; ## Always return partial results if any
|
||||
): Result[TrieNodeStat, HexaryError] =
|
||||
## Variant of `inspectStorageSlotsTrie()` for persistent storage.
|
||||
SnapDbStorageSlotsRef.init(
|
||||
pv, accKey, root, peer).inspectStorageSlotsTrie(
|
||||
pathList, resumeCtx, suspendAfter, persistent=true, ignoreError)
|
||||
|
||||
|
||||
proc getStorageSlotsData*(
|
||||
ps: SnapDbStorageSlotsRef; ## Re-usable session descriptor
|
||||
path: NodeKey; ## Account to visit
|
||||
persistent = false; ## Read data from disk
|
||||
): Result[Account,HexaryError] =
|
||||
## Fetch storage slots data.
|
||||
##
|
||||
## Caveat: There is no unit test yet
|
||||
let peer {.used.} = ps.peer
|
||||
var acc: Account
|
||||
|
||||
noExceptionOops("getStorageSlotsData()"):
|
||||
var leaf: Blob
|
||||
if persistent:
|
||||
leaf = path.hexaryPath(ps.root, ps.getStorageSlotsFn).leafData
|
||||
else:
|
||||
leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData
|
||||
|
||||
if leaf.len == 0:
|
||||
return err(SlotsNotFound)
|
||||
acc = rlp.decode(leaf,Account)
|
||||
|
||||
return ok(acc)
|
||||
|
||||
proc getStorageSlotsData*(
|
||||
pv: SnapDbRef; ## Base descriptor on `CoreDbRef`
|
||||
peer: Peer, ## For log messages, only
|
||||
accKey: NodeKey; ## Account key
|
||||
root: Hash256; ## state root
|
||||
path: NodeKey; ## Account to visit
|
||||
): Result[Account,HexaryError] =
|
||||
## Variant of `getStorageSlotsData()` for persistent storage.
|
||||
SnapDbStorageSlotsRef.init(
|
||||
pv, accKey, root, peer).getStorageSlotsData(path, persistent=true)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,177 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## This module fetches the Ethereum account state trie from network peers by
|
||||
## traversing leaves of the trie in leaf path order, making network requests
|
||||
## using the `snap` protocol.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sequtils,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
stew/interval_set,
|
||||
"../../.."/[protocol, protocol/trace_config],
|
||||
"../.."/[constants, range_desc, worker_desc],
|
||||
./get_error
|
||||
|
||||
logScope:
|
||||
topics = "snap-get"
|
||||
|
||||
type
|
||||
GetAccountRange* = object
|
||||
data*: PackedAccountRange ## Re-packed reply data
|
||||
withStorage*: seq[AccountSlotsHeader] ## Accounts with storage root
|
||||
withContract*: seq[AccountCodeHeader] ## Accounts with contract code
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getAccountRangeReq(
|
||||
buddy: SnapBuddyRef;
|
||||
root: Hash256;
|
||||
iv: NodeTagRange;
|
||||
pivot: string;
|
||||
): Future[Result[Option[SnapAccountRange],void]] {.async.} =
|
||||
let
|
||||
peer = buddy.peer
|
||||
try:
|
||||
let reply = await peer.getAccountRange(
|
||||
root, iv.minPt.to(Hash256).data, iv.maxPt.to(Hash256).data,
|
||||
fetchRequestBytesLimit)
|
||||
return ok(reply)
|
||||
except CatchableError as e:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetAccountRange reply", peer, pivot,
|
||||
error=(e.msg)
|
||||
return err()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getAccountRange*(
|
||||
buddy: SnapBuddyRef;
|
||||
stateRoot: Hash256; ## Current DB base (see `pivot` for logging)
|
||||
iv: NodeTagRange; ## Range to be fetched
|
||||
pivot: string; ## For logging, instead of `stateRoot`
|
||||
): Future[Result[GetAccountRange,GetError]] {.async.} =
|
||||
## Fetch data using the `snap#` protocol, returns the range covered.
|
||||
let
|
||||
peer {.used.} = buddy.peer
|
||||
if trSnapTracePacketsOk:
|
||||
trace trSnapSendSending & "GetAccountRange", peer, pivot, accRange=iv
|
||||
|
||||
let snAccRange = block:
|
||||
let rc = await buddy.getAccountRangeReq(stateRoot, iv, pivot)
|
||||
if rc.isErr:
|
||||
return err(GetNetworkProblem)
|
||||
if rc.value.isNone:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvTimeoutWaiting & "for AccountRange", peer, pivot
|
||||
return err(GetResponseTimeout)
|
||||
rc.value.get
|
||||
|
||||
var dd = GetAccountRange(
|
||||
data: PackedAccountRange(
|
||||
proof: snAccRange.proof.nodes,
|
||||
accounts: snAccRange.accounts
|
||||
# Re-pack accounts data
|
||||
.mapIt(PackedAccount(
|
||||
accKey: it.accHash.to(NodeKey),
|
||||
accBlob: it.accBody.encode))))
|
||||
|
||||
# Collect accounts with non-empty storage or contract code
|
||||
for w in snAccRange.accounts:
|
||||
if w.accBody.storageRoot != EMPTY_ROOT_HASH:
|
||||
# Collect accounts with non-empty storage
|
||||
dd.withStorage.add AccountSlotsHeader(
|
||||
accKey: w.accHash.to(NodeKey),
|
||||
storageRoot: w.accBody.storageRoot)
|
||||
if w.accBody.codeHash != EMPTY_CODE_HASH:
|
||||
# Collect accounts with contract data
|
||||
dd.withContract.add AccountCodeHeader(
|
||||
accKey: w.accHash.to(NodeKey),
|
||||
codeHash: w.accBody.codeHash)
|
||||
|
||||
let
|
||||
nAccounts = dd.data.accounts.len
|
||||
nProof = dd.data.proof.len
|
||||
|
||||
if nAccounts == 0:
|
||||
# github.com/ethereum/devp2p/blob/master/caps/snap.md#getaccountrange-0x00:
|
||||
# Notes:
|
||||
# * Nodes must always respond to the query.
|
||||
# * If the node does not have the state for the requested state root, it
|
||||
# must return an empty reply. It is the responsibility of the caller to
|
||||
# query a state not older than 128 blocks.
|
||||
# * The responding node is allowed to return less data than requested (own
|
||||
# QoS limits), but the node must return at least one account. If no
|
||||
# accounts exist between startingHash and limitHash, then the first (if
|
||||
# any) account after limitHash must be provided.
|
||||
if nProof == 0:
|
||||
# Maybe try another peer
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "empty AccountRange", peer, pivot,
|
||||
nAccounts, nProof, accRange="n/a", reqRange=iv
|
||||
return err(GetNoAccountsForStateRoot)
|
||||
|
||||
# So there is no data, but there is a proof.
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "terminal AccountRange", peer, pivot,
|
||||
nAccounts, nProof, accRange=NodeTagRange.new(iv.minPt, high(NodeTag)),
|
||||
reqRange=iv
|
||||
return ok(dd)
|
||||
|
||||
let (accMinPt, accMaxPt) = (
|
||||
dd.data.accounts[0].accKey.to(NodeTag),
|
||||
dd.data.accounts[^1].accKey.to(NodeTag))
|
||||
|
||||
if accMinPt < iv.minPt:
|
||||
# Not allowed
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvProtocolViolation & "min too small in AccountRange", peer,
|
||||
pivot, nAccounts, nProof, accRange=NodeTagRange.new(accMinPt, accMaxPt),
|
||||
reqRange=iv
|
||||
return err(GetAccountsMinTooSmall)
|
||||
|
||||
if iv.maxPt < accMaxPt:
|
||||
# github.com/ethereum/devp2p/blob/master/caps/snap.md#getaccountrange-0x00:
|
||||
# Notes:
|
||||
# * [..]
|
||||
# * [..]
|
||||
# * [..] If no accounts exist between startingHash and limitHash, then the
|
||||
# first (if any) account after limitHash must be provided.
|
||||
if 1 < nAccounts:
|
||||
# Geth always seems to allow the last account to be larger than the
|
||||
# limit (seen with Geth/v1.10.18-unstable-4b309c70-20220517.)
|
||||
if iv.maxPt < dd.data.accounts[^2].accKey.to(NodeTag):
|
||||
# The second largest should not exceed the top one requested.
|
||||
when extraTraceMessages:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvProtocolViolation & "AccountRange top exceeded",
|
||||
peer, pivot, nAccounts, nProof,
|
||||
accRange=NodeTagRange.new(iv.minPt, accMaxPt), reqRange=iv
|
||||
return err(GetAccountsMaxTooLarge)
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "AccountRange", peer, pivot, nAccounts, nProof,
|
||||
accRange=NodeTagRange.new(accMinPt, accMaxPt), reqRange=iv
|
||||
|
||||
return ok(dd)
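
# A minimal caller sketch (hypothetical `buddy`, `stateRoot`, `iv`, and
# `pivot` values assumed, to be run inside an async proc):
#
#   let rc = await buddy.getAccountRange(stateRoot, iv, pivot)
#   if rc.isOk:
#     let dd = rc.value
#     echo "accounts=", dd.data.accounts.len,
#       " withStorage=", dd.withStorage.len,
#       " withContract=", dd.withContract.len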
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,130 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
stew/byteutils,
|
||||
"../../.."/[protocol, protocol/trace_config, types],
|
||||
../../worker_desc,
|
||||
./get_error
|
||||
|
||||
logScope:
|
||||
topics = "snap-get"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getBlockHeader*(
|
||||
buddy: SnapBuddyRef;
|
||||
num: BlockNumber;
|
||||
): Future[Result[BlockHeader,GetError]]
|
||||
{.async.} =
|
||||
## Get single block header
|
||||
let
|
||||
peer = buddy.peer
|
||||
reqLen = 1u
|
||||
hdrReq = BlocksRequest(
|
||||
startBlock: HashOrNum(
|
||||
isHash: false,
|
||||
number: num),
|
||||
maxResults: reqLen,
|
||||
skip: 0,
|
||||
reverse: false)
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthSendSendingGetBlockHeaders, peer, header=num.toStr, reqLen
|
||||
|
||||
var hdrResp: Option[blockHeadersObj]
|
||||
try:
|
||||
hdrResp = await peer.getBlockHeaders(hdrReq)
|
||||
except CatchableError as e:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetByteCodes reply", peer,
|
||||
error=e.msg
|
||||
return err(GetNetworkProblem)
|
||||
|
||||
var hdrRespLen = 0
|
||||
if hdrResp.isSome:
|
||||
hdrRespLen = hdrResp.get.headers.len
|
||||
if hdrRespLen == 0:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, reqLen, response="n/a"
|
||||
return err(GetNoHeaderAvailable)
|
||||
|
||||
if hdrRespLen == 1:
|
||||
let
|
||||
header = hdrResp.get.headers[0]
|
||||
blockNumber = header.blockNumber
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
|
||||
return ok(header)
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen
|
||||
return err(GetTooManyHeaders)
|
||||
|
||||
|
||||
proc getBlockHeader*(
|
||||
buddy: SnapBuddyRef;
|
||||
hash: Hash256;
|
||||
): Future[Result[BlockHeader,GetError]]
|
||||
{.async.} =
|
||||
## Get single block header
|
||||
let
|
||||
peer = buddy.peer
|
||||
reqLen = 1u
|
||||
hdrReq = BlocksRequest(
|
||||
startBlock: HashOrNum(
|
||||
isHash: true,
|
||||
hash: hash),
|
||||
maxResults: reqLen,
|
||||
skip: 0,
|
||||
reverse: false)
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthSendSendingGetBlockHeaders, peer,
|
||||
header=hash.data.toHex, reqLen
|
||||
|
||||
var hdrResp: Option[blockHeadersObj]
|
||||
try:
|
||||
hdrResp = await peer.getBlockHeaders(hdrReq)
|
||||
except CatchableError as e:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetByteCodes reply", peer,
|
||||
error=e.msg
|
||||
return err(GetNetworkProblem)
|
||||
|
||||
var hdrRespLen = 0
|
||||
if hdrResp.isSome:
|
||||
hdrRespLen = hdrResp.get.headers.len
|
||||
if hdrRespLen == 0:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, reqLen, response="n/a"
|
||||
return err(GetNoHeaderAvailable)
|
||||
|
||||
if hdrRespLen == 1:
|
||||
let
|
||||
header = hdrResp.get.headers[0]
|
||||
blockNumber = header.blockNumber
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, hdrRespLen, blockNumber
|
||||
return ok(header)
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trEthRecvReceivedBlockHeaders, peer, reqLen, hdrRespLen
|
||||
return err(GetTooManyHeaders)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,132 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[options, sequtils],
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
"../../.."/[protocol, protocol/trace_config],
|
||||
"../.."/[constants, range_desc, worker_desc],
|
||||
./get_error
|
||||
|
||||
logScope:
|
||||
topics = "snap-get"
|
||||
|
||||
type
|
||||
# SnapByteCodes* = object
|
||||
# codes*: seq[Blob]
|
||||
|
||||
GetByteCodes* = object
|
||||
leftOver*: seq[NodeKey]       ## Requested keys not answered by the peer
extra*: seq[(NodeKey,Blob)]   ## Received codes that were not requested
kvPairs*: seq[(NodeKey,Blob)] ## Requested key and contract code pairs
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getByteCodesReq(
|
||||
buddy: SnapBuddyRef;
|
||||
keys: seq[Hash256];
|
||||
): Future[Result[Option[SnapByteCodes],void]]
|
||||
{.async.} =
|
||||
let
|
||||
peer = buddy.peer
|
||||
try:
|
||||
let reply = await peer.getByteCodes(keys, fetchRequestBytesLimit)
|
||||
return ok(reply)
|
||||
|
||||
except CatchableError as e:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetByteCodes reply", peer,
|
||||
error=e.msg
|
||||
return err()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getByteCodes*(
|
||||
buddy: SnapBuddyRef;
|
||||
keys: seq[NodeKey],
|
||||
): Future[Result[GetByteCodes,GetError]]
|
||||
{.async.} =
|
||||
## Fetch data using the `snap#` protocol, returns the byte codes requested
|
||||
## (if any.)
|
||||
let
|
||||
peer = buddy.peer
|
||||
nKeys = keys.len
|
||||
|
||||
if nKeys == 0:
|
||||
return err(GetEmptyRequestArguments)
|
||||
|
||||
if trSnapTracePacketsOk:
|
||||
trace trSnapSendSending & "GetByteCodes", peer, nkeys
|
||||
|
||||
let byteCodes = block:
|
||||
let rc = await buddy.getByteCodesReq keys.mapIt(it.to(Hash256))
|
||||
if rc.isErr:
|
||||
return err(GetNetworkProblem)
|
||||
if rc.value.isNone:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvTimeoutWaiting & "for reply to GetByteCodes", peer,
|
||||
nKeys
|
||||
return err(GetResponseTimeout)
|
||||
let blobs = rc.value.get.codes
|
||||
if nKeys < blobs.len:
|
||||
# Ooops, makes no sense
|
||||
return err(GetTooManyByteCodes)
|
||||
blobs
|
||||
|
||||
let
|
||||
nCodes = byteCodes.len
|
||||
|
||||
if nCodes == 0:
|
||||
# github.com/ethereum/devp2p/blob/master/caps/snap.md#getbytecodes-0x04
|
||||
#
|
||||
# Notes:
|
||||
# * Nodes must always respond to the query.
|
||||
# * The returned codes must be in the request order.
|
||||
# * The responding node is allowed to return less data than requested
|
||||
# (serving QoS limits), but the node must return at least one bytecode,
|
||||
# unless none requested are available, in which case it must answer with
|
||||
# an empty response.
|
||||
# * If a bytecode is unavailable, the node must skip that slot and proceed
|
||||
# to the next one. The node must not return nil or other placeholders.
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "empty ByteCodes", peer, nKeys, nCodes
|
||||
return err(GetNoByteCodesAvailable)
|
||||
|
||||
# Assemble return value
|
||||
var
|
||||
dd: GetByteCodes
|
||||
req = keys.toHashSet
|
||||
|
||||
for n in 0 ..< nCodes:
|
||||
let key = byteCodes[n].keccakHash.to(NodeKey)
|
||||
if key in req:
|
||||
dd.kvPairs.add (key, byteCodes[n])
|
||||
req.excl key
|
||||
else:
|
||||
dd.extra.add (key, byteCodes[n])
|
||||
|
||||
dd.leftOver = req.toSeq
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "ByteCodes", peer,
|
||||
nKeys, nCodes, nLeftOver=dd.leftOver.len, nExtra=dd.extra.len
|
||||
|
||||
return ok(dd)
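
# A minimal caller sketch (hypothetical `buddy` and `keys` values assumed,
# to be run inside an async proc):
#
#   let rc = await buddy.getByteCodes(keys)
#   if rc.isOk:
#     for (key, code) in rc.value.kvPairs:
#       discard                            # store `code` for code hash `key`
#     let retryKeys = rc.value.leftOver    # keys not answered, ask again later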
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,114 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronos,
|
||||
../../../sync_desc,
|
||||
../../constants
|
||||
|
||||
type
|
||||
GetErrorStatsRef* = ref object
|
||||
## Per-error counters so that connections will not be cut immediately
## after a particular error.
|
||||
peerDegraded*: bool
|
||||
nTimeouts*: uint
|
||||
nNoData*: uint
|
||||
nNetwork*: uint
|
||||
|
||||
GetError* = enum
|
||||
GetNothingSerious
|
||||
GetAccountsMaxTooLarge
|
||||
GetAccountsMinTooSmall
|
||||
GetEmptyAccountsArguments
|
||||
GetEmptyPartialRange
|
||||
GetEmptyRequestArguments
|
||||
GetNetworkProblem
|
||||
GetNoAccountsForStateRoot
|
||||
GetNoByteCodesAvailable
|
||||
GetNoHeaderAvailable
|
||||
GetNoStorageForAccounts
|
||||
GetNoTrieNodesAvailable
|
||||
GetResponseTimeout
|
||||
GetTooManyByteCodes
|
||||
GetTooManyHeaders
|
||||
GetTooManyStorageSlots
|
||||
GetTooManyTrieNodes
|
||||
|
||||
|
||||
proc getErrorReset*(stats: GetErrorStatsRef) =
|
||||
## Reset error counts after successful network operation
|
||||
stats[].reset
|
||||
|
||||
proc getErrorStopAfterSeriousOne*(
|
||||
ctrl: BuddyCtrlRef;
|
||||
error: GetError;
|
||||
stats: GetErrorStatsRef;
|
||||
): Future[bool]
|
||||
{.async.} =
|
||||
## Error handling after data protocol failed. Returns `true` if the current
|
||||
## worker should be terminated as *zombie*.
|
||||
case error:
|
||||
of GetResponseTimeout:
|
||||
stats.nTimeouts.inc
|
||||
if comErrorsTimeoutMax < stats.nTimeouts:
|
||||
# Mark this peer dead, i.e. avoid fetching from this peer for a while
|
||||
ctrl.zombie = true
|
||||
stats.peerDegraded = true
|
||||
return true
|
||||
|
||||
when 0 < comErrorsTimeoutSleepMSecs:
|
||||
# Otherwise try again some time later.
|
||||
await sleepAsync(comErrorsTimeoutSleepMSecs.milliseconds)
|
||||
|
||||
of GetNetworkProblem:
|
||||
stats.nNetwork.inc
|
||||
if comErrorsNetworkMax < stats.nNetwork:
|
||||
ctrl.zombie = true
|
||||
stats.peerDegraded = true
|
||||
return true
|
||||
|
||||
when 0 < comErrorsNetworkSleepMSecs:
|
||||
# Otherwise try again some time later.
|
||||
await sleepAsync(comErrorsNetworkSleepMSecs.milliseconds)
|
||||
|
||||
of GetNoAccountsForStateRoot,
|
||||
GetNoByteCodesAvailable,
|
||||
GetNoStorageForAccounts,
|
||||
GetNoHeaderAvailable,
|
||||
GetNoTrieNodesAvailable:
|
||||
stats.nNoData.inc
|
||||
if comErrorsNoDataMax < stats.nNoData:
|
||||
# Mark this peer dead, i.e. avoid fetching from this peer for a while
|
||||
ctrl.zombie = true
|
||||
return true
|
||||
|
||||
when 0 < comErrorsNoDataSleepMSecs:
|
||||
# Otherwise try again some time later.
|
||||
await sleepAsync(comErrorsNoDataSleepMSecs.milliseconds)
|
||||
|
||||
of GetAccountsMinTooSmall,
|
||||
GetAccountsMaxTooLarge,
|
||||
GetTooManyByteCodes,
|
||||
GetTooManyHeaders,
|
||||
GetTooManyStorageSlots,
|
||||
GetTooManyTrieNodes:
|
||||
# Mark this peer dead, i.e. avoid fetching from this peer for a while
|
||||
ctrl.zombie = true
|
||||
return true
|
||||
|
||||
of GetEmptyAccountsArguments,
|
||||
GetEmptyRequestArguments,
|
||||
GetEmptyPartialRange,
|
||||
GetError(0):
|
||||
discard
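
# A minimal caller sketch, assuming a hypothetical fetch result `rc` and a
# `GetErrorStatsRef` counter `stats` kept with the worker `buddy` (inside an
# async proc):
#
#   if rc.isErr:
#     if await buddy.ctrl.getErrorStopAfterSeriousOne(rc.error, stats):
#       return                             # peer marked zombie, give up on it
#     # otherwise the error was tolerable, just try again later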
|
||||
|
||||
# End
|
@ -1,210 +0,0 @@
|
||||
# Nimbus - Fetch account and storage states from peers by snapshot traversal
|
||||
#
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sequtils,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
stew/interval_set,
|
||||
"../../.."/[protocol, protocol/trace_config],
|
||||
"../.."/[constants, range_desc, worker_desc],
|
||||
./get_error
|
||||
|
||||
logScope:
|
||||
topics = "snap-get"
|
||||
|
||||
type
|
||||
# SnapStorage* = object
|
||||
# slotHash*: Hash256
|
||||
# slotData*: Blob
|
||||
#
|
||||
# SnapStorageRanges* = object
|
||||
# slotLists*: seq[seq[SnapStorage]]
|
||||
# proof*: seq[SnapProof]
|
||||
|
||||
GetStorageRanges* = object
|
||||
leftOver*: seq[AccountSlotsChanged]  ## Accounts not, or only partially, answered
data*: AccountStorageRange           ## Received storage slots and proof nodes
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getStorageRangesReq(
|
||||
buddy: SnapBuddyRef;
|
||||
root: Hash256;
|
||||
accounts: seq[Hash256];
|
||||
iv: Option[NodeTagRange];
|
||||
pivot: string;
|
||||
): Future[Result[Option[SnapStorageRanges],void]]
|
||||
{.async.} =
|
||||
let
|
||||
peer = buddy.peer
|
||||
try:
|
||||
var reply: Option[SnapStorageRanges]
|
||||
|
||||
if iv.isSome:
|
||||
reply = await peer.getStorageRanges(
|
||||
root, accounts,
|
||||
# here the interval bounds are an `array[32,byte]`
|
||||
iv.get.minPt.to(Hash256).data, iv.get.maxPt.to(Hash256).data,
|
||||
fetchRequestBytesLimit)
|
||||
else:
|
||||
reply = await peer.getStorageRanges(
|
||||
root, accounts,
|
||||
# here the interval bounds are of empty `Blob` type
|
||||
EmptyBlob, EmptyBlob,
|
||||
fetchRequestBytesLimit)
|
||||
return ok(reply)
|
||||
|
||||
except CatchableError as e:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetStorageRanges reply", peer, pivot,
|
||||
name=($e.name), error=(e.msg)
|
||||
return err()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getStorageRanges*(
|
||||
buddy: SnapBuddyRef;
|
||||
stateRoot: Hash256; ## Current DB base (`pivot` for logging)
|
||||
accounts: seq[AccountSlotsHeader]; ## List of per-account storage slots
|
||||
pivot: string; ## For logging, instead of `stateRoot`
|
||||
): Future[Result[GetStorageRanges,GetError]]
|
||||
{.async.} =
|
||||
## Fetch data using the `snap/1` protocol, returns the range covered.
|
||||
##
|
||||
## If the first `accounts` argument sequence item has the optional `subRange`
|
||||
## field set, only this account is fetched for the range `subRange`.
|
||||
## Otherwise all accounts are asked for without a range (`subRange` fields
|
||||
## are ignored for later accounts list items.)
|
||||
var nAccounts = accounts.len
|
||||
if nAccounts == 0:
|
||||
return err(GetEmptyAccountsArguments)
|
||||
|
||||
let
|
||||
peer {.used.} = buddy.peer
|
||||
iv = accounts[0].subRange
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
when extraTraceMessages:
|
||||
trace trSnapSendSending & "GetStorageRanges", peer, pivot, nAccounts,
|
||||
iv=iv.get(otherwise=FullNodeTagRange)
|
||||
else:
|
||||
trace trSnapSendSending & "GetStorageRanges", peer, pivot, nAccounts
|
||||
|
||||
let
|
||||
snStoRanges = block:
|
||||
let rc = await buddy.getStorageRangesReq(stateRoot,
|
||||
accounts.mapIt(it.accKey.to(Hash256)), iv, pivot)
|
||||
if rc.isErr:
|
||||
return err(GetNetworkProblem)
|
||||
if rc.value.isNone:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvTimeoutWaiting & "for StorageRanges", peer, pivot,
|
||||
nAccounts
|
||||
return err(GetResponseTimeout)
|
||||
if nAccounts < rc.value.get.slotLists.len:
|
||||
# Ooops, makes no sense
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "too many slot lists", peer, pivot,
|
||||
nAccounts, nReceived=rc.value.get.slotLists.len
|
||||
return err(GetTooManyStorageSlots)
|
||||
rc.value.get
|
||||
|
||||
nSlotLists = snStoRanges.slotLists.len
|
||||
nProof = snStoRanges.proof.nodes.len
|
||||
|
||||
if nSlotLists == 0:
|
||||
# github.com/ethereum/devp2p/blob/master/caps/snap.md#getstorageranges-0x02:
|
||||
#
|
||||
# Notes:
|
||||
# * Nodes must always respond to the query.
|
||||
# * If the node does not have the state for the requested state root or
|
||||
# for any requested account hash, it must return an empty reply. It is
|
||||
# the responsibility of the caller to query a state not older than 128
|
||||
# blocks; and the caller is expected to only ever query existing accounts.
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "empty StorageRanges", peer, pivot,
|
||||
nAccounts, nSlotLists, nProof, firstAccount=accounts[0].accKey
|
||||
return err(GetNoStorageForAccounts)
|
||||
|
||||
# Assemble return structure for given peer response
|
||||
var dd = GetStorageRanges(
|
||||
data: AccountStorageRange(
|
||||
proof: snStoRanges.proof.nodes))
|
||||
|
||||
# Set the left proof boundary (if any)
|
||||
if 0 < nProof and iv.isSome:
|
||||
dd.data.base = iv.unsafeGet.minPt
|
||||
|
||||
# Filter remaining `slots` responses:
|
||||
# * Accounts for empty ones go back to the `leftOver` list.
|
||||
for n in 0 ..< nSlotLists:
|
||||
if 0 < snStoRanges.slotLists[n].len or (n == nSlotLists-1 and 0 < nProof):
|
||||
# Storage slot data available. The last storage slots list may
|
||||
# be a proved empty sub-range.
|
||||
dd.data.storages.add AccountSlots(
|
||||
account: accounts[n], # known to be no fewer accounts than slots
|
||||
data: snStoRanges.slotLists[n])
|
||||
|
||||
else: # if n < nSlotLists-1 or nProof == 0:
|
||||
# Empty data here indicate missing data
|
||||
dd.leftOver.add AccountSlotsChanged(
|
||||
account: accounts[n])
|
||||
|
||||
if 0 < nProof:
|
||||
# Ok, we have a proof now. In that case, there is always a duplicate
|
||||
# of the proved entry on the `dd.leftOver` list.
|
||||
#
|
||||
# Note that `storages[^1]` exists due to the clause
|
||||
# `(n==nSlotLists-1 and 0<nProof)` in the above `for` loop.
|
||||
let topAcc = dd.data.storages[^1].account
|
||||
dd.leftOver.add AccountSlotsChanged(account: topAcc)
|
||||
if 0 < dd.data.storages[^1].data.len:
|
||||
let
|
||||
reqMaxPt = topAcc.subRange.get(otherwise = FullNodeTagRange).maxPt
|
||||
respMaxPt = dd.data.storages[^1].data[^1].slotHash.to(NodeTag)
|
||||
if respMaxPt < reqMaxPt:
|
||||
dd.leftOver[^1].newRange = some(
|
||||
NodeTagRange.new(respMaxPt + 1.u256, reqMaxPt))
|
||||
elif 0 < dd.data.storages.len:
|
||||
let topAcc = dd.data.storages[^1].account
|
||||
if topAcc.subRange.isSome:
|
||||
#
|
||||
# Fringe case when a partial request was answered without a proof.
|
||||
# This means, that the interval requested covers the complete trie.
|
||||
#
|
||||
# Copying the request to the `leftOver`, the ranges reflect the new
|
||||
# state: `topAcc.subRange.isSome` and `newRange.isNone`.
|
||||
dd.leftOver.add AccountSlotsChanged(account: topAcc)
|
||||
|
||||
# Complete the part that was not answered by the peer.
|
||||
dd.leftOver = dd.leftOver & accounts[nSlotLists ..< nAccounts].mapIt(
|
||||
AccountSlotsChanged(account: it))
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "StorageRanges", peer, pivot, nAccounts,
|
||||
nSlotLists, nProof, nSlotLstRc=dd.data.storages.len,
|
||||
nLeftOver=dd.leftOver.len
|
||||
|
||||
return ok(dd)
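
# A minimal caller sketch (hypothetical `buddy`, `stateRoot`, `accounts`, and
# `pivot` values assumed, to be run inside an async proc):
#
#   let rc = await buddy.getStorageRanges(stateRoot, accounts, pivot)
#   if rc.isOk:
#     for slots in rc.value.data.storages:
#       discard                            # import `slots`, e.g. via `importStorageSlots()`
#     let retry = rc.value.leftOver        # re-queue partially covered accounts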
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,188 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2018-2024 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sequtils,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
"../../.."/[protocol, protocol/trace_config],
|
||||
"../.."/[constants, range_desc, worker_desc],
|
||||
./get_error
|
||||
|
||||
logScope:
|
||||
topics = "snap-get"
|
||||
|
||||
type
|
||||
# SnapTrieNodes = object
|
||||
# nodes*: seq[Blob]
|
||||
|
||||
GetTrieNodes* = object
|
||||
leftOver*: seq[SnapTriePaths] ## Unprocessed data
|
||||
nodes*: seq[NodeSpecs] ## `nodeKey` field unused with `NodeSpecs`
|
||||
|
||||
ProcessReplyStep = object
|
||||
leftOver: SnapTriePaths # Unprocessed data sets
|
||||
nodes: seq[NodeSpecs] # Processed nodes
|
||||
topInx: int # Index of first unprocessed item
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getTrieNodesReq(
|
||||
buddy: SnapBuddyRef;
|
||||
stateRoot: Hash256;
|
||||
paths: seq[SnapTriePaths];
|
||||
pivot: string;
|
||||
): Future[Result[Option[SnapTrieNodes],void]]
|
||||
{.async.} =
|
||||
let
|
||||
peer = buddy.peer
|
||||
try:
|
||||
let reply = await peer.getTrieNodes(
|
||||
stateRoot, paths, fetchRequestBytesLimit)
|
||||
return ok(reply)
|
||||
|
||||
except CatchableError as e:
|
||||
let error {.used.} = e.msg
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "waiting for GetByteCodes reply", peer, pivot,
|
||||
error
|
||||
return err()
|
||||
|
||||
|
||||
proc processReplyStep(
|
||||
paths: SnapTriePaths;
|
||||
nodeBlobs: seq[Blob];
|
||||
startInx: int
|
||||
): ProcessReplyStep =
|
||||
## Process reply item, return unprocessed remainder
|
||||
# Account node request
|
||||
if paths.slotPaths.len == 0:
|
||||
if nodeBlobs[startInx].len == 0:
|
||||
result.leftOver.accPath = paths.accPath
|
||||
else:
|
||||
result.nodes.add NodeSpecs(
|
||||
partialPath: paths.accPath,
|
||||
data: nodeBlobs[startInx])
|
||||
result.topInx = startInx + 1
|
||||
return
|
||||
|
||||
# Storage paths request
|
||||
let
|
||||
nSlotPaths = paths.slotPaths.len
|
||||
maxLen = min(nSlotPaths, nodeBlobs.len - startInx)
|
||||
|
||||
# Fill up nodes
|
||||
for n in 0 ..< maxLen:
|
||||
let nodeBlob = nodeBlobs[startInx + n]
|
||||
if 0 < nodeBlob.len:
|
||||
result.nodes.add NodeSpecs(
|
||||
partialPath: paths.slotPaths[n],
|
||||
data: nodeBlob)
|
||||
else:
|
||||
result.leftOver.slotPaths.add paths.slotPaths[n]
|
||||
result.topInx = startInx + maxLen
|
||||
|
||||
# Was that all for this step? Otherwise add some left over.
|
||||
if maxLen < nSlotPaths:
|
||||
result.leftOver.slotPaths &= paths.slotPaths[maxLen ..< nSlotPaths]
|
||||
|
||||
if 0 < result.leftOver.slotPaths.len:
|
||||
result.leftOver.accPath = paths.accPath
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getTrieNodes*(
|
||||
buddy: SnapBuddyRef;
|
||||
stateRoot: Hash256; # Current DB base (see `pivot` for logging)
|
||||
paths: seq[SnapTriePaths]; # Nodes to fetch
|
||||
pivot: string; # For logging, instead of `stateRoot`
|
||||
): Future[Result[GetTrieNodes,GetError]]
|
||||
{.async.} =
|
||||
## Fetch data using the `snap#` protocol, returns the trie nodes requested
|
||||
## (if any.)
|
||||
let
|
||||
peer {.used.} = buddy.peer
|
||||
nGroups = paths.len
|
||||
|
||||
if nGroups == 0:
|
||||
return err(GetEmptyRequestArguments)
|
||||
|
||||
let nTotal = paths.mapIt(max(1,it.slotPaths.len)).foldl(a+b, 0)
|
||||
|
||||
if trSnapTracePacketsOk:
|
||||
trace trSnapSendSending & "GetTrieNodes", peer, pivot, nGroups, nTotal
|
||||
|
||||
let trieNodes = block:
|
||||
let rc = await buddy.getTrieNodesReq(stateRoot, paths, pivot)
|
||||
if rc.isErr:
|
||||
return err(GetNetworkProblem)
|
||||
if rc.value.isNone:
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvTimeoutWaiting & "for TrieNodes", peer, pivot, nGroups
|
||||
return err(GetResponseTimeout)
|
||||
let blobs = rc.value.get.nodes
|
||||
if nTotal < blobs.len:
|
||||
# Ooops, makes no sense
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvError & "too many TrieNodes", peer, pivot,
|
||||
nGroups, nExpected=nTotal, nReceived=blobs.len
|
||||
return err(GetTooManyTrieNodes)
|
||||
blobs
|
||||
|
||||
let
|
||||
nNodes = trieNodes.len
|
||||
|
||||
if nNodes == 0:
|
||||
# github.com/ethereum/devp2p/blob/master/caps/snap.md#gettrienodes-0x06
|
||||
#
|
||||
# Notes:
|
||||
# * Nodes must always respond to the query.
|
||||
# * The returned nodes must be in the request order.
|
||||
# * If the node does not have the state for the requested state root or for
|
||||
# any requested account paths, it must return an empty reply. It is the
|
||||
# responsibility of the caller to query a state not older than 128
|
||||
# blocks; and the caller is expected to only ever query existing trie
|
||||
# nodes.
|
||||
# * The responding node is allowed to return less data than requested
|
||||
# (serving QoS limits), but the node must return at least one trie node.
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "empty TrieNodes", peer, pivot, nGroups, nNodes
|
||||
return err(GetNoTrieNodesAvailable)
|
||||
|
||||
# Assemble return value
|
||||
var
|
||||
dd = GetTrieNodes()
|
||||
inx = 0
|
||||
for p in paths:
|
||||
let step = p.processReplyStep(trieNodes, inx)
|
||||
if 0 < step.leftOver.accPath.len or
|
||||
0 < step.leftOver.slotPaths.len:
|
||||
dd.leftOver.add step.leftOver
|
||||
if 0 < step.nodes.len:
|
||||
dd.nodes &= step.nodes
|
||||
inx = step.topInx
|
||||
if trieNodes.len <= inx:
|
||||
break
|
||||
|
||||
when trSnapTracePacketsOk:
|
||||
trace trSnapRecvReceived & "TrieNodes", peer, pivot,
|
||||
nGroups, nNodes, nLeftOver=dd.leftOver.len
|
||||
|
||||
return ok(dd)
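
# A minimal caller sketch (hypothetical `buddy`, `stateRoot`, `paths`, and
# `pivot` values assumed, to be run inside an async proc):
#
#   let rc = await buddy.getTrieNodes(stateRoot, paths, pivot)
#   if rc.isOk:
#     for node in rc.value.nodes:
#       discard                            # heal trie with `node.partialPath`/`node.data`
#     let retry = rc.value.leftOver        # unanswered path groups, ask again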
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,20 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2022-2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
./pass/[pass_desc, pass_init]
|
||||
|
||||
export
|
||||
PassActorRef,
|
||||
passActor,
|
||||
pass_init
|
||||
|
||||
# End
|
@ -1,65 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2022-2023 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed except
|
||||
# according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronos,
|
||||
../../worker_desc
|
||||
|
||||
type
|
||||
PassVoidFutureCtxFn* = proc(
|
||||
ctx: SnapCtxRef): Future[void]
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
PassVoidCtxFn* = proc(
|
||||
ctx: SnapCtxRef)
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
|
||||
PassVoidFutureBuddyFn* = proc(
|
||||
buddy: SnapBuddyRef): Future[void]
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
PassBoolBuddyBoolIntFn* = proc(
|
||||
buddy: SnapBuddyRef; last: bool; laps: int): bool
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
PassBoolBuddyFn* = proc(
|
||||
buddy: SnapBuddyRef): bool
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
PassVoidBuddyFn* = proc(
|
||||
buddy: SnapBuddyRef)
|
||||
{.gcsafe, raises: [CatchableError].}
|
||||
|
||||
|
||||
PassActorRef* = ref object of RootRef
|
||||
## Holds sync mode specs & methods for a particular sync state
|
||||
setup*: PassVoidCtxFn
|
||||
release*: PassVoidCtxFn
|
||||
start*: PassBoolBuddyFn
|
||||
stop*: PassVoidBuddyFn
|
||||
pool*: PassBoolBuddyBoolIntFn
|
||||
daemon*: PassVoidFutureCtxFn
|
||||
single*: PassVoidFutureBuddyFn
|
||||
multi*: PassVoidFutureBuddyFn
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc passActor*(ctx: SnapCtxRef): PassActorRef =
|
||||
## Getter
|
||||
ctx.pool.syncMode.tab[ctx.pool.syncMode.active].PassActorRef
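
# A minimal dispatch sketch, assuming a hypothetical scheduler hook with a
# `ctx: SnapCtxRef` argument (mirrors how the sync scheduler methods are
# forwarded to the active pass):
#
#   proc runSetupViaPass(ctx: SnapCtxRef) =
#     ctx.passActor.setup(ctx)             # invoke the active pass's `setup`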
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,376 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/p2p,
|
||||
stew/keyed_queue,
|
||||
../../../misc/[best_pivot, block_queue, sync_ctrl, ticker],
|
||||
../../../protocol,
|
||||
"../.."/[range_desc, worker_desc],
|
||||
../db/[snapdb_desc, snapdb_persistent],
|
||||
../get/get_error,
|
||||
./pass_desc
|
||||
|
||||
type
|
||||
FullPassCtxRef = ref object of RootRef
|
||||
## Pass local descriptor extension for full sync process
|
||||
startNumber: Option[BlockNumber] ## History starts here (used for logging)
|
||||
pivot: BestPivotCtxRef ## Global pivot descriptor
|
||||
bCtx: BlockQueueCtxRef ## Global block queue descriptor
|
||||
suspendAt: BlockNumber ## Suspend if persistent head is larger
|
||||
|
||||
FullPassBuddyRef = ref object of RootRef
|
||||
## Pass local descriptor extension for full sync process
|
||||
pivot: BestPivotWorkerRef ## Local pivot worker descriptor
|
||||
queue: BlockQueueWorkerRef ## Block queue worker
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
dumpDatabaseOnRollOver = false # or true # <--- will go away (debugging only)
|
||||
## Dump database before switching to full sync (debugging, testing)
|
||||
|
||||
when dumpDatabaseOnRollOver: # <--- will go away (debugging only)
|
||||
import ../../../../../tests/replay/undump_kvp
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Full worker " & info
|
||||
|
||||
template ignoreException(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
error "Exception at " & info & ":", name=($e.name), msg=(e.msg)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private getter/setter
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pass(pool: SnapCtxData): auto =
|
||||
## Getter, pass local descriptor
|
||||
pool.full.FullPassCtxRef
|
||||
|
||||
proc pass(only: SnapBuddyData): auto =
|
||||
## Getter, pass local descriptor
|
||||
only.full.FullPassBuddyRef
|
||||
|
||||
proc `pass=`(pool: var SnapCtxData; val: FullPassCtxRef) =
|
||||
## Setter, pass local descriptor
|
||||
pool.full = val
|
||||
|
||||
proc `pass=`(only: var SnapBuddyData; val: FullPassBuddyRef) =
|
||||
## Setter, pass local descriptor
|
||||
only.full = val
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc resumeAtNumber(ctx: SnapCtxRef): BlockNumber =
|
||||
## Resume full sync (if any)
|
||||
ignoreException("resumeAtNumber"):
|
||||
const nBackBlocks = maxHeadersFetch div 2
|
||||
let bestNumber = ctx.chain.db.getCanonicalHead().blockNumber
|
||||
if nBackBlocks < bestNumber:
|
||||
return bestNumber - nBackBlocks
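# Worked example (figures purely illustrative): if `maxHeadersFetch` were 384,
# the sync would resume 192 blocks behind the stored canonical head, so a head
# at block 1_000_000 gives a restart point of 999_808 and the most recent half
# batch of headers is fetched again rather than trusted from the previous run.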
|
||||
|
||||
|
||||
proc tickerUpdater(ctx: SnapCtxRef): TickerFullStatsUpdater =
|
||||
result = proc: TickerFullStats =
|
||||
let full = ctx.pool.pass
|
||||
|
||||
var stats: BlockQueueStats
|
||||
full.bCtx.blockQueueStats(stats)
|
||||
|
||||
let suspended = 0 < full.suspendAt and full.suspendAt <= stats.topAccepted
|
||||
|
||||
TickerFullStats(
|
||||
pivotBlock: ctx.pool.pass.startNumber,
|
||||
topPersistent: stats.topAccepted,
|
||||
nextStaged: stats.nextStaged,
|
||||
nextUnprocessed: stats.nextUnprocessed,
|
||||
nStagedQueue: stats.nStagedQueue,
|
||||
suspended: suspended,
|
||||
reOrg: stats.reOrg)
|
||||
|
||||
|
||||
proc processStaged(buddy: SnapBuddyRef): bool =
|
||||
## Fetch a work item from the `staged` queue and process it to be
|
||||
## stored on the persistent block chain.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
chainDb = ctx.chain.db
|
||||
chain = ctx.chain
|
||||
bq = buddy.only.pass.queue
|
||||
|
||||
# Get a work item, a list of headers + bodies
|
||||
wi = block:
|
||||
let rc = bq.blockQueueFetchStaged()
|
||||
if rc.isErr:
|
||||
return false
|
||||
rc.value
|
||||
|
||||
#startNumber = wi.headers[0].blockNumber -- unused
|
||||
|
||||
# Store in persistent database
|
||||
try:
|
||||
if chain.persistBlocks(wi.headers, wi.bodies) == ValidationResult.OK:
|
||||
bq.blockQueueAccept(wi)
|
||||
return true
|
||||
except CatchableError as e:
|
||||
error logTxt "storing persistent blocks failed", peer, range=($wi.blocks),
|
||||
name=($e.name), msg=(e.msg)
|
||||
|
||||
# Something went wrong. Recycle work item (needs to be re-fetched, anyway)
|
||||
let
|
||||
parentHash = wi.headers[0].parentHash
|
||||
try:
|
||||
# Check whether hash of the first block is consistent
|
||||
var parent: BlockHeader
|
||||
if chainDb.getBlockHeader(parentHash, parent):
|
||||
# First block parent is ok, so there might be other problems. Re-fetch
|
||||
# the blocks from another peer.
|
||||
trace "Storing persistent blocks failed", peer, range=($wi.blocks)
|
||||
bq.blockQueueRecycle(wi)
|
||||
buddy.ctrl.zombie = true
|
||||
return false
|
||||
except CatchableError as e:
|
||||
error logTxt "failed to access parent blocks", peer,
|
||||
blockNumber=wi.headers[0].blockNumber.toStr, name=($e.name), msg=e.msg
|
||||
|
||||
# Parent block header problem, so we might be in the middle of a re-org.
|
||||
# Set single mode backtrack following the offending parent hash.
|
||||
bq.blockQueueBacktrackFrom(wi)
|
||||
buddy.ctrl.multiOk = false
|
||||
|
||||
if wi.topHash.isNone:
|
||||
# Assuming that currently staged entries are on the wrong branch
|
||||
bq.blockQueueRecycleStaged()
|
||||
notice logTxt "starting chain re-org backtrack work item", peer,
|
||||
range=($wi.blocks)
|
||||
else:
|
||||
# Leave that block range in the staged list
|
||||
trace logTxt "resuming chain re-org backtrack work item", peer,
|
||||
range=($wi.blocks)
|
||||
discard
|
||||
|
||||
return false
|
||||
|
||||
proc suspendDownload(buddy: SnapBuddyRef): bool =
|
||||
## Check whether downloading should be suspended
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
full = ctx.pool.pass
|
||||
|
||||
# Update from RPC magic
|
||||
if full.suspendAt < ctx.pool.beaconHeader.blockNumber:
|
||||
full.suspendAt = ctx.pool.beaconHeader.blockNumber
|
||||
|
||||
# Optionally, some external update request
|
||||
if ctx.exCtrlFile.isSome:
|
||||
# Needs to be read as second line (index 1)
|
||||
let rc = ctx.exCtrlFile.syncCtrlBlockNumberFromFile(1)
|
||||
if rc.isOk and full.suspendAt < rc.value:
|
||||
full.suspendAt = rc.value
|
||||
|
||||
# Return `true` if download should be suspended
|
||||
if 0 < full.suspendAt:
|
||||
return full.suspendAt <= buddy.only.pass.queue.topAccepted
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, full sync admin handlers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc fullSyncSetup(ctx: SnapCtxRef) =
|
||||
# Set up descriptor
|
||||
let full = FullPassCtxRef()
|
||||
ctx.pool.pass = full
|
||||
|
||||
# Initialise full sync, resume from previous download (if any)
|
||||
let blockNumber = ctx.resumeAtNumber()
|
||||
if 0 < blockNumber:
|
||||
full.startNumber = some(blockNumber)
|
||||
full.bCtx = BlockQueueCtxRef.init(blockNumber + 1)
|
||||
else:
|
||||
full.bCtx = BlockQueueCtxRef.init()
|
||||
|
||||
# Initialise peer pivots in relaxed mode (not waiting for agreeing peers)
|
||||
full.pivot = BestPivotCtxRef.init(rng=ctx.pool.rng, minPeers=0)
|
||||
|
||||
# Update ticker
|
||||
ctx.pool.ticker.init(cb = ctx.tickerUpdater())
|
||||
|
||||
proc fullSyncRelease(ctx: SnapCtxRef) =
|
||||
ctx.pool.ticker.stop()
|
||||
ctx.pool.pass = nil
|
||||
|
||||
|
||||
proc fullSyncStart(buddy: SnapBuddyRef): bool =
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
|
||||
if peer.supports(protocol.eth) and peer.state(protocol.eth).initialized:
|
||||
let p = ctx.pool.pass
|
||||
|
||||
buddy.only.pass = FullPassBuddyRef()
|
||||
buddy.only.pass.queue = BlockQueueWorkerRef.init(p.bCtx, buddy.ctrl, peer)
|
||||
buddy.only.pass.pivot = BestPivotWorkerRef.init(p.pivot, buddy.ctrl, peer)
|
||||
|
||||
ctx.pool.ticker.startBuddy()
|
||||
buddy.ctrl.multiOk = false # confirm default mode for soft restart
|
||||
buddy.only.errors = GetErrorStatsRef()
|
||||
return true
|
||||
|
||||
proc fullSyncStop(buddy: SnapBuddyRef) =
|
||||
buddy.only.pass.pivot.clear()
|
||||
buddy.ctx.pool.ticker.stopBuddy()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, full sync action handlers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc fullSyncDaemon(ctx: SnapCtxRef) {.async.} =
|
||||
ctx.daemon = false
|
||||
|
||||
|
||||
proc fullSyncPool(buddy: SnapBuddyRef, last: bool; laps: int): bool =
|
||||
let ctx = buddy.ctx
|
||||
|
||||
# There is a soft re-setup after switching over to full sync mode if a pivot
# block header initialised from outside is available, i.e. a snap sync switch.
|
||||
if ctx.pool.fullHeader.isSome:
|
||||
let
|
||||
stateHeader = ctx.pool.fullHeader.unsafeGet
|
||||
initFullSync = ctx.pool.pass.startNumber.isNone
|
||||
|
||||
# Re-assign start number for logging (instead of genesis)
|
||||
ctx.pool.pass.startNumber = some(stateHeader.blockNumber)
|
||||
|
||||
if initFullSync:
|
||||
# Reinitialise block queue descriptor relative to current pivot
|
||||
ctx.pool.pass.bCtx = BlockQueueCtxRef.init(stateHeader.blockNumber + 1)
|
||||
|
||||
# Store pivot as parent hash in database
|
||||
ctx.pool.snapDb.kvDb.persistentBlockHeaderPut stateHeader
|
||||
|
||||
# Instead of genesis.
|
||||
ctx.chain.com.startOfHistory = stateHeader.blockHash
|
||||
|
||||
when dumpDatabaseOnRollOver: # <--- will go away (debugging only)
|
||||
# Dump database ... <--- will go away (debugging only)
|
||||
let nRecords = # <--- will go away (debugging only)
|
||||
ctx.pool.snapDb.rockDb.dumpAllDb # <--- will go away (debugging only)
|
||||
trace logTxt "dumped block chain database", nRecords
|
||||
|
||||
# Kick off ticker (was stopped by snap `release()` method)
|
||||
ctx.pool.ticker.start()
|
||||
|
||||
# Reset so that this action is not triggered again
|
||||
ctx.pool.fullHeader = none(BlockHeader)
|
||||
|
||||
# Soft re-start buddy peers if on the second lap.
|
||||
if 0 < laps and ctx.pool.pass.startNumber.isSome:
|
||||
if not buddy.fullSyncStart():
|
||||
# Start() method failed => wait for another peer
|
||||
buddy.ctrl.stopped = true
|
||||
if last:
|
||||
trace logTxt "soft restart done", peer=buddy.peer, last, laps,
|
||||
pivot=ctx.pool.pass.startNumber.toStr,
|
||||
mode=ctx.pool.syncMode.active, state= buddy.ctrl.state
|
||||
return false # the loop over peers stops automatically when exhausted
|
||||
|
||||
# Mind the gap, fill in if necessary (function is peer independent)
|
||||
buddy.only.pass.queue.blockQueueGrout()
|
||||
true # Stop after running once regardless of peer
|
||||
|
||||
|
||||
proc fullSyncSingle(buddy: SnapBuddyRef) {.async.} =
|
||||
let
|
||||
pv = buddy.only.pass.pivot
|
||||
bq = buddy.only.pass.queue
|
||||
bNum = bq.bestNumber.get(otherwise = bq.topAccepted + 1)
|
||||
|
||||
# Negotiate in order to derive the pivot header from this `peer`.
|
||||
if await pv.pivotNegotiate(some(bNum)):
|
||||
# Update/activate `bestNumber` from the pivot header
|
||||
bq.bestNumber = some(pv.pivotHeader.value.blockNumber)
|
||||
buddy.ctrl.multiOk = true
|
||||
when extraTraceMessages:
|
||||
trace logTxt "pivot accepted", peer=buddy.peer,
|
||||
minNumber=bNum.toStr, bestNumber=bq.bestNumber.unsafeGet.toStr
|
||||
return
|
||||
|
||||
if buddy.ctrl.stopped:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "single mode stopped", peer=buddy.peer
|
||||
return # done with this buddy
|
||||
|
||||
# Without waiting, this function repeats every 50ms (as set with the constant
|
||||
# `sync_sched.execLoopTimeElapsedMin`.)
|
||||
await sleepAsync 300.milliseconds
|
||||
|
||||
|
||||
proc fullSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
|
||||
## Full sync processing
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
bq = buddy.only.pass.queue
|
||||
|
||||
if buddy.suspendDownload:
|
||||
# Sleep for a while, then leave
|
||||
await sleepAsync(10.seconds)
|
||||
return
|
||||
|
||||
# Fetch work item
|
||||
let rc = await bq.blockQueueWorker()
|
||||
if rc.isErr:
|
||||
if rc.error == StagedQueueOverflow:
|
||||
# Mind the gap: Turn on pool mode if there are too many staged items.
|
||||
ctx.poolMode = true
|
||||
else:
|
||||
trace logTxt "error", peer=buddy.peer, error=rc.error
|
||||
return
|
||||
|
||||
# Update persistent database
|
||||
while buddy.processStaged() and not buddy.ctrl.stopped:
|
||||
trace logTxt "multi processed", peer=buddy.peer
|
||||
# Allow thread switch as `persistBlocks()` might be slow
|
||||
await sleepAsync(10.milliseconds)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc passFull*: auto =
|
||||
## Return full sync handler environment
|
||||
PassActorRef(
|
||||
setup: fullSyncSetup,
|
||||
release: fullSyncRelease,
|
||||
start: fullSyncStart,
|
||||
stop: fullSyncStop,
|
||||
pool: fullSyncPool,
|
||||
daemon: fullSyncDaemon,
|
||||
single: fullSyncSingle,
|
||||
multi: fullSyncMulti)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,102 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
../../../../common,
|
||||
../../../misc/ticker,
|
||||
../../worker_desc,
|
||||
../db/snapdb_desc,
|
||||
"."/[pass_full, pass_snap]
|
||||
|
||||
logScope:
|
||||
topics = "snap-init"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc updateBeaconHeaderCB(ctx: SnapCtxRef): SyncReqNewHeadCB =
|
||||
## Update beacon header. This function is intended as a call back function
|
||||
## for the RPC module.
|
||||
result = proc(h: BlockHeader) {.gcsafe, raises: [].} =
|
||||
if ctx.pool.beaconHeader.blockNumber < h.blockNumber:
|
||||
ctx.pool.beaconHeader = h
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc setupPass(ctx: SnapCtxRef) =
|
||||
## Set up sync mode specs table. This cannot be done at compile time.
|
||||
ctx.pool.syncMode.tab[SnapSyncMode] = passSnap()
|
||||
ctx.pool.syncMode.tab[FullSyncMode] = passFull()
|
||||
ctx.pool.syncMode.active = SnapSyncMode
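
# Illustrative sketch with a hypothetical proc name (assumes the `passActor`
# getter from `pass_desc` is in scope): the snap-to-full hand-over in
# `pass_snap` switches the active mode at run time by releasing the current
# pass and setting up the next one through the actor table.
proc switchToFullSync(ctx: SnapCtxRef) {.raises: [CatchableError].} =
  ctx.passActor.release(ctx)                # shut down the active (snap) pass
  ctx.pool.syncMode.active = FullSyncMode   # re-route subsequent dispatches
  ctx.passActor.setup(ctx)                  # initialise the full sync pass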
|
||||
|
||||
proc releasePass(ctx: SnapCtxRef) =
|
||||
discard
|
||||
|
||||
# --------------
|
||||
|
||||
proc enableRpcMagic(ctx: SnapCtxRef) =
|
||||
## Helper for `setup()`: Enable external pivot update via RPC
|
||||
ctx.chain.com.syncReqNewHead = ctx.updateBeaconHeaderCB
|
||||
ctx.chain.com.syncReqRelaxV2 = true
|
||||
|
||||
proc disableRpcMagic(ctx: SnapCtxRef) =
|
||||
## Helper for `release()`
|
||||
ctx.chain.com.syncReqNewHead = nil
|
||||
|
||||
# --------------
|
||||
|
||||
proc setupTicker(ctx: SnapCtxRef) =
|
||||
let blindTicker: TickerSnapStatsUpdater = proc: TickerSnapStats =
|
||||
discard
|
||||
if ctx.pool.enableTicker:
|
||||
ctx.pool.ticker = TickerRef.init(blindTicker)
|
||||
|
||||
proc releaseTicker(ctx: SnapCtxRef) =
|
||||
## Helper for `release()`
|
||||
ctx.pool.ticker.stop()
|
||||
ctx.pool.ticker = nil
|
||||
|
||||
# --------------
|
||||
|
||||
proc setupSnapDb(ctx: SnapCtxRef) =
|
||||
## Helper for `setup()`: Initialise snap sync database layer
|
||||
ctx.pool.snapDb = SnapDbRef.init(ctx.chain.db)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public start/stop and admin functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc passInitSetup*(ctx: SnapCtxRef) =
|
||||
## Global set up
|
||||
ctx.setupPass() # Set up sync sub-mode specs.
|
||||
ctx.setupSnapDb() # Set database backend, subject to change
|
||||
ctx.setupTicker() # Start log/status ticker (if any)
|
||||
ctx.enableRpcMagic() # Allow external pivot update via RPC
|
||||
|
||||
# Experimental, also used for debugging
|
||||
if ctx.exCtrlFile.isSome:
|
||||
warn "Snap sync accepts pivot block number or hash",
|
||||
syncCtrlFile=ctx.exCtrlFile.get
|
||||
|
||||
proc passInitRelease*(ctx: SnapCtxRef) =
|
||||
## Global clean up
|
||||
ctx.disableRpcMagic() # Disable external pivot update via RPC
|
||||
ctx.releaseTicker() # Stop log/status ticker (if any)
|
||||
ctx.releasePass() # Shut down sync methods
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,348 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/p2p,
|
||||
stew/[interval_set, keyed_queue],
|
||||
"../../.."/[handlers/eth, misc/ticker, protocol],
|
||||
"../.."/[range_desc, worker_desc],
|
||||
../db/[hexary_desc, snapdb_pivot],
|
||||
../get/get_error,
|
||||
./pass_desc,
|
||||
./pass_snap/helper/[beacon_header, storage_queue],
|
||||
./pass_snap/[pivot, snap_pass_desc]
|
||||
|
||||
logScope:
|
||||
topics = "snap-play"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
extraScrutinyDoubleCheckCompleteness = 1_000_000
|
||||
## Double check database whether it is complete (debugging, testing). This
|
||||
## action is slow and intended for debugging and testing use, only. The
|
||||
## numeric value limits the action to the maximal number of account in the
|
||||
## database.
|
||||
##
|
||||
## Set to `0` to disable.
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Snap worker " & info
|
||||
|
||||
template ignoreException(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
error "Exception at " & info & ":", name=($e.name), msg=(e.msg)
|
||||
|
||||
# --------------
|
||||
|
||||
proc disableWireServices(ctx: SnapCtxRef) =
|
||||
## Helper for `setup()`: Temporarily stop useless wire protocol services.
|
||||
ctx.ethWireCtx.txPoolEnabled = false
|
||||
|
||||
proc enableWireServices(ctx: SnapCtxRef) =
|
||||
## Helper for `release()`
|
||||
ctx.ethWireCtx.txPoolEnabled = true
|
||||
|
||||
# --------------
|
||||
|
||||
proc detectSnapSyncRecovery(ctx: SnapCtxRef) =
|
||||
## Helper for `setup()`: Initiate snap sync recovery (if any)
|
||||
let rc = ctx.pool.snapDb.pivotRecoverDB()
|
||||
if rc.isOk:
|
||||
let snap = ctx.pool.pass
|
||||
snap.recovery = RecoveryRef(state: rc.value)
|
||||
ctx.daemon = true
|
||||
|
||||
# Set up early initial pivot
|
||||
snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)
|
||||
trace logTxt "recovery started",
|
||||
checkpoint=(snap.pivotTable.topNumber.toStr & "(0)")
|
||||
if not ctx.pool.ticker.isNil:
|
||||
ctx.pool.ticker.startRecovery()
|
||||
|
||||
proc recoveryStepContinue(ctx: SnapCtxRef): Future[bool] {.async.} =
|
||||
let
|
||||
snap = ctx.pool.pass
|
||||
recov = snap.recovery
|
||||
if recov.isNil:
|
||||
return false
|
||||
|
||||
let
|
||||
checkpoint = recov.state.header.blockNumber.toStr & "(" & $recov.level & ")"
|
||||
topLevel = recov.level == 0
|
||||
env = block:
|
||||
let rc = snap.pivotTable.eq recov.state.header.stateRoot
|
||||
if rc.isErr:
|
||||
error logTxt "recovery pivot context gone", checkpoint, topLevel
|
||||
return false
|
||||
rc.value
|
||||
|
||||
# Cosmetics: allow other processes (e.g. ticker) to log the current recovery
|
||||
# state. There is no other intended purpose of this wait state.
|
||||
await sleepAsync 1100.milliseconds
|
||||
|
||||
#when extraTraceMessages:
|
||||
# trace "Recovery continued ...", checkpoint, topLevel,
|
||||
# nAccounts=recov.state.nAccounts, nDangling=recov.state.dangling.len
|
||||
|
||||
# Update pivot data from recovery checkpoint
|
||||
env.pivotRecoverFromCheckpoint(ctx, topLevel)
|
||||
|
||||
# Fetch next recovery record if there is any
|
||||
if recov.state.predecessor.isZero:
|
||||
#when extraTraceMessages:
|
||||
# trace "Recovery done", checkpoint, topLevel
|
||||
return false
|
||||
let rc = ctx.pool.snapDb.pivotRecoverDB(recov.state.predecessor)
|
||||
if rc.isErr:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "stale pivot, recovery stopped", checkpoint, topLevel
|
||||
return false
|
||||
|
||||
# Set up next level pivot checkpoint
|
||||
snap.recovery = RecoveryRef(
|
||||
state: rc.value,
|
||||
level: recov.level + 1)
|
||||
|
||||
# Push onto pivot table and continue recovery (i.e. do not stop it yet)
|
||||
snap.pivotTable.reverseUpdate(snap.recovery.state.header, ctx)
|
||||
|
||||
return true # continue recovery
|
||||
|
||||
# --------------
|
||||
|
||||
proc snapSyncCompleteOk(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
): Future[bool]
|
||||
{.async.} =
|
||||
## Check whether this pivot is fully downloaded. The `async` part is for
|
||||
## debugging only, and should not be used on a large database as it uses
## quite a bit of computational resources.
|
||||
if env.pivotCompleteOk():
|
||||
when 0 < extraScrutinyDoubleCheckCompleteness:
|
||||
# Larger sizes might be infeasible
|
||||
if env.nAccounts <= extraScrutinyDoubleCheckCompleteness:
|
||||
if not await env.pivotVerifyComplete(ctx):
|
||||
error logTxt "inconsistent state, pivot incomplete",
|
||||
pivot=env.stateHeader.blockNumber.toStr, nAccounts=env.nAccounts
|
||||
return false
|
||||
ctx.pool.pass.completedPivot = env
|
||||
ctx.poolMode = true # Fast sync mode must be synchronized among all peers
|
||||
return true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, snap sync admin handlers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc snapSyncSetup(ctx: SnapCtxRef) =
|
||||
# Set up snap sync descriptor
|
||||
ctx.pool.pass = SnapPassCtxRef()
|
||||
|
||||
# For snap sync book keeping
|
||||
ctx.pool.pass.coveredAccounts = NodeTagRangeSet.init()
|
||||
ctx.pool.ticker.init(cb = ctx.pool.pass.pivotTable.tickerStats(ctx))
|
||||
|
||||
ctx.disableWireServices() # Stop unwanted public services
|
||||
ctx.detectSnapSyncRecovery() # Check for recovery mode
|
||||
|
||||
proc snapSyncRelease(ctx: SnapCtxRef) =
|
||||
ctx.enableWireServices() # re-enable public services
|
||||
ctx.pool.ticker.stop()
|
||||
|
||||
proc snapSyncStart(buddy: SnapBuddyRef): bool =
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
if peer.supports(protocol.snap) and
|
||||
peer.supports(protocol.eth) and
|
||||
peer.state(protocol.eth).initialized:
|
||||
ctx.pool.ticker.startBuddy()
|
||||
buddy.ctrl.multiOk = false # confirm default mode for soft restart
|
||||
buddy.only.errors = GetErrorStatsRef()
|
||||
return true
|
||||
|
||||
proc snapSyncStop(buddy: SnapBuddyRef) =
|
||||
buddy.ctx.pool.ticker.stopBuddy()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, snap sync action handlers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc snapSyncPool(buddy: SnapBuddyRef, last: bool, laps: int): bool =
|
||||
## Enabled when `buddy.ctrl.poolMode` is `true`
|
||||
##
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
snap = ctx.pool.pass
|
||||
env = snap.completedPivot
|
||||
|
||||
# Check whether the snapshot is complete. If so, switch to full sync mode.
|
||||
# This process needs to be applied to all buddy peers.
|
||||
if not env.isNil:
|
||||
ignoreException("snapSyncPool"):
|
||||
# Stop all peers
|
||||
buddy.snapSyncStop()
|
||||
# After the last buddy peer has been stopped, switch to full sync mode
# and repeat the loop over buddy peers in order to re-start them.
|
||||
if last:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "switch to full sync", peer=buddy.peer, last, laps,
|
||||
pivot=env.stateHeader.blockNumber.toStr,
|
||||
mode=ctx.pool.syncMode.active, state= buddy.ctrl.state
|
||||
ctx.snapSyncRelease()
|
||||
ctx.pool.syncMode.active = FullSyncMode
|
||||
ctx.passActor.setup(ctx)
|
||||
ctx.poolMode = true # repeat looping over peers
|
||||
ctx.pool.fullHeader = some(env.stateHeader) # Full sync start here
|
||||
|
||||
return false # the loop over peers stops automatically when exhausted
|
||||
|
||||
# Clean up empty pivot slots (never the top one.) This needs to be run on
|
||||
# a single peer only. So the loop can stop immediately (returning `true`)
|
||||
# after this job is done.
|
||||
var rc = snap.pivotTable.beforeLast
|
||||
while rc.isOK:
|
||||
let (key, env) = (rc.value.key, rc.value.data)
|
||||
if env.fetchAccounts.processed.isEmpty:
|
||||
snap.pivotTable.del key
|
||||
rc = snap.pivotTable.prev(key)
|
||||
true # Stop ok
|
||||
|
||||
|
||||
proc snapSyncDaemon(ctx: SnapCtxRef) {.async.} =
|
||||
## Enabled while `ctx.daemon` is `true`
|
||||
##
|
||||
if not ctx.pool.pass.recovery.isNil:
|
||||
if not await ctx.recoveryStepContinue():
|
||||
# Done, stop recovery
|
||||
ctx.pool.pass.recovery = nil
|
||||
ctx.daemon = false
|
||||
|
||||
# Update logging
|
||||
if not ctx.pool.ticker.isNil:
|
||||
ctx.pool.ticker.stopRecovery()
|
||||
|
||||
|
||||
proc snapSyncSingle(buddy: SnapBuddyRef) {.async.} =
|
||||
## Enabled while
|
||||
## * `buddy.ctrl.multiOk` is `false`
|
||||
## * `buddy.ctrl.poolMode` is `false`
|
||||
##
|
||||
# External beacon header updater
|
||||
await buddy.beaconHeaderUpdateFromFile()
|
||||
|
||||
# Dedicate some process cycles to the recovery process (if any)
|
||||
if not buddy.ctx.pool.pass.recovery.isNil:
|
||||
when extraTraceMessages:
|
||||
trace "Throttling single mode in favour of recovery", peer=buddy.peer
|
||||
await sleepAsync 900.milliseconds
|
||||
|
||||
await buddy.pivotApprovePeer()
|
||||
buddy.ctrl.multiOk = true
|
||||
|
||||
|
||||
proc snapSyncMulti(buddy: SnapBuddyRef): Future[void] {.async.} =
|
||||
## Enabled while
|
||||
## * `buddy.ctx.multiOk` is `true`
|
||||
## * `buddy.ctx.poolMode` is `false`
|
||||
##
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
|
||||
# Fetch latest state root environment
|
||||
env = block:
|
||||
let rc = ctx.pool.pass.pivotTable.lastValue
|
||||
if rc.isErr:
|
||||
buddy.ctrl.multiOk = false
|
||||
return # nothing to do
|
||||
rc.value
|
||||
|
||||
# Check whether this pivot is fully downloaded
|
||||
if await env.snapSyncCompleteOk(ctx):
|
||||
return
|
||||
|
||||
# If this is a new snap sync pivot, the previous one can be cleaned up and
|
||||
# archived. There is no point in keeping some older space consuming state
|
||||
# data any longer.
|
||||
ctx.pool.pass.pivotTable.beforeTopMostlyClean()
|
||||
|
||||
let
|
||||
peer = buddy.peer
|
||||
pivot = env.stateHeader.blockNumber.toStr # for logging
|
||||
fa = env.fetchAccounts
|
||||
|
||||
when extraTraceMessages:
|
||||
trace "Multi sync runner", peer, pivot, nAccounts=env.nAccounts,
|
||||
processed=fa.processed.fullPC3, nStoQ=env.storageQueueTotal(),
|
||||
nSlotLists=env.nSlotLists
|
||||
|
||||
# This one is the syncing work horse which downloads the database
|
||||
await env.execSnapSyncAction(buddy)
|
||||
|
||||
# Various logging entries (after accounts and storage slots download)
|
||||
let
|
||||
nAccounts = env.nAccounts
|
||||
nSlotLists = env.nSlotLists
|
||||
processed = fa.processed.fullPC3
|
||||
|
||||
# Archive this pivot environment if it has become stale
|
||||
if env.archived:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "mothballing", peer, pivot, nAccounts, nSlotLists
|
||||
env.pivotMothball()
|
||||
return
|
||||
|
||||
# Save state so sync can be resumed at next start up
|
||||
let rc = env.saveCheckpoint(ctx)
|
||||
if rc.isOk:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "saved checkpoint", peer, pivot, nAccounts,
|
||||
processed, nStoQ=env.storageQueueTotal(), nSlotLists,
|
||||
blobSize=rc.value
|
||||
return
|
||||
|
||||
error logTxt "failed to save checkpoint", peer, pivot, nAccounts,
|
||||
processed, nStoQ=env.storageQueueTotal(), nSlotLists,
|
||||
error=rc.error
|
||||
|
||||
# Check whether this pivot is fully downloaded
|
||||
discard await env.snapSyncCompleteOk(ctx)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc passSnap*: auto =
|
||||
## Return snap sync handler environment
|
||||
PassActorRef(
|
||||
setup: snapSyncSetup,
|
||||
release: snapSyncRelease,
|
||||
start: snapSyncStart,
|
||||
stop: snapSyncStop,
|
||||
pool: snapSyncPool,
|
||||
daemon: snapSyncDaemon,
|
||||
single: snapSyncSingle,
|
||||
multi: snapSyncMulti)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,377 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Heal accounts DB
|
||||
## ================
|
||||
##
|
||||
## This module is a variation of the `swap-in` module in the sense that it
|
||||
## searches for missing nodes in the database (which means that nodes which
|
||||
## link to missing ones must exist), and then fetches the nodes from the
|
||||
## network.
|
||||
##
|
||||
## Algorithm
|
||||
## ---------
|
||||
##
|
||||
## * Run `swapInAccounts()` so that inheritable sub-tries are imported from
|
||||
## previous pivots.
|
||||
##
|
||||
## * Find dangling nodes in the current account trie via `findMissingNodes()`.
|
||||
##
|
||||
## * Install those nodes from the network.
|
||||
##
|
||||
## * Rinse and repeat
|
||||
##
|
||||
## Discussion
|
||||
## ----------
|
||||
##
|
||||
## A worst case scenario of a potentially failing `findMissingNodes()` call
|
||||
## must be solved by fetching and storing more accounts and running this
|
||||
## healing algorithm again.
|
||||
##
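## In outline (see `healAccounts()` and `accountsHealingImpl()` further down),
## one healing round proceeds roughly as:
##
##   while not env.fetchAccounts.processed.isFull and buddy.ctrl.running:
##     let missing = await buddy.compileMissingNodesList(env) # dangling nodes
##     if missing.len == 0: break                             # nothing to heal
##     let nodes = await buddy.getNodesFromNetwork(missing, ignore, env)
##     discard db.importRawAccountsNodes(peer, nodes)         # merge into trie
##     # .. rejected paths are added to `ignore` and re-tried in a later round
##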
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[math, sequtils, sets, tables],
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p, trie/nibbles, trie/trie_defs, rlp],
|
||||
stew/[byteutils, interval_set, keyed_queue],
|
||||
../../../../../utils/prettify,
|
||||
../../../../protocol,
|
||||
"../../.."/[constants, range_desc],
|
||||
../../get/[get_error, get_trie_nodes],
|
||||
../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_nearby,
|
||||
hexary_paths, hexary_range, snapdb_accounts],
|
||||
./helper/[missing_nodes, storage_queue, swap_in],
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-acc"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Accounts heal " & info
|
||||
|
||||
when false:
|
||||
proc `$`(node: NodeSpecs): string =
|
||||
node.partialPath.toHex
|
||||
|
||||
proc `$`(iv: NodeTagRange): string =
|
||||
iv.fullPC3
|
||||
|
||||
proc `$`(rs: NodeTagRangeSet): string =
|
||||
rs.fullPC3
|
||||
|
||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||
(sumUp.to(float) / (2.0^256)).toPC(n)
|
||||
|
||||
proc healingCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): string =
|
||||
let ctx = buddy.ctx
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"nAccounts=" & $env.nAccounts & "," &
|
||||
("covered=" & $env.fetchAccounts.processed & "/" &
|
||||
$ctx.pool.pass.coveredAccounts ) & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template discardRlpError(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError:
|
||||
discard
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc compileMissingNodesList(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): Future[seq[NodeSpecs]]
|
||||
{.async.} =
|
||||
## Find some missing glue nodes in accounts database.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer {.used.} = buddy.peer
|
||||
rootKey = env.stateHeader.stateRoot.to(NodeKey)
|
||||
getFn = ctx.pool.snapDb.getAccountFn
|
||||
fa {.used.} = env.fetchAccounts
|
||||
|
||||
# Import from earlier run
|
||||
if ctx.swapInAccounts(env) != 0:
|
||||
discard ctx.swapInAccounts(env)
|
||||
|
||||
if not fa.processed.isFull:
|
||||
let mlv = await fa.missingNodesFind(
|
||||
rootKey, getFn,
|
||||
healAccountsInspectionPlanBLevel,
|
||||
healAccountsInspectionPlanBRetryMax,
|
||||
healAccountsInspectionPlanBRetryNapMSecs)
|
||||
|
||||
# Clean up empty account ranges found while looking for nodes
|
||||
if not mlv.emptyGaps.isNil:
|
||||
for w in mlv.emptyGaps.increasing:
|
||||
discard env.fetchAccounts.processed.merge w
|
||||
env.fetchAccounts.unprocessed.reduce w
|
||||
discard buddy.ctx.pool.pass.coveredAccounts.merge w
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "missing nodes", peer,
|
||||
ctx=buddy.healingCtx(env), nLevel=mlv.level, nVisited=mlv.visited,
|
||||
nResult=mlv.missing.len, result=mlv.missing.toPC
|
||||
|
||||
return mlv.missing
|
||||
|
||||
|
||||
proc getNodesFromNetwork(
|
||||
buddy: SnapBuddyRef;
|
||||
missingNodes: seq[NodeSpecs]; # Nodes to fetch from the network
|
||||
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||
env: SnapPivotRef;
|
||||
): Future[seq[NodeSpecs]]
|
||||
{.async.} =
|
||||
## Extract from `nodes.missing` the next batch of nodes that need
## to be merged into the database.
|
||||
let
|
||||
peer {.used.} = buddy.peer
|
||||
rootHash = env.stateHeader.stateRoot
|
||||
pivot = env.stateHeader.blockNumber.toStr # for logging in `getTrieNodes()`
|
||||
|
||||
# Initialise for fetching nodes from the network via `getTrieNodes()`
|
||||
var
|
||||
nodeKey: Table[Blob,NodeKey] # Temporary `path -> key` mapping
|
||||
pathList: seq[SnapTriePaths] # Function argument for `getTrieNodes()`
|
||||
|
||||
# There is no point in fetching too many nodes as the request would be rejected.
# So the rest of the `missingNodes` list is ignored, to be picked up later.
|
||||
for w in missingNodes:
|
||||
if w.partialPath notin ignore and not nodeKey.hasKey(w.partialPath):
|
||||
pathList.add SnapTriePaths(accPath: w.partialPath)
|
||||
nodeKey[w.partialPath] = w.nodeKey
|
||||
if fetchRequestTrieNodesMax <= pathList.len:
|
||||
break
|
||||
|
||||
if 0 < pathList.len:
|
||||
# Fetch nodes from the network.
|
||||
let rc = await buddy.getTrieNodes(rootHash, pathList, pivot)
|
||||
if rc.isOk:
|
||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||
buddy.only.errors.getErrorReset()
|
||||
|
||||
# Forget about unfetched missing nodes, will be picked up later
|
||||
return rc.value.nodes.mapIt(NodeSpecs(
|
||||
partialPath: it.partialPath,
|
||||
nodeKey: nodeKey[it.partialPath],
|
||||
data: it.data))
|
||||
|
||||
# Process error ...
|
||||
let
|
||||
error = rc.error
|
||||
ok = await buddy.ctrl.getErrorStopAfterSeriousOne(
|
||||
error, buddy.only.errors)
|
||||
when extraTraceMessages:
|
||||
trace logTxt "reply error", peer, ctx=buddy.healingCtx(env),
|
||||
error, stop=ok
|
||||
else:
|
||||
discard ok
|
||||
|
||||
return @[]
|
||||
|
||||
|
||||
proc kvAccountLeaf(
|
||||
buddy: SnapBuddyRef;
|
||||
node: NodeSpecs;
|
||||
env: SnapPivotRef;
|
||||
): (bool,NodeKey,Account) =
|
||||
## Re-read leaf node from persistent database (if any)
|
||||
var nNibbles = -1
|
||||
discardRlpError("kvAccountLeaf"):
|
||||
let
|
||||
nodeRlp = rlpFromBytes node.data
|
||||
prefix = (hexPrefixDecode node.partialPath)[1]
|
||||
segment = (hexPrefixDecode nodeRlp.listElem(0).toBytes)[1]
|
||||
nibbles = prefix & segment
|
||||
|
||||
nNibbles = nibbles.len
|
||||
if nNibbles == 64:
|
||||
let
|
||||
data = nodeRlp.listElem(1).toBytes
|
||||
nodeKey = nibbles.getBytes.convertTo(NodeKey)
|
||||
accData = rlp.decode(data,Account)
|
||||
return (true, nodeKey, accData)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "non-leaf node path or corrupt data", peer=buddy.peer,
|
||||
ctx=buddy.healingCtx(env), nNibbles
|
||||
|
||||
|
||||
proc registerAccountLeaf(
|
||||
buddy: SnapBuddyRef;
|
||||
accKey: NodeKey;
|
||||
acc: Account;
|
||||
env: SnapPivotRef;
|
||||
) =
|
||||
## Process single account node as would be done with an interval by
|
||||
## the `storeAccounts()` function
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
rootKey = env.stateHeader.stateRoot.to(NodeKey)
|
||||
getFn = ctx.pool.snapDb.getAccountFn
|
||||
pt = accKey.to(NodeTag)
|
||||
|
||||
# Extend interval [pt,pt] if possible
|
||||
var iv: NodeTagRange
|
||||
try:
|
||||
iv = getFn.hexaryRangeInflate(rootKey, pt)
|
||||
except CatchableError as e:
|
||||
error logTxt "inflating interval oops", peer, ctx=buddy.healingCtx(env),
|
||||
accKey, name=($e.name), msg=e.msg
|
||||
iv = NodeTagRange.new(pt,pt)
|
||||
|
||||
# Register isolated leaf node
|
||||
if 0 < env.fetchAccounts.processed.merge iv:
|
||||
env.nAccounts.inc
|
||||
env.fetchAccounts.unprocessed.reduce iv
|
||||
discard buddy.ctx.pool.pass.coveredAccounts.merge iv
|
||||
|
||||
# Update storage slots batch
|
||||
if acc.storageRoot != EMPTY_ROOT_HASH:
|
||||
env.storageQueueAppendFull(acc.storageRoot, accKey)
|
||||
|
||||
# Update contract codes batch
|
||||
if acc.codeHash != EMPTY_CODE_HASH:
|
||||
env.fetchContracts[acc.codeHash] = accKey
|
||||
|
||||
#when extraTraceMessages:
|
||||
# trace logTxt "registered single account", peer, ctx=buddy.healingCtx(env),
|
||||
# leftSlack=(iv.minPt < pt), rightSlack=(pt < iv.maxPt)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: do the healing for one round
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc accountsHealingImpl(
|
||||
buddy: SnapBuddyRef;
|
||||
ignore: HashSet[Blob];
|
||||
env: SnapPivotRef;
|
||||
): Future[(int,HashSet[Blob])]
|
||||
{.async.} =
|
||||
## Fetching and merging missing account trie database nodes. It returns the
|
||||
## number of nodes fetched from the network, and -1 upon error.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
db = ctx.pool.snapDb
|
||||
peer = buddy.peer
|
||||
|
||||
# Import from earlier runs (if any)
|
||||
while ctx.swapInAccounts(env) != 0:
|
||||
discard
|
||||
|
||||
# Update for changes since last visit
|
||||
let missingNodes = await buddy.compileMissingNodesList(env)
|
||||
if missingNodes.len == 0:
|
||||
# Nothing to do
|
||||
trace logTxt "nothing to do", peer, ctx=buddy.healingCtx(env)
|
||||
return (0,EmptyBlobSet) # nothing to do
|
||||
|
||||
# Get the next batch of nodes that need to be merged into the database
|
||||
let fetchedNodes = await buddy.getNodesFromNetwork(missingNodes, ignore, env)
|
||||
if fetchedNodes.len == 0:
|
||||
return (0,EmptyBlobSet)
|
||||
|
||||
# Store nodes onto disk
|
||||
let
|
||||
nFetchedNodes = fetchedNodes.len
|
||||
report = db.importRawAccountsNodes(peer, fetchedNodes)
|
||||
|
||||
if 0 < report.len and report[^1].slot.isNone:
|
||||
# Storage error, just run the next lap (not much else that can be done)
|
||||
error logTxt "databse error", peer, ctx=buddy.healingCtx(env),
|
||||
nFetchedNodes, error=report[^1].error
|
||||
return (-1,EmptyBlobSet)
|
||||
|
||||
# Filter out error and leaf nodes
|
||||
var
|
||||
nLeafNodes = 0 # for logging
|
||||
rejected: HashSet[Blob]
|
||||
for w in report:
|
||||
if w.slot.isSome: # non-indexed entries appear typically at the end, though
|
||||
let inx = w.slot.unsafeGet
|
||||
|
||||
# Node error, will need to pick up later and download again. Note that
# there need not be explicit node specs (so `kind` is opted out.)
|
||||
if w.kind.isNone or w.error != HexaryError(0):
|
||||
rejected.incl fetchedNodes[inx].partialPath
|
||||
|
||||
elif w.kind.unsafeGet == Leaf:
|
||||
# Leaf node has been stored, double check
|
||||
let (isLeaf, key, acc) = buddy.kvAccountLeaf(fetchedNodes[inx], env)
|
||||
if isLeaf:
|
||||
# Update `unprocessed` registry, collect storage roots (if any)
|
||||
buddy.registerAccountLeaf(key, acc, env)
|
||||
nLeafNodes.inc
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "merged into database", peer, ctx=buddy.healingCtx(env),
|
||||
nFetchedNodes, nLeafNodes, nRejected=rejected.len
|
||||
|
||||
return (nFetchedNodes - rejected.len, rejected)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc healAccounts*(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
) {.async.} =
|
||||
## Fetching and merging missing account trie database nodes.
|
||||
trace logTxt "started", peer=buddy.peer, ctx=buddy.healingCtx(env)
|
||||
|
||||
let
|
||||
fa = env.fetchAccounts
|
||||
var
|
||||
nNodesFetched = 0
|
||||
nFetchLoop = 0
|
||||
ignore: HashSet[Blob]
|
||||
|
||||
while not fa.processed.isFull() and
|
||||
buddy.ctrl.running and
|
||||
not env.archived:
|
||||
let (nNodes, rejected) = await buddy.accountsHealingImpl(ignore, env)
|
||||
if nNodes <= 0:
|
||||
break
|
||||
ignore = ignore + rejected
|
||||
nNodesFetched.inc(nNodes)
|
||||
nFetchLoop.inc
|
||||
|
||||
trace logTxt "done", peer=buddy.peer, ctx=buddy.healingCtx(env),
|
||||
nNodesFetched, nFetchLoop, nIgnore=ignore.len
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,394 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Heal storage slots DB
|
||||
## =====================
|
||||
##
|
||||
## This module works similar to `heal_accounts` applied to each per-account
|
||||
## storage slots hexary trie. These per-account trie work items are stored in
|
||||
## the queue `env.fetchStoragePart`.
|
||||
##
|
||||
## There is another such queue `env.fetchStorageFull` which is not used here.
|
||||
##
|
||||
## In order to be able to checkpoint the current list of storage accounts (by
|
||||
## a parallel running process), unfinished storage accounts are temporarily
|
||||
## held in the set `env.parkedStorage`.
|
||||
##
|
||||
## Algorithm applied to each entry of `env.fetchStoragePart`
|
||||
## --------------------------------------------------------
|
||||
##
|
||||
## * Find dangling nodes in the current slot trie via `findMissingNodes()`.
|
||||
##
|
||||
## * Install those nodes from the network.
|
||||
##
|
||||
## * Rinse and repeat
|
||||
##
|
||||
## Discussion
|
||||
## ----------
|
||||
##
|
||||
## A worst case scenario of a potentially failing `findMissingNodes()` call
|
||||
## must be solved by fetching and storing more storage slots and running this
|
||||
## healing algorithm again.
|
||||
##
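## In outline (see `healStorageSlots()` further down), one healing round over
## the partial-slots queue proceeds roughly as:
##
##   while buddy.ctrl.running and <batch and failure limits not exceeded>:
##     let kvp = env.storageQueueUnlinkPartialItem(visited)   # next work item
##     let (n, rejected) = await buddy.stoSlotsHealingImpl(ignore, kvp, env)
##     if kvp.data.slots.processed.isFull: env.nSlotLists.inc # sub-trie done
##     else: env.storageQueueAppend kvp                       # re-queue, retry
##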
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[math, sequtils, sets, tables],
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p, trie/nibbles],
|
||||
stew/[byteutils, interval_set, keyed_queue],
|
||||
../../../../../utils/prettify,
|
||||
../../../../protocol,
|
||||
"../../.."/[constants, range_desc],
|
||||
../../get/[get_error, get_trie_nodes],
|
||||
../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_range,
|
||||
snapdb_storage_slots],
|
||||
./helper/[missing_nodes, storage_queue],
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-slot"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Storage slots heal " & info
|
||||
|
||||
when false:
|
||||
proc `$`(node: NodeSpecs): string =
|
||||
node.partialPath.toHex
|
||||
|
||||
proc `$`(iv: NodeTagRange): string =
|
||||
iv.fullPC3
|
||||
|
||||
proc `$`(rs: NodeTagRangeSet): string =
|
||||
rs.fullPC3
|
||||
|
||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||
(sumUp.to(float) / (2.0^256)).toPC(n)
|
||||
|
||||
proc healingCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): string {.used.} =
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"nStoQ=" & $env.storageQueueTotal() & "," &
|
||||
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||
"nParked=" & $env.parkedStorage.len & "," &
|
||||
"nSlotLists=" & $env.nSlotLists & "}"
|
||||
|
||||
proc healingCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
kvp: StoQuSlotsKVP;
|
||||
env: SnapPivotRef;
|
||||
): string =
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"processed=" & $kvp.data.slots.processed & "," &
|
||||
"nStoQ=" & $env.storageQueueTotal() & "," &
|
||||
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||
"nParked=" & $env.parkedStorage.len & "," &
|
||||
"nSlotLists=" & $env.nSlotLists & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template discardRlpError(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except RlpError:
|
||||
discard
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc compileMissingNodesList(
|
||||
buddy: SnapBuddyRef;
|
||||
kvp: StoQuSlotsKVP;
|
||||
env: SnapPivotRef;
|
||||
): Future[seq[NodeSpecs]]
|
||||
{.async.} =
|
||||
## Find some missing glue nodes in storage slots database.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer {.used.} = buddy.peer
|
||||
slots = kvp.data.slots
|
||||
rootKey = kvp.key.to(NodeKey)
|
||||
getFn = ctx.pool.snapDb.getStorageSlotsFn(kvp.data.accKey)
|
||||
|
||||
if not slots.processed.isFull:
|
||||
let mlv = await slots.missingNodesFind(
|
||||
rootKey, getFn,
|
||||
healStorageSlotsInspectionPlanBLevel,
|
||||
healStorageSlotsInspectionPlanBRetryMax,
|
||||
healStorageSlotsInspectionPlanBRetryNapMSecs,
|
||||
forcePlanBOk = true)
|
||||
|
||||
# Clean up empty account ranges found while looking for nodes
|
||||
if not mlv.emptyGaps.isNil:
|
||||
for w in mlv.emptyGaps.increasing:
|
||||
discard slots.processed.merge w
|
||||
slots.unprocessed.reduce w
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "missing nodes", peer,
|
||||
ctx=buddy.healingCtx(env), nLevel=mlv.level, nVisited=mlv.visited,
|
||||
nResult=mlv.missing.len, result=mlv.missing.toPC
|
||||
|
||||
return mlv.missing
|
||||
|
||||
|
||||
proc getNodesFromNetwork(
|
||||
buddy: SnapBuddyRef;
|
||||
missingNodes: seq[NodeSpecs]; # Nodes to fetch from the network
|
||||
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||
kvp: StoQuSlotsKVP; # Storage slots context
|
||||
env: SnapPivotRef; # For logging
|
||||
): Future[seq[NodeSpecs]]
|
||||
{.async.} =
|
||||
## Extract from `missing` the next batch of nodes that need
## to be merged into the database.
|
||||
let
|
||||
peer {.used.} = buddy.peer
|
||||
accPath = kvp.data.accKey.to(Blob)
|
||||
rootHash = env.stateHeader.stateRoot
|
||||
pivot = env.stateHeader.blockNumber.toStr # for logging in `getTrieNodes()`
|
||||
|
||||
# Initialise for fetching nodes from the network via `getTrieNodes()`
|
||||
var
|
||||
nodeKey: Table[Blob,NodeKey] # Temporary `path -> key` mapping
|
||||
req = SnapTriePaths(accPath: accPath) # Argument for `getTrieNodes()`
|
||||
|
||||
# There is no point in fetching too many nodes as the request would be rejected.
# So the rest of the `missingNodes` list is ignored, to be picked up later.
|
||||
for w in missingNodes:
|
||||
if w.partialPath notin ignore and not nodeKey.hasKey(w.partialPath):
|
||||
req.slotPaths.add w.partialPath
|
||||
nodeKey[w.partialPath] = w.nodeKey
|
||||
if fetchRequestTrieNodesMax <= req.slotPaths.len:
|
||||
break
|
||||
|
||||
if 0 < req.slotPaths.len:
|
||||
# Fetch nodes from the network.
|
||||
let rc = await buddy.getTrieNodes(rootHash, @[req], pivot)
|
||||
if rc.isOk:
|
||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||
buddy.only.errors.getErrorReset()
|
||||
|
||||
return rc.value.nodes.mapIt(NodeSpecs(
|
||||
partialPath: it.partialPath,
|
||||
nodeKey: nodeKey[it.partialPath],
|
||||
data: it.data))
|
||||
|
||||
# Process error ...
|
||||
let
|
||||
error = rc.error
|
||||
ok = await buddy.ctrl.getErrorStopAfterSeriousOne(
|
||||
error, buddy.only.errors)
|
||||
when extraTraceMessages:
|
||||
trace logTxt "reply error", peer, ctx=buddy.healingCtx(kvp,env),
|
||||
error, stop=ok
|
||||
else:
|
||||
discard ok
|
||||
|
||||
return @[]
|
||||
|
||||
|
||||
proc kvStoSlotsLeaf(
|
||||
buddy: SnapBuddyRef;
|
||||
node: NodeSpecs; # Node data fetched from network
|
||||
kvp: StoQuSlotsKVP; # For logging
|
||||
env: SnapPivotRef; # For logging
|
||||
): (bool,NodeKey) =
|
||||
## Re-read leaf node from persistent database (if any)
|
||||
var nNibbles = -1
|
||||
discardRlpError("kvStorageSlotsLeaf"):
|
||||
let
|
||||
nodeRlp = rlpFromBytes node.data
|
||||
prefix = (hexPrefixDecode node.partialPath)[1]
|
||||
segment = (hexPrefixDecode nodeRlp.listElem(0).toBytes)[1]
|
||||
nibbles = prefix & segment
|
||||
|
||||
nNibbles = nibbles.len
|
||||
if nNibbles == 64:
|
||||
return (true, nibbles.getBytes.convertTo(NodeKey))
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "non-leaf node path or corrupt data", peer=buddy.peer,
|
||||
ctx=buddy.healingCtx(kvp,env), nNibbles
|
||||
|
||||
|
||||
proc registerStoSlotsLeaf(
|
||||
buddy: SnapBuddyRef;
|
||||
slotKey: NodeKey;
|
||||
kvp: StoQuSlotsKVP;
|
||||
env: SnapPivotRef;
|
||||
) =
|
||||
## Process single account node as would be done with an interval by
|
||||
## the `storeAccounts()` function
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
rootKey = kvp.key.to(NodeKey)
|
||||
getSlotFn = ctx.pool.snapDb.getStorageSlotsFn kvp.data.accKey
|
||||
pt = slotKey.to(NodeTag)
|
||||
|
||||
# Extend interval [pt,pt] if possible
|
||||
var iv: NodeTagRange
|
||||
try:
|
||||
iv = getSlotFn.hexaryRangeInflate(rootKey, pt)
|
||||
except CatchableError as e:
|
||||
error logTxt "inflating interval oops", peer, ctx=buddy.healingCtx(kvp,env),
|
||||
accKey=kvp.data.accKey, slotKey, name=($e.name), msg=e.msg
|
||||
iv = NodeTagRange.new(pt,pt)
|
||||
|
||||
# Register isolated leaf node
|
||||
if 0 < kvp.data.slots.processed.merge iv:
|
||||
kvp.data.slots.unprocessed.reduce iv
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "registered single slot", peer, ctx=buddy.healingCtx(env),
|
||||
leftSlack=(iv.minPt < pt), rightSlack=(pt < iv.maxPt)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: do the healing for one work item (sub-trie)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc stoSlotsHealingImpl(
|
||||
buddy: SnapBuddyRef;
|
||||
ignore: HashSet[Blob]; # Except for these partial paths listed
|
||||
kvp: StoQuSlotsKVP;
|
||||
env: SnapPivotRef;
|
||||
): Future[(int,HashSet[Blob])]
|
||||
{.async.} =
|
||||
## Returns `true` if the sub-trie is complete (probably inherited), and
|
||||
## `false` if there are nodes left to be completed.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
db = ctx.pool.snapDb
|
||||
peer = buddy.peer
|
||||
missing = await buddy.compileMissingNodesList(kvp, env)
|
||||
|
||||
if missing.len == 0:
|
||||
trace logTxt "nothing to do", peer, ctx=buddy.healingCtx(kvp,env)
|
||||
return (0,EmptyBlobSet) # nothing to do
|
||||
|
||||
# Get the next batch of nodes that need to be merged into the database
|
||||
let fetchedNodes = await buddy.getNodesFromNetwork(missing, ignore, kvp, env)
|
||||
if fetchedNodes.len == 0:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "node set unavailable", nMissing=missing.len
|
||||
return (0,EmptyBlobSet)
|
||||
|
||||
# Store nodes onto disk
|
||||
let
|
||||
nFetchedNodes = fetchedNodes.len
|
||||
report = db.importRawStorageSlotsNodes(peer, kvp.data.accKey, fetchedNodes)
|
||||
|
||||
if 0 < report.len and report[^1].slot.isNone:
|
||||
# Storage error, just run the next lap (not much else that can be done)
|
||||
error logTxt "database error", peer, ctx=buddy.healingCtx(kvp,env),
|
||||
nFetchedNodes, error=report[^1].error
|
||||
return (-1,EmptyBlobSet)
|
||||
|
||||
# Filter out leaf nodes
|
||||
var
|
||||
nLeafNodes = 0 # for logging
|
||||
rejected: HashSet[Blob]
|
||||
trace logTxt "importRawStorageSlotsNodes", nReport=report.len #########
|
||||
for w in report:
|
||||
if w.slot.isSome: # non-indexed entries appear typically at the end, though
|
||||
let inx = w.slot.unsafeGet
|
||||
|
||||
# Node error, will need to pick up later and download again. Note that
# there need not be explicit node specs (so `kind` is opted out.)
|
||||
if w.kind.isNone or w.error != HexaryError(0):
|
||||
rejected.incl fetchedNodes[inx].partialPath
|
||||
|
||||
elif w.kind.unsafeGet == Leaf:
|
||||
# Leaf node has been stored, double check
|
||||
let (isLeaf, key) = buddy.kvStoSlotsLeaf(fetchedNodes[inx], kvp, env)
|
||||
if isLeaf:
|
||||
# Update `unprocessed` registry, collect storage roots (if any)
|
||||
buddy.registerStoSlotsLeaf(key, kvp, env)
|
||||
nLeafNodes.inc
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "merged into database", peer, ctx=buddy.healingCtx(kvp,env),
|
||||
nLeafNodes
|
||||
|
||||
return (nFetchedNodes - rejected.len, rejected)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc healStorageSlots*(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
) {.async.} =
|
||||
## Fetch and merge missing storage slots trie database nodes.
|
||||
trace logTxt "started", peer=buddy.peer, ctx=buddy.healingCtx(env)
|
||||
|
||||
var
|
||||
nNodesFetched = 0
|
||||
nFetchLoop = 0
|
||||
ignore: HashSet[Blob]
|
||||
visited: HashSet[NodeKey]
|
||||
|
||||
while buddy.ctrl.running and
|
||||
visited.len <= healStorageSlotsBatchMax and
|
||||
ignore.len <= healStorageSlotsFailedMax and
|
||||
not env.archived:
|
||||
# Pull out the next request list from the queue
|
||||
let kvp = block:
|
||||
let rc = env.storageQueueUnlinkPartialItem visited
|
||||
if rc.isErr:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "queue exhausted", peer=buddy.peer,
|
||||
ctx=buddy.healingCtx(env), nIgnore=ignore.len, nVisited=visited.len
|
||||
break
|
||||
rc.value
|
||||
|
||||
nFetchLoop.inc
|
||||
|
||||
# Process request range for healing
|
||||
let (nNodes, rejected) = await buddy.stoSlotsHealingImpl(ignore, kvp, env)
|
||||
if kvp.data.slots.processed.isFull:
|
||||
env.nSlotLists.inc
|
||||
env.parkedStorage.excl kvp.data.accKey
|
||||
else:
|
||||
# Re-queue again, to be re-processed in another cycle
|
||||
visited.incl kvp.data.accKey
|
||||
env.storageQueueAppend kvp
|
||||
|
||||
ignore = ignore + rejected
|
||||
nNodesFetched.inc(nNodes)
|
||||
|
||||
trace logTxt "done", peer=buddy.peer, ctx=buddy.healingCtx(env),
|
||||
nNodesFetched, nFetchLoop, nIgnore=ignore.len, nVisited=visited.len
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,38 +0,0 @@
|
||||
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
stew/interval_set,
|
||||
"../../../.."/range_desc,
|
||||
../snap_pass_desc
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers: coverage
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc accountsCoverage*(ctx: SnapCtxRef): float =
|
||||
## Returns the accounts coverage factor
|
||||
ctx.pool.pass.coveredAccounts.fullFactor + ctx.pool.pass.covAccTimesFull.float
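# Illustrative note (not part of the original module): since the roll-over
# count is added on top, coverage values above 1.0 are possible. E.g. a
# registry that is 25% full after two roll-overs reports a coverage of 2.25.
# A typical use, as elsewhere in this pass, is gating healing on a threshold:
#
#   if healAccountsCoverageTrigger <= ctx.accountsCoverage():
#     discard # healing is considered worthwhile from here on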
|
||||
|
||||
proc accountsCoverage100PcRollOver*(ctx: SnapCtxRef) =
|
||||
## Roll over `coveredAccounts` registry when it reaches 100%.
|
||||
let snap = ctx.pool.pass
|
||||
if snap.coveredAccounts.isFull:
|
||||
# All of the account hashes are covered by completed range fetch processes
|
||||
# for all pivot environments. So reset covering and record full-ness level.
|
||||
snap.covAccTimesFull.inc
|
||||
snap.coveredAccounts.clear()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,92 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
../../../../../misc/sync_ctrl,
|
||||
../../../get/[get_error, get_block_header],
|
||||
../snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-ctrl"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc beaconHeaderUpdatebuBlockNumber*(
|
||||
buddy: SnapBuddyRef; # Worker peer
|
||||
num: BlockNumber; # Block number to sync against
|
||||
) {.async.} =
|
||||
## This function updates the beacon header according to the block number
|
||||
## argument.
|
||||
##
|
||||
## This function is typically used for testing and debugging.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
|
||||
trace "fetch beacon header", peer, num
|
||||
if ctx.pool.beaconHeader.blockNumber < num:
|
||||
let rc = await buddy.getBlockHeader(num)
|
||||
if rc.isOk:
|
||||
ctx.pool.beaconHeader = rc.value
|
||||
|
||||
|
||||
proc beaconHeaderUpdateFromFile*(
|
||||
buddy: SnapBuddyRef; # Worker peer
|
||||
) {.async.} =
|
||||
## This function updates the beacon header cache by importing from the file name
|
||||
## argument `fileName`. The first line of the contents of the file looks like
|
||||
## * `0x<hex-number>` -- hash of block header
|
||||
## * `<decimal-number>` -- block number
|
||||
## This function is typically used for testing and debugging.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
|
||||
hashOrNum = block:
|
||||
let rc = ctx.exCtrlFile.syncCtrlHashOrBlockNumFromFile
|
||||
if rc.isErr:
|
||||
return
|
||||
rc.value
|
||||
|
||||
peer = buddy.peer
|
||||
|
||||
var
|
||||
rc = Result[BlockHeader,GetError].err(GetError(0))
|
||||
isHash = hashOrNum.isHash # so that the value can be logged
|
||||
|
||||
# Parse value dump and fetch a header from the peer (if any)
|
||||
try:
|
||||
if isHash:
|
||||
let hash = hashOrNum.hash
|
||||
trace "External beacon info", peer, hash
|
||||
if hash != ctx.pool.beaconHeader.hash:
|
||||
rc = await buddy.getBlockHeader(hash)
|
||||
else:
|
||||
let num = hashOrNum.number
|
||||
trace "External beacon info", peer, num
|
||||
if ctx.pool.beaconHeader.blockNumber < num:
|
||||
rc = await buddy.getBlockHeader(num)
|
||||
except CatchableError as e:
|
||||
trace "Exception while parsing beacon info", peer, isHash,
|
||||
name=($e.name), msg=(e.msg)
|
||||
|
||||
if rc.isOk:
|
||||
if ctx.pool.beaconHeader.blockNumber < rc.value.blockNumber:
|
||||
ctx.pool.beaconHeader = rc.value
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,208 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Find missing nodes for healing
|
||||
## ==============================
|
||||
##
|
||||
## This module searches for missing nodes in the database (which means that
|
||||
## nodes which link to missing ones must exist.)
|
||||
##
|
||||
## Algorithm
|
||||
## ---------
|
||||
##
|
||||
## * Find dangling node links in the current account trie by trying *plan A*,
|
||||
## and continuing with *plan B* only if *plan A* fails.
|
||||
##
|
||||
## A. Try to find nodes with envelopes that have no account in common with
|
||||
## any range interval of the `processed` set of the hexary trie. This
|
||||
## action will
|
||||
##
|
||||
## + either determine that there are no such envelopes implying that the
|
||||
## accounts trie is complete (then stop here)
|
||||
##
|
||||
## + or result in envelopes related to nodes that are all allocated on the
|
||||
## accounts trie (fail, use *plan B* below)
|
||||
##
|
||||
## + or result in some envelopes related to dangling nodes.
|
||||
##
|
||||
## B. Employ the `hexaryInspect()` trie perusal function in a limited mode
|
||||
## for finding dangling (i.e. missing) sub-nodes below the allocated nodes.
|
||||
##
|
||||
## C. Remove empty intervals from the accounting ranges. This is a pure
|
||||
## maintenance process that applies if A and B fail.
|
||||
##
|
||||
## Discussion
|
||||
## ----------
|
||||
##
|
||||
## For *plan A*, the complement of ranges in the `processed` is determined
|
||||
## and expressed as a list of node envelopes. As a consequence, the gaps
|
||||
## between the envelopes are either blind ranges that have no leaf nodes in
|
||||
## the database, or they are contained in the `processed` range. These gaps
|
||||
## will be silently merged into the `processed` set of ranges.
|
||||
##
|
||||
## For *plan B*, a worst case scenario of a failing *plan B* must be solved
|
||||
## by fetching and storing more nodes with other means before using this
|
||||
## algorithm to find more missing nodes.
|
||||
##
|
||||
## Due to the potentially poor performance of `hexaryInspect()`, there is
|
||||
## no general solution for *plan B* by recursively searching the whole hexary
|
||||
## trie database for more dangling nodes.
|
||||
##
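## A condensed sketch of *plan A* (illustrative only, using the same names as
## `missingNodesFind()` below):
##
##   let rc = ranges.processed.hexaryEnvelopeDecompose(rootKey, getFn)
##   if rc.isOk:
##     # keep only envelopes whose top node is not in the database yet
##     let missing = rc.value.filterIt(it.nodeKey.ByteArray32.getFn().len == 0)
##     # an empty `missing` list means plan A cannot contribute, so plan B
##     # (`hexaryInspect()`) is tried next
##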
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sequtils,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/common,
|
||||
stew/interval_set,
|
||||
"../../../.."/[constants, range_desc],
|
||||
../../../db/[hexary_desc, hexary_envelope, hexary_error, hexary_inspect,
|
||||
hexary_nearby],
|
||||
../snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-find"
|
||||
|
||||
type
|
||||
MissingNodesSpecs* = object
|
||||
## Return type for `missingNodesFind()`
|
||||
missing*: seq[NodeSpecs]
|
||||
level*: uint8
|
||||
visited*: uint64
|
||||
emptyGaps*: NodeTagRangeSet
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Find missing nodes " & info
|
||||
|
||||
template ignExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
trace logTxt "Ooops", `info`=info, name=($e.name), msg=(e.msg)
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
raiseAssert "Inconveivable (" &
|
||||
info & "): name=" & $e.name & " msg=" & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc missingNodesFind*(
|
||||
ranges: RangeBatchRef;
|
||||
rootKey: NodeKey;
|
||||
getFn: HexaryGetFn;
|
||||
planBLevelMax: uint8;
|
||||
planBRetryMax: int;
|
||||
planBRetrySleepMs: int;
|
||||
forcePlanBOk = false;
|
||||
): Future[MissingNodesSpecs]
|
||||
{.async.} =
|
||||
## Find some missing nodes in the hexary trie database.
|
||||
var nodes: seq[NodeSpecs]
|
||||
|
||||
# Plan A, try complement of `processed`
|
||||
noExceptionOops("compileMissingNodesList"):
|
||||
if not ranges.processed.isEmpty:
|
||||
# Get unallocated nodes to be fetched
|
||||
let rc = ranges.processed.hexaryEnvelopeDecompose(rootKey, getFn)
|
||||
if rc.isOk:
|
||||
# Extract nodes from the list that do not exist in the database
|
||||
# and need to be fetched (and allocated.)
|
||||
let missing = rc.value.filterIt(it.nodeKey.ByteArray32.getFn().len == 0)
|
||||
if 0 < missing.len:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan A", nNodes=nodes.len, nMissing=missing.len
|
||||
return MissingNodesSpecs(missing: missing)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan A not applicable", nNodes=nodes.len
|
||||
|
||||
# Plan B, carefully employ `hexaryInspect()`
|
||||
var nRetryCount = 0
|
||||
if 0 < nodes.len or forcePlanBOk:
|
||||
ignExceptionOops("compileMissingNodesList"):
|
||||
let
|
||||
paths = nodes.mapIt it.partialPath
|
||||
suspend = if planBRetrySleepMs <= 0: 1.nanoseconds
|
||||
else: planBRetrySleepMs.milliseconds
|
||||
var
|
||||
maxLevel = planBLevelMax
|
||||
stats = getFn.hexaryInspectTrie(rootKey, paths,
|
||||
stopAtLevel = maxLevel,
|
||||
maxDangling = fetchRequestTrieNodesMax)
|
||||
|
||||
while stats.dangling.len == 0 and
|
||||
nRetryCount < planBRetryMax and
|
||||
1 < maxLevel and
|
||||
not stats.resumeCtx.isNil:
|
||||
await sleepAsync suspend
|
||||
nRetryCount.inc
|
||||
maxLevel.dec
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan B retry", forcePlanBOk, nRetryCount, maxLevel
|
||||
stats = getFn.hexaryInspectTrie(rootKey,
|
||||
resumeCtx = stats.resumeCtx,
|
||||
stopAtLevel = maxLevel,
|
||||
maxDangling = fetchRequestTrieNodesMax)
|
||||
|
||||
result = MissingNodesSpecs(
|
||||
missing: stats.dangling,
|
||||
level: stats.level,
|
||||
visited: stats.count)
|
||||
|
||||
if 0 < result.missing.len:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan B", forcePlanBOk, nNodes=nodes.len,
|
||||
nDangling=result.missing.len, level=result.level,
|
||||
nVisited=result.visited, nRetryCount
|
||||
return
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan B not applicable", forcePlanBOk, nNodes=nodes.len,
|
||||
level=result.level, nVisited=result.visited, nRetryCount
|
||||
|
||||
# Plan C, clean up intervals
|
||||
|
||||
# Calculate `gaps` as the complement of the `processed` set of intervals
|
||||
let gaps = NodeTagRangeSet.init()
|
||||
discard gaps.merge FullNodeTagRange
|
||||
for w in ranges.processed.increasing: discard gaps.reduce w
|
||||
|
||||
# Clean up empty gaps in the processed range
|
||||
result.emptyGaps = NodeTagRangeSet.init()
|
||||
for gap in gaps.increasing:
|
||||
let rc = gap.minPt.hexaryNearbyRight(rootKey,getFn)
|
||||
if rc.isOk:
|
||||
# So there is a right end in the database and there is no leaf in
|
||||
# the right open interval [gap.minPt,rc.value).
|
||||
discard result.emptyGaps.merge(gap.minPt, rc.value)
|
||||
elif rc.error == NearbyBeyondRange:
|
||||
discard result.emptyGaps.merge(gap.minPt, high(NodeTag))
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "plan C", nGapFixes=result.emptyGaps.chunks,
|
||||
nGapOpen=(ranges.processed.chunks - result.emptyGaps.chunks)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,503 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sets,
|
||||
chronicles,
|
||||
eth/common,
|
||||
stew/[interval_set, keyed_queue],
|
||||
"../../../.."/[constants, range_desc],
|
||||
../../../db/[hexary_inspect, snapdb_storage_slots],
|
||||
../snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-slots"
|
||||
|
||||
type
|
||||
StoQuSlotsKVP* = KeyedQueuePair[Hash256,SlotsQueueItemRef]
|
||||
## Key-value return code from `SnapSlotsQueue` handler
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Storage queue " & info
|
||||
|
||||
proc `$`(rs: NodeTagRangeSet): string =
|
||||
rs.fullPC3
|
||||
|
||||
proc `$`(tr: UnprocessedRanges): string =
|
||||
tr.fullPC3
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
raiseAssert "Inconveivable (" &
|
||||
info & "): name=" & $e.name & " msg=" & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc updatePartial(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
req: AccountSlotsChanged; # Left over account data
|
||||
): bool = # List entry was added
|
||||
## Update the range of account argument `req` to the partial slot ranges
|
||||
## queue.
|
||||
##
|
||||
## The function returns `true` if a new list entry was added.
|
||||
let
|
||||
accKey = req.account.accKey
|
||||
stoRoot = req.account.storageRoot
|
||||
noFullEntry = env.fetchStorageFull.delete(stoRoot).isErr
|
||||
iv = req.account.subRange.get(otherwise = FullNodeTagRange)
|
||||
jv = req.newRange.get(otherwise = FullNodeTagRange)
|
||||
(slots, newEntry, newPartEntry) = block:
|
||||
let rc = env.fetchStoragePart.lruFetch stoRoot
|
||||
if rc.isOk:
|
||||
(rc.value.slots, false, false)
|
||||
else:
|
||||
# New entry
|
||||
let
|
||||
stoSlo = RangeBatchRef(processed: NodeTagRangeSet.init())
|
||||
stoItem = SlotsQueueItemRef(accKey: accKey, slots: stoSlo)
|
||||
discard env.fetchStoragePart.append(stoRoot, stoItem)
|
||||
stoSlo.unprocessed.init(clear = true)
|
||||
|
||||
# Initialise ranges
|
||||
var newItem = false
|
||||
if iv == FullNodeTagRange:
|
||||
# New record (probably was a full range, before)
|
||||
stoSlo.unprocessed.mergeSplit FullNodeTagRange
|
||||
newItem = noFullEntry
|
||||
else:
|
||||
# Restore `processed` range, `iv` was the left over.
|
||||
discard stoSlo.processed.merge FullNodeTagRange
|
||||
discard stoSlo.processed.reduce iv
|
||||
(stoSlo, newItem, true)
|
||||
|
||||
# Remove delta state relative to original state
|
||||
if iv != jv:
|
||||
# Calculate `iv - jv`
|
||||
let ivSet = NodeTagRangeSet.init()
|
||||
discard ivSet.merge iv # Previous range
|
||||
discard ivSet.reduce jv # Left over range
|
||||
|
||||
# Update `processed` by delta range
|
||||
for w in ivSet.increasing:
|
||||
discard slots.processed.merge w
|
||||
|
||||
# Update left over
|
||||
slots.unprocessed.merge jv # Left over range
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "updated partially", accKey, iv, jv,
|
||||
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||
noFullEntry, newEntry, newPartEntry
|
||||
else:
|
||||
discard newPartEntry
|
||||
|
||||
env.parkedStorage.excl accKey # Un-park (if any)
|
||||
newEntry
|
||||
|
||||
|
||||
proc appendPartial(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
acc: AccountSlotsHeader; # Left over account data
|
||||
splitMerge: bool; # Bisect or straight merge
|
||||
): bool = # List entry was added
|
||||
## Append to partial queue. The argument range of `acc` is split so that
|
||||
## the next request of this range will result in the right most half size
|
||||
## of this very range.
|
||||
##
|
||||
## The function returns `true` if a new list entry was added.
|
||||
let
|
||||
accKey = acc.accKey
|
||||
stoRoot = acc.storageRoot
|
||||
notFull = env.fetchStorageFull.delete(stoRoot).isErr
|
||||
iv = acc.subRange.get(otherwise = FullNodeTagRange)
|
||||
rc = env.fetchStoragePart.lruFetch acc.storageRoot
|
||||
(slots,newEntry) = block:
|
||||
if rc.isOk:
|
||||
(rc.value.slots, false)
|
||||
else:
|
||||
# Restore missing range
|
||||
let
|
||||
stoSlo = RangeBatchRef(processed: NodeTagRangeSet.init())
|
||||
stoItem = SlotsQueueItemRef(accKey: accKey, slots: stoSlo)
|
||||
discard env.fetchStoragePart.append(stoRoot, stoItem)
|
||||
stoSlo.unprocessed.init(clear = true)
|
||||
discard stoSlo.processed.merge FullNodeTagRange
|
||||
discard stoSlo.processed.reduce iv
|
||||
(stoSlo, notFull)
|
||||
|
||||
if splitMerge:
|
||||
slots.unprocessed.mergeSplit iv
|
||||
else:
|
||||
slots.unprocessed.merge iv
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "merged partial", splitMerge, accKey, iv,
|
||||
processed=slots.processed, unprocessed=slots.unprocessed, newEntry
|
||||
|
||||
env.parkedStorage.excl accKey # Un-park (if any)
|
||||
newEntry
|
||||
|
||||
|
||||
proc reducePartial(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
acc: AccountSlotsHeader; # Left over account data
|
||||
): bool = # List entry was removed
|
||||
## Reduce range from partial ranges list.
|
||||
##
|
||||
## The function returns `true` if a list entry was removed.
|
||||
# So `iv` was not the full range in which case all of `iv` was fully
|
||||
# processed and there is nothing left.
|
||||
let
|
||||
accKey = acc.accKey
|
||||
stoRoot = acc.storageRoot
|
||||
notFull = env.fetchStorageFull.delete(stoRoot).isErr
|
||||
iv = acc.subRange.get(otherwise = FullNodeTagRange)
|
||||
rc = env.fetchStoragePart.lruFetch stoRoot
|
||||
|
||||
discard notFull
|
||||
var entryRemoved = false
|
||||
if rc.isErr:
|
||||
# This was the last missing range anyway. So there is no need to
|
||||
# re-insert this entry.
|
||||
entryRemoved = true # Virtually deleted
|
||||
when extraTraceMessages:
|
||||
trace logTxt "reduced partial, discarded", accKey, iv, entryRemoved
|
||||
else:
|
||||
let slots = rc.value.slots
|
||||
discard slots.processed.merge iv
|
||||
|
||||
if slots.processed.isFull:
|
||||
env.fetchStoragePart.del stoRoot
|
||||
result = true
|
||||
when extraTraceMessages:
|
||||
trace logTxt "reduced partial, deleted", accKey, iv, entryRemoved
|
||||
else:
|
||||
slots.unprocessed.reduce iv
|
||||
when extraTraceMessages:
|
||||
trace logTxt "reduced partial, completed", accKey, iv,
|
||||
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||
entryRemoved
|
||||
|
||||
env.parkedStorage.excl accKey # Un-park (if any)
|
||||
entryRemoved
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc storageQueueTotal*(env: SnapPivotRef): int =
|
||||
## Total number of entries on the storage queues, including parked ones.
|
||||
env.fetchStorageFull.len + env.fetchStoragePart.len + env.parkedStorage.len
|
||||
|
||||
proc storageQueueAvail*(env: SnapPivotRef): int =
|
||||
## Number of available entries on the storage queues
|
||||
env.fetchStorageFull.len + env.fetchStoragePart.len
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, append queue items
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc storageQueueAppendFull*(
|
||||
env: SnapPivotRef;
|
||||
stoRoot: Hash256;
|
||||
accKey: NodeKey;
|
||||
): bool
|
||||
{.discardable.} =
|
||||
## Append item to `fetchStorageFull` queue. This undoes the effect of the
|
||||
## function `storageQueueFetchFull()`. The function returns `true` if
|
||||
## a new entry was added.
|
||||
let
|
||||
notPart = env.fetchStoragePart.delete(stoRoot).isErr
|
||||
stoItem = SlotsQueueItemRef(accKey: accKey)
|
||||
env.parkedStorage.excl accKey # Un-park (if any)
|
||||
env.fetchStorageFull.append(stoRoot, stoItem) and notPart
|
||||
|
||||
proc storageQueueAppendFull*(
|
||||
env: SnapPivotRef;
|
||||
acc: AccountSlotsHeader;
|
||||
): bool
|
||||
{.discardable.} =
|
||||
## Variant of `storageQueueAppendFull()`
|
||||
env.storageQueueAppendFull(acc.storageRoot, acc.accKey)
|
||||
|
||||
proc storageQueueAppendPartialSplit*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
acc: AccountSlotsHeader; # Left over account data
|
||||
): bool
|
||||
{.discardable.} =
|
||||
## Merge slot range back into partial queue. This undoes the effect of the
|
||||
## function `storageQueueFetchPartial()` with the additional feature that
|
||||
## the argument range of `acc` is split. So some next range request for this
|
||||
## account will result in the right most half size of this very range just
|
||||
## inserted.
|
||||
##
|
||||
## The function returns `true` if a new entry was added.
|
||||
env.appendPartial(acc, splitMerge=true)
|
||||
|
||||
proc storageQueueAppendPartialSplit*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
req: openArray[AccountSlotsHeader]; # List of entries to push back
|
||||
) =
|
||||
## Variant of `storageQueueAppendPartialSplit()`
|
||||
for w in req:
|
||||
discard env.appendPartial(w, splitMerge=true)
|
||||
|
||||
proc storageQueueAppend*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
req: openArray[AccountSlotsHeader]; # List of entries to push back
|
||||
) =
|
||||
## Append a job list of ranges. This undoes the effect of either function
|
||||
## `storageQueueFetchFull()` or `storageQueueFetchPartial()`.
|
||||
for w in req:
|
||||
let iv = w.subRange.get(otherwise = FullNodeTagRange)
|
||||
if iv == FullNodeTagRange:
|
||||
env.storageQueueAppendFull w
|
||||
else:
|
||||
discard env.appendPartial(w, splitMerge=false)
|
||||
|
||||
proc storageQueueAppend*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
kvp: StoQuSlotsKVP; # List of entries to push back
|
||||
) =
|
||||
## Insert back a full administrative queue record. This function is typically
|
||||
## used after a record was unlinked via `storageQueueUnlinkPartialItem()`.
|
||||
let accKey = kvp.data.accKey
|
||||
env.parkedStorage.excl accKey # Un-park (if any)
|
||||
|
||||
if kvp.data.slots.isNil:
|
||||
env.fetchStoragePart.del kvp.key # Sanitise data
|
||||
discard env.fetchStorageFull.append(kvp.key, kvp.data)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "re-queued full", accKey
|
||||
else:
|
||||
env.fetchStorageFull.del kvp.key # Sanitise data
|
||||
|
||||
let rc = env.fetchStoragePart.eq kvp.key
|
||||
if rc.isErr:
|
||||
discard env.fetchStoragePart.append(kvp.key, kvp.data)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "re-queued partial",
|
||||
processed=kvp.data.slots.processed,
|
||||
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||
else:
|
||||
# Merge `processed` ranges
|
||||
for w in kvp.data.slots.processed.increasing:
|
||||
discard rc.value.slots.processed.merge w
|
||||
|
||||
# Intersect `unprocessed` ranges
|
||||
for w in kvp.data.slots.unprocessed.ivItems:
|
||||
rc.value.slots.unprocessed.reduce w
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "re-merged partial",
|
||||
processed=kvp.data.slots.processed,
|
||||
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, modify/update/remove queue items
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc storageQueueUpdate*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
req: openArray[AccountSlotsChanged]; # List of entries to push back
|
||||
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||
): (int,int) = # Added, removed
|
||||
## Similar to `storageQueueAppend()`, this functions appends account header
|
||||
## entries back into the storage queues. Different to `storageQueueAppend()`,
|
||||
## this function is aware of changes after partial downloads from the network.
|
||||
##
|
||||
## The function returns the tuple `(added, removed)` reflecting the numbers
|
||||
## of changed list items (accumulated for partial and full range lists.)
|
||||
for w in req:
|
||||
if w.account.accKey notin ignore:
|
||||
let
|
||||
iv = w.account.subRange.get(otherwise = FullNodeTagRange)
|
||||
jv = w.newRange.get(otherwise = FullNodeTagRange)
|
||||
if jv != FullNodeTagRange:
|
||||
# So `jv` is some rest after processing. Typically this entry is
|
||||
# related to partial range response message that came with a proof.
|
||||
if env.updatePartial w:
|
||||
result[0].inc
|
||||
when extraTraceMessages:
|
||||
trace logTxt "update/append partial", accKey=w.account.accKey,
|
||||
iv, jv, nAdded=result[0], nRemoved=result[1]
|
||||
elif jv == iv:
|
||||
if env.storageQueueAppendFull w.account:
|
||||
result[0].inc
|
||||
#when extraTraceMessages:
|
||||
# trace logTxt "update/append full", accKey=w.account.accKey,
|
||||
# nAdded=result[0], nRemoved=result[1]
|
||||
else:
|
||||
if env.reducePartial w.account:
|
||||
result[1].inc
|
||||
when extraTraceMessages:
|
||||
trace logTxt "update/reduce partial", accKey=w.account.accKey,
|
||||
iv, jv, nAdded=result[0], nRemoved=result[1]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions, fetch/remove queue items
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc storageQueueFetchFull*(
|
||||
ctx: SnapCtxRef; # Global context
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||
): seq[AccountSlotsHeader] =
|
||||
## Fetch a list of at most `fetchRequestStorageSlotsMax` full work items
|
||||
## from the batch queue.
|
||||
##
|
||||
## This function walks through the items queue and collects work items where
## the hexary trie has not been fully or partially allocated on the database
## already. These collected items are returned.
##
## There is a sufficient (but not necessary) quick check whether a partially
## allocated work item is already complete. In that case it is removed from
## the queue and accounted for via `env.nSlotLists`.
##
## Otherwise, a partially allocated item is moved to the partial queue (where
## it also becomes subject to healing.)
|
||||
##
|
||||
noExceptionOops("getNextSlotItemsFull"):
|
||||
for kvp in env.fetchStorageFull.nextPairs:
|
||||
if kvp.data.accKey notin ignore:
|
||||
let
|
||||
getFn = ctx.pool.snapDb.getStorageSlotsFn kvp.data.accKey
|
||||
rootKey = kvp.key.to(NodeKey)
|
||||
accItem = AccountSlotsHeader(
|
||||
accKey: kvp.data.accKey,
|
||||
storageRoot: kvp.key)
|
||||
|
||||
# This item will eventually be returned, discarded, or moved to the
|
||||
# partial queue (also subject for healing.) So it will be removed from
|
||||
# the full range lists queue.
|
||||
env.fetchStorageFull.del kvp.key # OK to delete current link
|
||||
|
||||
# Check whether the database trie is empty. Otherwise the sub-trie is
|
||||
# at least partially allocated.
|
||||
if rootKey.ByteArray32.getFn.len == 0:
|
||||
# Collect for return
|
||||
result.add accItem
|
||||
env.parkedStorage.incl accItem.accKey # Registered as absent
|
||||
|
||||
# Maximal number of items to fetch
|
||||
if fetchRequestStorageSlotsMax <= result.len:
|
||||
break # stop here
|
||||
else:
|
||||
# Check how much there is below the top level storage slots node. For
|
||||
# a small storage trie, this check will be exhaustive.
|
||||
let stats = getFn.hexaryInspectTrie(rootKey,
|
||||
suspendAfter = storageSlotsTrieInheritPerusalMax,
|
||||
maxDangling = 1)
|
||||
|
||||
if stats.dangling.len == 0 and stats.resumeCtx.isNil:
|
||||
# This storage trie could be fully searched and there was no
|
||||
# dangling node. So it is complete and can be considered done.
|
||||
# It can be left removed from the batch queue.
|
||||
env.nSlotLists.inc # Update for logging
|
||||
else:
|
||||
# This item must be treated as a partially available slot
|
||||
env.storageQueueAppendPartialSplit accItem
|
||||
|
||||
proc storageQueueFetchPartial*(
|
||||
ctx: SnapCtxRef; # Global context (unused here)
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||
): seq[AccountSlotsHeader] = # At most one item
|
||||
## Get work item from the batch queue. This will typically return the full
|
||||
## work item and remove it from the queue unless the partially completed
|
||||
## range is fragmented.
|
||||
for kvp in env.fetchStoragePart.nextPairs:
|
||||
# Extract range and return single item request queue
|
||||
let
|
||||
slots = kvp.data.slots
|
||||
accKey = kvp.data.accKey
|
||||
accepted = accKey notin ignore
|
||||
if accepted:
|
||||
let rc = slots.unprocessed.fetch()
|
||||
if rc.isOk:
|
||||
let reqItem = AccountSlotsHeader(
|
||||
accKey: accKey,
|
||||
storageRoot: kvp.key,
|
||||
subRange: some rc.value)
|
||||
|
||||
# Delete from batch queue if the `unprocessed` range has become empty.
|
||||
if slots.unprocessed.isEmpty and
|
||||
high(UInt256) - rc.value.len <= slots.processed.total:
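# Note (added reading of the condition above): this appears to be the
# overflow-safe form of `slots.processed.total + rc.value.len >=
# high(UInt256)`, i.e. the range just fetched plus the already processed
# ranges cover the whole storage slot key space.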
|
||||
# If this is all the rest, the record can be deleted from the todo
|
||||
# list. If not fully downloaded at a later stage, a new record will
|
||||
# be created on-the-fly.
|
||||
env.parkedStorage.incl accKey # Temporarily parked
|
||||
env.fetchStoragePart.del kvp.key # Last one not needed
|
||||
else:
|
||||
# Otherwise accept and update/rotate queue. Note that `lruFetch`
|
||||
# does leave the item on the queue.
|
||||
discard env.fetchStoragePart.lruFetch reqItem.storageRoot
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "fetched partial",
|
||||
processed=slots.processed, unprocessed=slots.unprocessed,
|
||||
accKey, iv=rc.value
|
||||
return @[reqItem] # done
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "rejected partial", accepted,
|
||||
processed=slots.processed, unprocessed=slots.unprocessed, accKey
|
||||
# End for()
|
||||
|
||||
proc storageQueueUnlinkPartialItem*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ignore: HashSet[NodeKey]; # Ignore accounts with these keys
|
||||
): Result[StoQuSlotsKVP,void] =
|
||||
## Fetch an item from the partial list. This item will be removed from the
|
||||
## list and can be re-queued via `storageQueueAppend()`.
|
||||
for kvp in env.fetchStoragePart.nextPairs:
|
||||
# Extract range and return single item request queue
|
||||
let
|
||||
accKey = kvp.data.accKey
|
||||
accepted = accKey notin ignore
|
||||
if accepted:
|
||||
env.parkedStorage.incl accKey # Temporarily parked
|
||||
env.fetchStoragePart.del kvp.key # Last one not needed
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "unlink partial item", processed=kvp.data.slots.processed,
|
||||
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||
return ok(kvp) # done
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "unlink partial skip", accepted,
|
||||
processed=kvp.data.slots.processed,
|
||||
unprocessed=kvp.data.slots.unprocessed, accKey
|
||||
# End for()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,328 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Swap in already allocated sub-tries
|
||||
## ===================================
|
||||
##
|
||||
## This module imports sub-tries from other pivots into the current. It does
|
||||
## so by detecting the top of an existing sub-trie in the current pivot and
|
||||
## searching other pivots for the part of the sub-trie that is already
|
||||
## available there. So it can be marked accomplished on the current pivot.
|
||||
##
|
||||
## Algorithm
|
||||
## ---------
|
||||
##
|
||||
## * Find nodes with envelopes that have no account in common with any range
|
||||
## interval of the `processed` set of the current pivot.
|
||||
##
|
||||
## * From the nodes of the previous step, extract allocated nodes and try to
|
||||
## find them on previous pivots. Stop if there are no such nodes.
|
||||
##
|
||||
## * The portion of `processed` ranges on the other pivot that intersects with
|
||||
## the envelopes of the nodes have been downloaded already. And it is equally
|
||||
## applicable to the current pivot as it applies to the same sub-trie.
|
||||
##
|
||||
## So the intersection of `processed` with the node envelope will be copied
|
||||
## to the `processed` ranges of the current pivot.
|
||||
##
|
||||
## * Rinse and repeat.
|
||||
##
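## A sketch of the inner merge step (illustrative only, same names as in
## `swapIn()` below): for every envelope shared with an archived pivot, the
## intersecting `processed` ranges are inherited by the current pivot:
##
##   for iv in rngSet.increasing:
##     discard swappedIn[n].merge iv   # remember what was inherited
##     merged += processed.merge iv    # mark as done on the current pivot
##     unprocessed.reduce iv           # no need to fetch it again
##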
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[math, sequtils],
|
||||
chronicles,
|
||||
eth/common,
|
||||
stew/[byteutils, interval_set, keyed_queue, sorted_set],
|
||||
../../../../../../utils/prettify,
|
||||
"../../../.."/range_desc,
|
||||
../../../db/[hexary_desc, hexary_envelope, hexary_error,
|
||||
hexary_paths, snapdb_accounts],
|
||||
../snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-swapin"
|
||||
|
||||
type
|
||||
SwapInPivot = object
|
||||
## Subset of `SnapPivotRef` with relevant parts, only
|
||||
rootKey: NodeKey ## Storage slots & accounts
|
||||
processed: NodeTagRangeSet ## Storage slots & accounts
|
||||
pivot: SnapPivotRef ## Accounts only
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
when false:
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Swap-in " & info
|
||||
|
||||
proc `$`(node: NodeSpecs): string =
|
||||
node.partialPath.toHex
|
||||
|
||||
proc `$`(rs: NodeTagRangeSet): string =
|
||||
rs.fullPC3
|
||||
|
||||
proc `$`(iv: NodeTagRange): string =
|
||||
iv.fullPC3
|
||||
|
||||
proc toPC(w: openArray[NodeSpecs]; n: static[int] = 3): string =
|
||||
let sumUp = w.mapIt(it.hexaryEnvelope.len).foldl(a+b, 0.u256)
|
||||
(sumUp.to(float) / (2.0^256)).toPC(n)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc existsInTrie(
|
||||
node: NodeSpecs; # Probe node to test to exist
|
||||
rootKey: NodeKey; # Start node into hexary trie
|
||||
getFn: HexaryGetFn; # Abstract database access
|
||||
): bool =
|
||||
## Check whether this node exists on the sub-trie starting at `rootKey`
|
||||
var error: HexaryError
|
||||
|
||||
try:
|
||||
let rc = node.partialPath.hexaryPathNodeKey(rootKey, getFn)
|
||||
if rc.isOk:
|
||||
return rc.value == node.nodeKey
|
||||
except RlpError:
|
||||
error = RlpEncoding
|
||||
except CatchableError:
|
||||
error = ExceptionError
|
||||
|
||||
when extraTraceMessages:
|
||||
if error != HexaryError(0):
|
||||
trace logTxt "other trie check node failed", node, error
|
||||
|
||||
false
|
||||
|
||||
|
||||
template noExceptionOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
raiseAssert "Inconveivable (" &
|
||||
info & "): name=" & $e.name & " msg=" & e.msg
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc uncoveredEnvelopes(
|
||||
processed: NodeTagRangeSet; # To be complemented
|
||||
rootKey: NodeKey; # Start node into hexary trie
|
||||
getFn: HexaryGetFn; # Abstract database access
|
||||
): seq[NodeSpecs] =
|
||||
## Compile the complement of the union of the `processed` intervals and
|
||||
## express this complement as a list of envelopes of sub-tries.
|
||||
##
|
||||
var decomposed = "n/a"
|
||||
noExceptionOops("swapIn"):
|
||||
let rc = processed.hexaryEnvelopeDecompose(rootKey, getFn)
|
||||
if rc.isOk:
|
||||
# Return allocated nodes only
|
||||
result = rc.value.filterIt(0 < it.nodeKey.ByteArray32.getFn().len)
|
||||
|
||||
when extraTraceMessages:
|
||||
decomposed = rc.value.toPC
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "unprocessed envelopes", processed,
|
||||
nProcessed=processed.chunks, decomposed,
|
||||
nResult=result.len, result=result.toPC
|
||||
else:
|
||||
discard decomposed
|
||||
|
||||
|
||||
proc otherProcessedRanges(
|
||||
node: NodeSpecs; # Top node of potential sub-trie
|
||||
otherPivots: seq[SwapInPivot]; # Other pivots list
|
||||
rootKey: NodeKey; # Start node into hexary trie
|
||||
getFn: HexaryGetFn; # Abstract database access
|
||||
): seq[NodeTagRangeSet] =
|
||||
## Collect already processed ranges from other pivots intersecting with the
|
||||
## envelope of the argument `node`. The list of other pivots is represented
|
||||
## by the argument iterator `otherPivots`.
|
||||
let envelope = node.hexaryEnvelope
|
||||
|
||||
noExceptionOops("otherProcessedRanges"):
|
||||
# For the current `node` select all hexary sub-tries that contain the same
|
||||
# node `node.nodeKey` for the partial path `node.partialPath`.
|
||||
for n,op in otherPivots:
|
||||
result.add NodeTagRangeSet.init()
|
||||
|
||||
# Check whether the node is shared
|
||||
if node.existsInTrie(op.rootKey, getFn):
|
||||
# Import already processed part of the envelope of `node` into the
|
||||
# `batch.processed` set of ranges.
|
||||
let
|
||||
other = op.processed
|
||||
touched = other.hexaryEnvelopeTouchedBy node
|
||||
|
||||
for iv in touched.increasing:
|
||||
let segment = (envelope * iv).value
|
||||
discard result[^1].merge segment
|
||||
|
||||
#when extraTraceMessages:
|
||||
# trace logTxt "collect other pivot segment", n, node, segment
|
||||
|
||||
#when extraTraceMessages:
|
||||
# if 0 < touched.chunks:
|
||||
# trace logTxt "collected other pivot", n, node,
|
||||
# other, nOtherChunks=other.chunks,
|
||||
# touched, nTouched=touched.chunks,
|
||||
# collected=result[^1]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions, swap-in functionality
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc swapIn(
|
||||
processed: NodeTagRangeSet; # Covered node ranges to be updated
|
||||
unprocessed: var UnprocessedRanges; # Uncovered node ranges to be updated
|
||||
otherPivots: seq[SwapInPivot]; # Other pivots list (read only)
|
||||
rootKey: NodeKey; # Start node into target hexary trie
|
||||
getFn: HexaryGetFn; # Abstract database access
|
||||
loopMax: int; # Prevent from looping too often
|
||||
): (seq[NodeTagRangeSet],int) =
|
||||
## Collect already processed ranges from the argument `otherPivots` and merge
## them onto the argument sets `processed` and `unprocessed`. For each entry
|
||||
## of `otherPivots`, this function returns a list of merged (aka swapped in)
|
||||
## ranges. It also returns the number of main loop runs with non-empty merges.
|
||||
var
|
||||
swappedIn = newSeq[NodeTagRangeSet](otherPivots.len)
|
||||
lapCount = 0 # Loop control
|
||||
allMerged = 0.u256 # Logging & debugging
|
||||
|
||||
# Initialise return value
|
||||
for n in 0 ..< swappedIn.len:
|
||||
swappedIn[n] = NodeTagRangeSet.init()
|
||||
|
||||
noExceptionOops("swapIn"):
|
||||
# Swap in node ranges from other pivots
|
||||
while lapCount < loopMax:
|
||||
var merged = 0.u256 # Loop control
|
||||
|
||||
let checkNodes = processed.uncoveredEnvelopes(rootKey, getFn)
|
||||
for node in checkNodes:
|
||||
|
||||
# Process table of sets from other pivots with ranges intersecting
|
||||
# with the `node` envelope.
|
||||
for n,rngSet in node.otherProcessedRanges(otherPivots, rootKey, getFn):
|
||||
|
||||
# Merge `rngSet` into `swappedIn[n]` and `pivot.processed`,
|
||||
# and remove `rngSet` from ` pivot.unprocessed`
|
||||
for iv in rngSet.increasing:
|
||||
discard swappedIn[n].merge iv # Imported range / other pivot
|
||||
merged += processed.merge iv # Import range as processed
|
||||
unprocessed.reduce iv # No need to re-fetch
|
||||
|
||||
if merged.isZero: # Loop control
|
||||
break
|
||||
|
||||
lapCount.inc
|
||||
allMerged += merged # Statistics, logging
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "inherited ranges", lapCount, nCheckNodes=checkNodes.len,
|
||||
merged=((merged.to(float) / (2.0^256)).toPC(3)),
|
||||
allMerged=((allMerged.to(float) / (2.0^256)).toPC(3))
|
||||
|
||||
# End while()
|
||||
|
||||
(swappedIn,lapCount)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc swapInAccounts*(
|
||||
ctx: SnapCtxRef; # Global context
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
loopMax = 100; # Prevent from looping too often
|
||||
): int =
|
||||
## Variant of `swapIn()` for the particular case of accounts database pivots.
|
||||
let fa = env.fetchAccounts
|
||||
if fa.processed.isFull:
|
||||
return # nothing to do
|
||||
|
||||
let
|
||||
pivot {.used.} = env.stateHeader.blockNumber.toStr # Logging & debugging
|
||||
rootKey = env.stateHeader.stateRoot.to(NodeKey)
|
||||
getFn = ctx.pool.snapDb.getAccountFn
|
||||
|
||||
others = toSeq(ctx.pool.pass.pivotTable.nextPairs)
|
||||
|
||||
# Swap in from mothballed pivots different from the current one
|
||||
.filterIt(it.data.archived and it.key.to(NodeKey) != rootKey)
|
||||
|
||||
# Extract relevant parts
|
||||
.mapIt(SwapInPivot(
|
||||
rootKey: it.key.to(NodeKey),
|
||||
processed: it.data.fetchAccounts.processed,
|
||||
pivot: it.data))
|
||||
|
||||
if others.len == 0:
|
||||
return # nothing to do
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "accounts start", pivot, nOthers=others.len
|
||||
|
||||
var
|
||||
nLaps = 0 # Logging & debugging
|
||||
nSlotAccounts = 0 # Logging & debugging
|
||||
swappedIn: seq[NodeTagRangeSet]
|
||||
|
||||
noExceptionOops("swapInAccounts"):
|
||||
(swappedIn, nLaps) = swapIn(
|
||||
fa.processed, fa.unprocessed, others, rootKey, getFn, loopMax)
|
||||
|
||||
if 0 < nLaps:
|
||||
# Update storage slots
|
||||
for n in 0 ..< others.len:
|
||||
|
||||
#when extraTraceMessages:
|
||||
# if n < swappedIn[n].chunks:
|
||||
# trace logTxt "post-processing storage slots", n, nMax=others.len,
|
||||
# changes=swappedIn[n], chunks=swappedIn[n].chunks
|
||||
|
||||
# Revisit all imported account key ranges
|
||||
for iv in swappedIn[n].increasing:
|
||||
|
||||
# The `storageAccounts` list contains indices for storage slots,
|
||||
# mapping account keys => storage root
|
||||
var rc = others[n].pivot.storageAccounts.ge(iv.minPt)
|
||||
while rc.isOk and rc.value.key <= iv.maxPt:
|
||||
|
||||
# Fetch storage slots specs from `fetchStorageFull` list
|
||||
let stRoot = rc.value.data
|
||||
if others[n].pivot.fetchStorageFull.hasKey(stRoot):
|
||||
let accKey = others[n].pivot.fetchStorageFull[stRoot].accKey
|
||||
discard env.fetchStorageFull.append(
|
||||
stRoot, SlotsQueueItemRef(acckey: accKey))
|
||||
nSlotAccounts.inc
|
||||
|
||||
rc = others[n].pivot.storageAccounts.gt(rc.value.key)
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "accounts done", pivot, nOthers=others.len, nLaps,
|
||||
nSlotAccounts
|
||||
|
||||
nLaps
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,618 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[math, sets, sequtils],
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/p2p, # trie/trie_defs],
|
||||
stew/[interval_set, keyed_queue, sorted_set],
|
||||
"../../../.."/[misc/ticker, sync_desc, types],
|
||||
"../../.."/[constants, range_desc],
|
||||
../../db/[hexary_error, snapdb_accounts, snapdb_contracts, snapdb_pivot],
|
||||
./helper/[accounts_coverage, storage_queue],
|
||||
"."/[heal_accounts, heal_storage_slots, range_fetch_accounts,
|
||||
range_fetch_contracts, range_fetch_storage_slots],
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-pivot"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enabled additional logging noise
|
||||
|
||||
proc pivotMothball*(env: SnapPivotRef) {.gcsafe.}
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers, logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Pivot " & info
|
||||
|
||||
when false:
|
||||
template ignExceptionOops(info: static[string]; code: untyped) {.used.} =
|
||||
try:
|
||||
code
|
||||
except CatchableError as e:
|
||||
trace logTxt "Ooops", `info`=info, name=($e.name), msg=(e.msg)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc accountsHealingOk(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
): bool =
|
||||
## Returns `true` if accounts healing is enabled for this pivot.
|
||||
not env.fetchAccounts.processed.isEmpty and
|
||||
healAccountsCoverageTrigger <= ctx.accountsCoverage()
|
||||
|
||||
|
||||
proc init(
|
||||
T: type RangeBatchRef; # Collection of sets of account ranges
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
): T =
|
||||
## Account ranges constructor
|
||||
new result
|
||||
result.unprocessed.init() # full range on the first set of the pair
|
||||
result.processed = NodeTagRangeSet.init()
|
||||
|
||||
# Update coverage level roll over
|
||||
ctx.accountsCoverage100PcRollOver()
|
||||
|
||||
# Initialise accounts range fetch batch, the pair of `fetchAccounts[]` range
|
||||
# sets. Deprioritise already processed ranges by moving them to the second set.
|
||||
for iv in ctx.pool.pass.coveredAccounts.increasing:
|
||||
discard result.unprocessed[0].reduce iv
|
||||
discard result.unprocessed[1].merge iv
|
||||
|
||||
proc init(
|
||||
T: type SnapPivotRef; # Pivot descriptor type
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
header: BlockHeader; # Header to generate new pivot from
|
||||
): T =
|
||||
## Pivot constructor.
|
||||
result = T(
|
||||
stateHeader: header,
|
||||
fetchAccounts: RangeBatchRef.init(ctx))
|
||||
result.storageAccounts.init()
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions: pivot table related
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc beforeTopMostlyClean*(pivotTable: var PivotTable) =
|
||||
## Clean up pivot queues of the entry before the top one. The queues are
|
||||
## the pivot data that need most of the memory. This cleaned pivot is not
|
||||
## usable any more after cleaning but might be useful as historic record.
|
||||
let rc = pivotTable.beforeLastValue
|
||||
if rc.isOk:
|
||||
rc.value.pivotMothball
|
||||
|
||||
proc topNumber*(pivotTable: var PivotTable): BlockNumber =
|
||||
## Return the block number of the top pivot entry, or zero if there is none.
|
||||
let rc = pivotTable.lastValue
|
||||
if rc.isOk:
|
||||
return rc.value.stateHeader.blockNumber
|
||||
|
||||
|
||||
proc reverseUpdate*(
|
||||
pivotTable: var PivotTable; # Pivot table
|
||||
header: BlockHeader; # Header to generate new pivot from
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
) =
|
||||
## Activate environment for earlier state root implied by `header` argument.
|
||||
##
|
||||
## Note that the pivot table is assumed to be sorted by the block numbers of
|
||||
## the pivot header.
|
||||
##
|
||||
# Append per-state root environment to LRU queue
|
||||
discard pivotTable.prepend(
|
||||
header.stateRoot, SnapPivotRef.init(ctx, header))
|
||||
|
||||
# Make sure that the LRU table does not grow too big.
|
||||
if max(3, ctx.buddiesMax) < pivotTable.len:
|
||||
# Delete second entry rather than the first which might currently
|
||||
# be needed.
|
||||
let rc = pivotTable.secondKey
|
||||
if rc.isOk:
|
||||
pivotTable.del rc.value
|
||||
|
||||
|
||||
proc tickerStats*(
|
||||
pivotTable: var PivotTable; # Pivot table
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
): TickerSnapStatsUpdater =
|
||||
## This function returns a function of type `TickerStatsUpdater` that prints
|
||||
## out pivot table statistics. The returned function is supposed to drive the
## `ticker` module.
|
||||
proc meanStdDev(sum, sqSum: float; length: int): (float,float) =
|
||||
if 0 < length:
|
||||
result[0] = sum / length.float
|
||||
let
|
||||
sqSumAv = sqSum / length.float
|
||||
rSq = result[0] * result[0]
|
||||
if rSq < sqSumAv:
|
||||
result[1] = sqrt(sqSum / length.float - result[0] * result[0])
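# In formula terms (derived from the code above): mean = Σx/n and
# stdDev = sqrt(Σx²/n - mean²), i.e. the population standard deviation.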
|
||||
|
||||
result = proc: auto =
|
||||
var
|
||||
aSum, aSqSum, uSum, uSqSum, sSum, sSqSum, cSum, cSqSum: float
|
||||
count = 0
|
||||
for kvp in ctx.pool.pass.pivotTable.nextPairs:
|
||||
|
||||
# Accounts mean & variance
|
||||
let aLen = kvp.data.nAccounts.float
|
||||
if 0 < aLen:
|
||||
count.inc
|
||||
aSum += aLen
|
||||
aSqSum += aLen * aLen
|
||||
|
||||
# Fill utilisation mean & variance
|
||||
let fill = kvp.data.fetchAccounts.processed.fullFactor
|
||||
uSum += fill
|
||||
uSqSum += fill * fill
|
||||
|
||||
let sLen = kvp.data.nSlotLists.float
|
||||
sSum += sLen
|
||||
sSqSum += sLen * sLen
|
||||
|
||||
# Lists of missing contracts
|
||||
let cLen = kvp.data.nContracts.float
|
||||
cSum += cLen
|
||||
cSqSum += cLen * cLen
|
||||
let
|
||||
env = ctx.pool.pass.pivotTable.lastValue.get(otherwise = nil)
|
||||
accCoverage = (ctx.pool.pass.coveredAccounts.fullFactor +
|
||||
ctx.pool.pass.covAccTimesFull.float)
|
||||
accFill = meanStdDev(uSum, uSqSum, count)
|
||||
var
|
||||
beaconBlock = none(BlockNumber)
|
||||
pivotBlock = none(BlockNumber)
|
||||
stoQuLen = none(int)
|
||||
ctraQuLen = none(int)
|
||||
procChunks = 0
|
||||
if not env.isNil:
|
||||
pivotBlock = some(env.stateHeader.blockNumber)
|
||||
procChunks = env.fetchAccounts.processed.chunks
|
||||
stoQuLen = some(env.storageQueueTotal())
|
||||
ctraQuLen = some(env.fetchContracts.len)
|
||||
if 0 < ctx.pool.beaconHeader.blockNumber:
|
||||
beaconBlock = some(ctx.pool.beaconHeader.blockNumber)
|
||||
|
||||
TickerSnapStats(
|
||||
beaconBlock: beaconBlock,
|
||||
pivotBlock: pivotBlock,
|
||||
nQueues: ctx.pool.pass.pivotTable.len,
|
||||
nAccounts: meanStdDev(aSum, aSqSum, count),
|
||||
nSlotLists: meanStdDev(sSum, sSqSum, count),
|
||||
nContracts: meanStdDev(cSum, cSqSum, count),
|
||||
accountsFill: (accFill[0], accFill[1], accCoverage),
|
||||
nAccountStats: procChunks,
|
||||
nStorageQueue: stoQuLen,
|
||||
nContractQueue: ctraQuLen)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions: particular pivot
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pivotCompleteOk*(env: SnapPivotRef): bool =
|
||||
## Returns `true` iff the pivot covers a complete set of accounts and
|
||||
## storage slots.
|
||||
env.fetchAccounts.processed.isFull and
|
||||
env.storageQueueTotal() == 0 and
|
||||
env.fetchContracts.len == 0
|
||||
|
||||
|
||||
proc pivotMothball*(env: SnapPivotRef) =
|
||||
## Clean up most of this argument `env` pivot record and mark it `archived`.
|
||||
## Note that archived pivots will be checked for swapping in already known
|
||||
## accounts and storage slots.
|
||||
env.fetchAccounts.unprocessed.init()
|
||||
|
||||
# Simplify the storage slots queues by resolving partial slots into the full list
|
||||
for kvp in env.fetchStoragePart.nextPairs:
|
||||
discard env.fetchStorageFull.append(
|
||||
kvp.key, SlotsQueueItemRef(acckey: kvp.data.accKey))
|
||||
env.fetchStoragePart.clear()
|
||||
|
||||
# Provide index into `fetchStorageFull`
|
||||
env.storageAccounts.clear()
|
||||
for kvp in env.fetchStorageFull.nextPairs:
|
||||
let rc = env.storageAccounts.insert(kvp.data.accKey.to(NodeTag))
|
||||
# Note that `rc.isErr` should not happen here as the accKey => storageRoot mapping is unique
|
||||
if rc.isOk:
|
||||
rc.value.data = kvp.key
|
||||
|
||||
# Finally, mark this pivot record `archived`
|
||||
env.archived = true
|
||||
|
||||
|
||||
proc execSnapSyncAction*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
buddy: SnapBuddyRef; # Worker peer
|
||||
) {.async.} =
|
||||
## Execute a synchronisation run.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
|
||||
if env.savedFullPivotOk:
|
||||
return # no need to do anything
|
||||
|
||||
block:
|
||||
# Clean up the storage slots and contracts queues first if they become too large
|
||||
if storageSlotsQuPrioThresh < env.storageQueueAvail():
|
||||
await buddy.rangeFetchStorageSlots(env)
|
||||
if buddy.ctrl.stopped or env.archived:
|
||||
return
|
||||
if contractsQuPrioThresh < env.fetchContracts.len:
|
||||
await buddy.rangeFetchContracts(env)
|
||||
if buddy.ctrl.stopped or env.archived:
|
||||
return
|
||||
|
||||
var rangeFetchOk = true
|
||||
if not env.fetchAccounts.processed.isFull:
|
||||
await buddy.rangeFetchAccounts(env)
|
||||
|
||||
# Update 100% accounting
|
||||
ctx.accountsCoverage100PcRollOver()
|
||||
|
||||
# Run at least one round fetching storage slots and contracts even if
|
||||
# the `archived` flag is set in order to keep the batch queue small.
|
||||
if buddy.ctrl.running:
|
||||
await buddy.rangeFetchStorageSlots(env)
|
||||
await buddy.rangeFetchContracts(env)
|
||||
else:
|
||||
rangeFetchOk = false
|
||||
if env.archived or (buddy.ctrl.zombie and buddy.only.errors.peerDegraded):
|
||||
return
|
||||
|
||||
# Unconditionally try healing if enabled.
|
||||
if env.accountsHealingOk(ctx):
|
||||
# Let this procedure decide whether to ditch this peer (if any.) The idea
|
||||
# is that the healing process might address different peer resources
|
||||
# than the fetch procedure. So that peer might still be useful unless
|
||||
# physically disconnected.
|
||||
buddy.ctrl.forceRun = true
|
||||
await buddy.healAccounts(env)
|
||||
if env.archived or (buddy.ctrl.zombie and buddy.only.errors.peerDegraded):
|
||||
return
|
||||
|
||||
# Some additional storage slots and contracts might have been popped up
|
||||
if rangeFetchOk:
|
||||
await buddy.rangeFetchStorageSlots(env)
|
||||
await buddy.rangeFetchContracts(env)
|
||||
if env.archived:
|
||||
return
|
||||
|
||||
# Don't bother with storage slots healing before accounts healing takes
|
||||
# place. This saves communication bandwidth. The pivot might change soon,
|
||||
# anyway.
|
||||
if env.accountsHealingOk(ctx):
|
||||
buddy.ctrl.forceRun = true
|
||||
await buddy.healStorageSlots(env)
|
||||
|
||||
|
||||
proc saveCheckpoint*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
): Result[int,HexaryError] =
|
||||
## Save current sync admin data. On success, the size of the data record
|
||||
## saved is returned (e.g. for logging.)
|
||||
##
|
||||
if env.savedFullPivotOk:
|
||||
return ok(0) # no need to do anything
|
||||
|
||||
let fa = env.fetchAccounts
|
||||
if fa.processed.isEmpty:
|
||||
return err(NoAccountsYet)
|
||||
|
||||
if saveAccountsProcessedChunksMax < fa.processed.chunks:
|
||||
return err(TooManyChunksInAccountsQueue)
|
||||
|
||||
if saveStorageSlotsMax < env.storageQueueTotal():
|
||||
return err(TooManyQueuedStorageSlots)
|
||||
|
||||
if saveContactsMax < env.fetchContracts.len:
|
||||
return err(TooManyQueuedContracts)
|
||||
|
||||
result = ctx.pool.snapDb.pivotSaveDB SnapDbPivotRegistry(
|
||||
header: env.stateHeader,
|
||||
nAccounts: env.nAccounts,
|
||||
nSlotLists: env.nSlotLists,
|
||||
processed: toSeq(env.fetchAccounts.processed.increasing)
|
||||
.mapIt((it.minPt,it.maxPt)),
|
||||
slotAccounts: (toSeq(env.fetchStorageFull.nextKeys) &
|
||||
toSeq(env.fetchStoragePart.nextKeys)).mapIt(it.to(NodeKey)) &
|
||||
toSeq(env.parkedStorage.items),
|
||||
ctraAccounts: (toSeq(env.fetchContracts.nextValues)))
|
||||
|
||||
if result.isOk and env.pivotCompleteOk():
|
||||
env.savedFullPivotOk = true
|
||||
|
||||
|
||||
proc pivotRecoverFromCheckpoint*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ctx: SnapCtxRef; # Global context (containing save state)
|
||||
topLevel: bool; # Full data set on top level only
|
||||
) =
|
||||
## Recover some pivot variables and global list `coveredAccounts` from
|
||||
## checkpoint data. If the argument `topLevel` is set `true`, also the
|
||||
## `processed`, `unprocessed`, and the `fetchStorageFull` lists are
|
||||
## initialised.
|
||||
##
|
||||
let recov = ctx.pool.pass.recovery
|
||||
if recov.isNil:
|
||||
return
|
||||
|
||||
env.nAccounts = recov.state.nAccounts
|
||||
env.nSlotLists = recov.state.nSlotLists
|
||||
|
||||
# Import processed interval
|
||||
for (minPt,maxPt) in recov.state.processed:
|
||||
if topLevel:
|
||||
env.fetchAccounts.unprocessed.reduce NodeTagRange.new(minPt, maxPt)
|
||||
discard env.fetchAccounts.processed.merge(minPt, maxPt)
|
||||
discard ctx.pool.pass.coveredAccounts.merge(minPt, maxPt)
|
||||
ctx.accountsCoverage100PcRollOver() # update coverage level roll over
|
||||
|
||||
# Handle storage slots
|
||||
let stateRoot = recov.state.header.stateRoot
|
||||
for w in recov.state.slotAccounts:
|
||||
let pt = NodeTagRange.new(w.to(NodeTag),w.to(NodeTag)) # => `pt.len == 1`
|
||||
|
||||
if 0 < env.fetchAccounts.processed.covered(pt):
|
||||
# Ignoring slots that have accounts to be downloaded, anyway
|
||||
let rc = ctx.pool.snapDb.getAccountsData(stateRoot, w)
|
||||
if rc.isErr:
|
||||
# Oops, how did that account get lost?
|
||||
discard env.fetchAccounts.processed.reduce pt
|
||||
env.fetchAccounts.unprocessed.merge pt
|
||||
elif rc.value.storageRoot != EMPTY_ROOT_HASH:
|
||||
env.storageQueueAppendFull(rc.value.storageRoot, w)
|
||||
|
||||
# Handle contracts
|
||||
for w in recov.state.ctraAccounts:
|
||||
let pt = NodeTagRange.new(w.to(NodeTag),w.to(NodeTag)) # => `pt.len == 1`
|
||||
|
||||
if 0 < env.fetchAccounts.processed.covered(pt):
|
||||
# Ignoring contracts that have accounts to be downloaded, anyway
|
||||
let rc = ctx.pool.snapDb.getAccountsData(stateRoot, w)
|
||||
if rc.isErr:
|
||||
# Oops, how did that account get lost?
|
||||
discard env.fetchAccounts.processed.reduce pt
|
||||
env.fetchAccounts.unprocessed.merge pt
|
||||
elif rc.value.codeHash != EMPTY_CODE_HASH:
|
||||
env.fetchContracts[rc.value.codeHash] = w
|
||||
|
||||
# Handle mothballed pivots for swapping in (see `pivotMothball()`)
|
||||
if topLevel:
|
||||
env.savedFullPivotOk = env.pivotCompleteOk()
|
||||
when extraTraceMessages:
|
||||
trace logTxt "recovered top level record",
|
||||
pivot=env.stateHeader.blockNumber.toStr,
|
||||
savedFullPivotOk=env.savedFullPivotOk,
|
||||
processed=env.fetchAccounts.processed.fullPC3,
|
||||
nStoQ=env.storageQueueTotal()
|
||||
else:
|
||||
for kvp in env.fetchStorageFull.nextPairs:
|
||||
let rc = env.storageAccounts.insert(kvp.data.accKey.to(NodeTag))
|
||||
if rc.isOk:
|
||||
rc.value.data = kvp.key
|
||||
env.archived = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public function, manage new peer and pivot update
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pivotApprovePeer*(buddy: SnapBuddyRef) {.async.} =
|
||||
## Approve peer and update pivot. On failure, the `buddy` will be stopped so
|
||||
## it will not proceed to the next scheduler task.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
beaconHeader = ctx.pool.beaconHeader
|
||||
var
|
||||
pivotHeader: BlockHeader
|
||||
|
||||
block:
|
||||
let rc = ctx.pool.pass.pivotTable.lastValue
|
||||
if rc.isOk:
|
||||
pivotHeader = rc.value.stateHeader
|
||||
|
||||
# Check whether the pivot needs to be updated
|
||||
if pivotHeader.blockNumber+pivotBlockDistanceMin <= beaconHeader.blockNumber:
|
||||
# If the entry before the previous entry is unused, then run a pool mode
|
||||
# based session (which should enable a pivot table purge).
|
||||
block:
|
||||
let rc = ctx.pool.pass.pivotTable.beforeLast
|
||||
if rc.isOk and rc.value.data.fetchAccounts.processed.isEmpty:
|
||||
ctx.poolMode = true
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "new pivot from beacon chain", peer=buddy.peer,
|
||||
pivot=pivotHeader.blockNumber.toStr,
|
||||
beacon=beaconHeader.blockNumber.toStr, poolMode=ctx.poolMode
|
||||
|
||||
discard ctx.pool.pass.pivotTable.lruAppend(
|
||||
beaconHeader.stateRoot, SnapPivotRef.init(ctx, beaconHeader),
|
||||
pivotTableLruEntriesMax)
|
||||
|
||||
pivotHeader = beaconHeader
|
||||
|
||||
# Not ready yet?
|
||||
if pivotHeader.blockNumber.isZero:
|
||||
buddy.ctrl.stopped = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public function, debugging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
import
|
||||
../../db/[hexary_desc, hexary_inspect, hexary_nearby, hexary_paths,
|
||||
snapdb_storage_slots]
|
||||
|
||||
const
|
||||
pivotVerifyExtraBlurb = false # or true
|
||||
inspectSuspendAfter = 10_000
|
||||
inspectExtraNap = 100.milliseconds
|
||||
|
||||
proc pivotVerifyComplete*(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
ctx: SnapCtxRef; # Some global context
|
||||
inspectAccountsTrie = false; # Check for dangling links
|
||||
walkAccountsDB = true; # Walk accounts db
|
||||
inspectSlotsTries = true; # Check dangling links (if `walkAccountsDB`)
|
||||
verifyContracts = true; # Verify that code hashes are in database
|
||||
): Future[bool]
|
||||
{.async,discardable.} =
|
||||
## Check in the database whether the pivot is complete -- not advised on a
## production system as the process takes a lot of resources.
|
||||
let
|
||||
rootKey = env.stateHeader.stateRoot.to(NodeKey)
|
||||
accFn = ctx.pool.snapDb.getAccountFn
|
||||
ctraFn = ctx.pool.snapDb.getContractsFn
|
||||
|
||||
# Verify consistency of accounts trie database. This should not be needed
|
||||
# if `walkAccountsDB` is set. If there is a dangling link that would
|
||||
# have been detected by `hexaryInspectTrie()`, the `hexaryNearbyRight()`
|
||||
# function should fail at that point as well.
|
||||
if inspectAccountsTrie:
|
||||
var
|
||||
stats = accFn.hexaryInspectTrie(rootKey,
|
||||
suspendAfter=inspectSuspendAfter,
|
||||
maxDangling=1)
|
||||
nVisited = stats.count
|
||||
nRetryCount = 0
|
||||
while stats.dangling.len == 0 and not stats.resumeCtx.isNil:
|
||||
when pivotVerifyExtraBlurb:
|
||||
trace logTxt "accounts db inspect ..", nVisited, nRetryCount
|
||||
await sleepAsync inspectExtraNap
|
||||
nRetryCount.inc
|
||||
stats = accFn.hexaryInspectTrie(rootKey,
|
||||
resumeCtx=stats.resumeCtx,
|
||||
suspendAfter=inspectSuspendAfter,
|
||||
maxDangling=1)
|
||||
nVisited += stats.count
|
||||
# End while
|
||||
|
||||
if stats.dangling.len != 0:
|
||||
error logTxt "accounts trie has danglig links", nVisited, nRetryCount
|
||||
return false
|
||||
trace logTxt "accounts trie ok", nVisited, nRetryCount
|
||||
# End `if inspectAccountsTrie`
|
||||
|
||||
# Visit accounts and make sense of storage slots
|
||||
if walkAccountsDB:
|
||||
var
|
||||
nAccounts = 0
|
||||
nStorages = 0
|
||||
nContracts = 0
|
||||
nRetryTotal = 0
|
||||
nodeTag = low(NodeTag)
|
||||
while true:
|
||||
if (nAccounts mod inspectSuspendAfter) == 0 and 0 < nAccounts:
|
||||
when pivotVerifyExtraBlurb:
|
||||
trace logTxt "accounts db walk ..",
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts
|
||||
await sleepAsync inspectExtraNap
|
||||
|
||||
# Find next account key => `nodeTag`
|
||||
let rc = nodeTag.hexaryPath(rootKey,accFn).hexaryNearbyRight(accFn)
|
||||
if rc.isErr:
|
||||
if rc.error == NearbyBeyondRange:
|
||||
break # No more accounts
|
||||
error logTxt "accounts db problem", nodeTag,
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts, error=rc.error
|
||||
return false
|
||||
nodeTag = rc.value.getPartialPath.convertTo(NodeKey).to(NodeTag)
|
||||
nAccounts.inc
|
||||
|
||||
# Decode accounts data
|
||||
var accData: Account
|
||||
try:
|
||||
accData = rc.value.leafData.decode(Account)
|
||||
except RlpError as e:
|
||||
error logTxt "account data problem", nodeTag,
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts, name=($e.name), msg=(e.msg)
|
||||
return false
|
||||
|
||||
# Check for storage slots for this account
|
||||
if accData.storageRoot != EMPTY_ROOT_HASH:
|
||||
nStorages.inc
|
||||
if inspectSlotsTries:
|
||||
let
|
||||
slotFn = ctx.pool.snapDb.getStorageSlotsFn(nodeTag.to(NodeKey))
|
||||
stoKey = accData.storageRoot.to(NodeKey)
|
||||
var
|
||||
stats = slotFn.hexaryInspectTrie(stoKey,
|
||||
suspendAfter=inspectSuspendAfter,
|
||||
maxDangling=1)
|
||||
nVisited = stats.count
|
||||
nRetryCount = 0
|
||||
while stats.dangling.len == 0 and not stats.resumeCtx.isNil:
|
||||
when pivotVerifyExtraBlurb:
|
||||
trace logTxt "storage slots inspect ..", nodeTag,
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts, nVisited, nRetryCount
|
||||
await sleepAsync inspectExtraNap
|
||||
nRetryCount.inc
|
||||
nRetryTotal.inc
|
||||
stats = slotFn.hexaryInspectTrie(stoKey,
|
||||
resumeCtx=stats.resumeCtx,
|
||||
suspendAfter=inspectSuspendAfter,
|
||||
maxDangling=1)
|
||||
nVisited += stats.count
|
||||
|
||||
if stats.dangling.len != 0:
|
||||
error logTxt "storage slots trie has dangling link", nodeTag,
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, nVisited, nRetryCount
|
||||
return false
|
||||
if nVisited == 0:
|
||||
error logTxt "storage slots trie is empty", nodeTag,
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts, nVisited, nRetryCount
|
||||
return false
|
||||
|
||||
# Check for contract codes for this account
|
||||
if accData.codeHash != EMPTY_CODE_HASH:
|
||||
nContracts.inc
|
||||
if verifyContracts:
|
||||
let codeKey = accData.codeHash.to(NodeKey)
|
||||
if codeKey.to(Blob).ctraFn.len == 0:
|
||||
error logTxt "Contract code missing", nodeTag,
|
||||
codeKey=codeKey.to(NodeTag),
|
||||
nAccounts, nStorages, nContracts, nRetryTotal,
|
||||
inspectSlotsTries, verifyContracts
|
||||
return false
|
||||
|
||||
# Set up next node key for looping
|
||||
if nodeTag == high(NodeTag):
|
||||
break
|
||||
nodeTag = nodeTag + 1.u256
|
||||
# End while
|
||||
|
||||
trace logTxt "accounts db walk ok",
|
||||
nAccounts, nStorages, nContracts, nRetryTotal, inspectSlotsTries
|
||||
# End `if walkAccountsDB`
|
||||
|
||||
return true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,258 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Fetch accounts DB ranges
|
||||
## ========================
|
||||
##
|
||||
## Account ranges allocated on the database are organised in the set
|
||||
## `env.fetchAccounts.processed` and the ranges that can be fetched are in
|
||||
## the pair of range sets `env.fetchAccounts.unprocessed`. The ranges of these
|
||||
## sets are mutually disjoint yet the union of all ranges does not fully
|
||||
## comprise the complete `[0,2^256]` range. The missing parts are the ranges
|
||||
## currently processed by worker peers.
|
||||
##
|
||||
## Algorithm
|
||||
## ---------
|
||||
##
|
||||
## * Some interval `iv` is removed from the `env.fetchAccounts.unprocessed`
|
||||
## pair of sets (so the interval `iv` is protected from other worker
|
||||
## instances and might be safely accessed and manipulated by this function.)
|
||||
## Stop if there are no more intervals.
|
||||
##
|
||||
## * The accounts data points in the interval `iv` (aka account hashes) are
|
||||
## fetched from the network. This results in *key-value* pairs for accounts.
|
||||
##
|
||||
## * The received *key-value* pairs from the previous step are verified and
|
||||
## merged into the accounts hexary trie persistent database.
|
||||
##
|
||||
## * *Key-value* pairs that were invalid or were not received from the network
|
||||
## are merged back into the range set `env.fetchAccounts.unprocessed`. The
|
||||
## remainder of successfully added ranges (and verified key gaps) are merged
|
||||
## into `env.fetchAccounts.processed`.
|
||||
##
|
||||
## * For *Key-value* pairs that have an active account storage slot sub-trie,
|
||||
## the account including administrative data is queued in
|
||||
## `env.fetchStorageFull`.
|
||||
##
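# The following `when false:` block is an illustrative sketch only: it
# condenses the interval book-keeping of one successful fetch round as it is
# spelled out in `accountsRangefetchImpl()` further down in this file.
when false:
  proc oneFetchRoundSketch(fa: RangeBatchRef; iv: NodeTagRange; lastAcc: NodeTag) =
    # Park the fetched interval back for a moment (priorities partially reversed)
    fa.unprocessed.mergeSplit iv
    # Collect the range actually delivered by the peer ...
    let covered = NodeTagRangeSet.init()
    discard covered.merge(iv.minPt, lastAcc)
    # ... and move it from `unprocessed` to `processed`.
    for w in covered.increasing:
      fa.unprocessed.reduce w
      discard fa.processed.merge w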
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
stew/[interval_set, keyed_queue],
|
||||
"../../../.."/[sync_desc, types],
|
||||
"../../.."/[constants, range_desc],
|
||||
../../get/[get_error, get_account_range],
|
||||
../../db/[hexary_envelope, snapdb_accounts],
|
||||
./helper/[accounts_coverage, storage_queue, swap_in],
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-acc"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enable additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Accounts fetch " & info
|
||||
|
||||
proc `$`(rs: NodeTagRangeSet): string =
|
||||
rs.fullPC3
|
||||
|
||||
when false:
|
||||
proc `$`(iv: NodeTagRange): string =
|
||||
iv.fullPC3
|
||||
|
||||
proc fetchCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): string {.used.} =
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"nStoQ=" & $env.storageQueueTotal() & "," &
|
||||
"nSlotLists=" & $env.nSlotLists & "," &
|
||||
"nConQ=" & $env.fetchContracts.len & "," &
|
||||
"nCon=" & $env.nContracts & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc getUnprocessed(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): Result[NodeTagRange,void] =
|
||||
## Fetch an interval from one of the account range lists.
|
||||
let accountRangeMax = high(UInt256) div buddy.ctx.buddiesMax.u256
|
||||
|
||||
env.fetchAccounts.unprocessed.fetch accountRangeMax
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions: do the account fetching for one round
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc accountsRangefetchImpl(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): Future[bool]
|
||||
{.async.} =
|
||||
## Fetch accounts and store them in the database. Returns true while more
|
||||
## data can probably be fetched.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
db = ctx.pool.snapDb
|
||||
fa = env.fetchAccounts
|
||||
stateRoot = env.stateHeader.stateRoot
|
||||
|
||||
# Get a range of accounts to fetch from
|
||||
let iv = block:
|
||||
let rc = buddy.getUnprocessed(env)
|
||||
if rc.isErr:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "currently all processed", peer, ctx=buddy.fetchCtx(env)
|
||||
return
|
||||
rc.value
|
||||
|
||||
# Process received accounts and stash storage slots to fetch later
|
||||
let dd = block:
|
||||
let
|
||||
pivot = env.stateHeader.blockNumber.toStr
|
||||
rc = await buddy.getAccountRange(stateRoot, iv, pivot)
|
||||
if rc.isErr:
|
||||
fa.unprocessed.mergeSplit iv # fail => interval back to pool
|
||||
if await buddy.ctrl.getErrorStopAfterSeriousOne(
|
||||
rc.error, buddy.only.errors):
|
||||
when extraTraceMessages:
|
||||
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
||||
reqLen=iv, error=rc.error
|
||||
discard
|
||||
return
|
||||
rc.value
|
||||
|
||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||
buddy.only.errors.getErrorReset()
|
||||
|
||||
let
|
||||
gotAccounts = dd.data.accounts.len # comprises `gotStorage`
|
||||
gotStorage {.used.} = dd.withStorage.len
|
||||
|
||||
# Now, we fully own the scheduler. The original interval will safely be placed
# back for a moment (the `unprocessed` range set will be corrected below.)
|
||||
fa.unprocessed.mergeSplit iv
|
||||
|
||||
# Processed account hashes are set up as a set of intervals which is needed
|
||||
# if the data range returned from the network contains holes.
|
||||
let covered = NodeTagRangeSet.init()
|
||||
if 0 < dd.data.accounts.len:
|
||||
discard covered.merge(iv.minPt, dd.data.accounts[^1].accKey.to(NodeTag))
|
||||
else:
|
||||
discard covered.merge iv
|
||||
|
||||
let gaps = block:
|
||||
# No left boundary check needed. If there is a gap, the partial path for
|
||||
# that gap is returned by the import function to be registered, below.
|
||||
let rc = db.importAccounts(peer, stateRoot, iv.minPt, dd.data)
|
||||
if rc.isErr:
|
||||
# Bad data, just try another peer
|
||||
buddy.ctrl.zombie = true
|
||||
# Failed to store on database, not much that can be done here
|
||||
error logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
||||
gotAccounts, gotStorage, reqLen=iv, covered, error=rc.error
|
||||
return
|
||||
rc.value
|
||||
|
||||
# Statistics
|
||||
env.nAccounts.inc(gotAccounts)
|
||||
|
||||
# Punch holes into the reported range of received accounts from the network
|
||||
# if there are gaps (described by dangling nodes.)
|
||||
for w in gaps.innerGaps:
|
||||
discard covered.reduce w.partialPath.hexaryEnvelope
|
||||
|
||||
# Update book keeping
|
||||
for w in covered.increasing:
|
||||
# Remove the processed range from the batch of unprocessed ones.
|
||||
fa.unprocessed.reduce w
|
||||
# Register consumed intervals on the accumulators over all state roots.
|
||||
discard fa.processed.merge w
|
||||
discard ctx.pool.pass.coveredAccounts.merge w
|
||||
ctx.accountsCoverage100PcRollOver() # update coverage level roll over
|
||||
|
||||
# Register accounts with storage slots on the storage TODO list.
|
||||
env.storageQueueAppend dd.withStorage
|
||||
|
||||
# Register accounts with contracts to fetch on the TODO list
|
||||
for w in dd.withContract:
|
||||
env.fetchContracts[w.codeHash] = w.accKey
|
||||
|
||||
# Swap in from other pivots unless already mothballed
|
||||
var nSwapInLaps = 0
|
||||
if not env.archived:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "before swap in", peer, ctx=buddy.fetchCtx(env), covered,
|
||||
gotAccounts, gotStorage, processed=fa.processed,
|
||||
nProcessedChunks=fa.processed.chunks.uint.toSI
|
||||
|
||||
nSwapInLaps = ctx.swapInAccounts env
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "request done", peer, ctx=buddy.fetchCtx(env), gotAccounts,
|
||||
gotStorage, nSwapInLaps, covered, processed=fa.processed,
|
||||
nProcessedChunks=fa.processed.chunks.uint.toSI,
|
||||
nContracts=dd.withContract.len
|
||||
|
||||
return true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rangeFetchAccounts*(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
) {.async.} =
|
||||
## Fetch accounts and store them in the database.
|
||||
trace logTxt "start", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||
|
||||
let fa = env.fetchAccounts
|
||||
var nFetchAccounts = 0 # for logging
|
||||
if not fa.processed.isFull():
|
||||
|
||||
while not fa.processed.isFull() and
|
||||
buddy.ctrl.running and
|
||||
not env.archived:
|
||||
# May repeat fetching with re-arranged request intervals
|
||||
if not await buddy.accountsRangefetchImpl(env):
|
||||
break
|
||||
|
||||
nFetchAccounts.inc
|
||||
|
||||
# Clean up storage slots or contracts queues now if they become too large
|
||||
if storageSlotsQuPrioThresh < env.storageQueueAvail() or
|
||||
contractsQuPrioThresh < env.fetchContracts.len:
|
||||
break
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "looping", peer=buddy.peer, ctx=buddy.fetchCtx(env),
|
||||
nFetchAccounts, isFull=fa.processed.isFull()
|
||||
|
||||
trace logTxt "done", peer=buddy.peer, ctx=buddy.fetchCtx(env), nFetchAccounts
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,194 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Fetch and install contract codes
|
||||
## ================================
|
||||
##
|
||||
## Pretty straightforward
|
||||
##
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/tables,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/[common, p2p],
|
||||
stew/keyed_queue,
|
||||
"../../.."/[constants, range_desc],
|
||||
../../get/[get_error, get_byte_codes],
|
||||
../../db/snapdb_contracts,
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-con"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
## Enable additional logging noise
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Contracts fetch " & info
|
||||
|
||||
proc fetchCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): string {.used.} =
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"nConQ=" & $env.fetchContracts.len & "," &
|
||||
"nCon=" & $env.nContracts & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template noKeyErrorOops(info: static[string]; code: untyped) =
|
||||
try:
|
||||
code
|
||||
except KeyError as e:
|
||||
raiseAssert "Inconveivable (" &
|
||||
info & "): name=" & $e.name & " msg=" & e.msg
|
||||
|
||||
|
||||
proc getUnprocessed(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
ign: HashSet[NodeKey];
|
||||
): (seq[NodeKey],Table[Hash256,NodeKey]) =
|
||||
## Fetch contract hashes from the batch queue. The full entries will be removed
## from the batch queue and returned as the second return value.
|
||||
for w in env.fetchContracts.nextPairs:
|
||||
let key = w.key.to(NodeKey)
|
||||
if key notin ign:
|
||||
result[0].add key
|
||||
result[1][w.key] = w.data
|
||||
env.fetchContracts.del w.key # safe for `keyedQueue`
|
||||
if fetchRequestContractsMax <= result[0].len:
|
||||
break
|
||||
|
||||
|
||||
proc putUnprocessed(
|
||||
env: SnapPivotRef;
|
||||
tab: Table[Hash256,NodeKey];
|
||||
) =
|
||||
## Push back some items
|
||||
for (key,val) in tab.pairs:
|
||||
# Use LRU mode which moves an item to the right end in case it is a
|
||||
# duplicate. It might have been added by some other peer which could
|
||||
# happen with a duplicate account (e.g. the one returned beyond an empty
|
||||
# range.)
|
||||
if env.fetchContracts.lruFetch(key).isErr:
|
||||
discard env.fetchContracts.append(key,val)
|
||||
|
||||
proc putUnprocessed(
|
||||
env: SnapPivotRef; # Current pivot environment
|
||||
select: seq[NodeKey]; # List of codeHash keys to re-queue
|
||||
value: Table[Hash256,NodeKey]; # Value for codeHash keys
|
||||
): HashSet[NodeKey] =
|
||||
## Variant of `putUnprocessed()`
|
||||
noKeyErrorOops("putUnprocessed"):
|
||||
for key in select:
|
||||
let hashKey = key.to(Hash256)
|
||||
if env.fetchContracts.lruFetch(hashKey).isErr:
|
||||
discard env.fetchContracts.append(hashKey, value[hashKey])
|
||||
result.incl key
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rangeFetchContractsImpl(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
ign: HashSet[NodeKey];
|
||||
): Future[(HashSet[NodeKey],bool)]
|
||||
{.async.} =
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
|
||||
# Get a batch of contract hash keys to fetch
|
||||
let (hashKeys, parking) = buddy.getUnprocessed(env,ign)
|
||||
if hashKeys.len == 0:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "currently all processed", peer, ctx=buddy.fetchCtx(env)
|
||||
return
|
||||
|
||||
# Fetch data from the network
|
||||
let dd = block:
|
||||
let rc = await buddy.getByteCodes hashKeys
|
||||
if rc.isErr:
|
||||
# Restore batch queue
|
||||
env.putUnprocessed parking
|
||||
if await buddy.ctrl.getErrorStopAfterSeriousOne(
|
||||
rc.error, buddy.only.errors):
|
||||
error logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
||||
nHashKeys=hashKeys.len, error=rc.error
|
||||
discard
|
||||
return (hashKeys.toHashSet, true)
|
||||
rc.value
|
||||
|
||||
# Import keys
|
||||
block:
|
||||
let rc = ctx.pool.snapDb.importContracts(peer, dd.kvPairs)
|
||||
if rc.isErr:
|
||||
error logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
||||
nHashKeys=hashKeys.len, error=rc.error
|
||||
return
|
||||
|
||||
# Statistics
|
||||
env.nContracts.inc(dd.kvPairs.len)
|
||||
|
||||
# Update left overs
|
||||
let leftOverKeys = env.putUnprocessed(dd.leftOver, parking)
|
||||
|
||||
return (leftOverKeys, true)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rangeFetchContracts*(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
) {.async.} =
|
||||
## Fetch contract codes and store them in the database.
|
||||
trace logTxt "start", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||
|
||||
var
|
||||
nFetchContracts = 0 # for logging
|
||||
ignore: HashSet[NodeKey] # avoid duplicate failures on this peer
|
||||
while buddy.ctrl.running and
|
||||
0 < env.fetchContracts.len and
|
||||
not env.archived:
|
||||
|
||||
# May repeat fetching batch
|
||||
let (leftOver,ok) = await buddy.rangeFetchContractsImpl(env,ignore)
|
||||
if not ok:
|
||||
break
|
||||
|
||||
for w in leftOver:
|
||||
ignore.incl w
|
||||
nFetchContracts.inc
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "looping", peer=buddy.peer, ctx=buddy.fetchCtx(env),
|
||||
nFetchContracts, nLeftOver=leftOver.len
|
||||
|
||||
trace logTxt "done", peer=buddy.peer, ctx=buddy.fetchCtx(env), nFetchContracts
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,259 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
## Fetch storage slots DB ranges
|
||||
## =============================
|
||||
##
|
||||
## In principle, this algorithm is a generalised version of the one for
|
||||
## installing on the accounts hexary trie database. The difference is that
|
||||
## there are many such storage slots hexary trie databases which are typically
|
||||
## quite small. So the normal action is to download and install a full hexary
|
||||
## trie rather than merging a partial one.
|
||||
##
|
||||
## Algorithm
|
||||
## ---------
|
||||
##
|
||||
## * Handle full storage slot hexary trie entries
|
||||
##
|
||||
## + Remove a list of full storage slot hexary trie entries from the queue of
|
||||
## full requests `env.fetchStorageFull`.
|
||||
##
|
||||
## The *full* adjective indicates that a complete trie will be installed
|
||||
## rather than a partial one merged. Stop if there are no more full entries
|
||||
## and proceed with handling partial entries.
|
||||
##
|
||||
## + Fetch and install the full trie entries of that list from the network.
|
||||
##
|
||||
## + For a list entry that was partially received (there is only one per
|
||||
## reply message), store the remaining parts to install on the queue of
|
||||
## partial storage slot hexary trie entries `env.fetchStoragePart`.
|
||||
##
|
||||
## + Rinse and repeat
|
||||
##
|
||||
## * Handle partial storage slot hexary trie entries
|
||||
##
|
||||
## + Remove a single partial storage slot hexary trie entry from the queue
|
||||
## of partial requests `env.fetchStoragePart`.
|
||||
##
|
||||
## The detailed handling of this entry resembles the algorithm described
|
||||
## for fetching accounts regarding sets of ranges `processed` and
|
||||
## `unprocessed`. Stop if there are no more entries.
|
||||
##
|
||||
## + Fetch and install the partial trie entry from the network.
|
||||
##
|
||||
## + Rinse and repeat
|
||||
##
|
||||
## Discussion
|
||||
## ----------
|
||||
##
|
||||
## If there is a hexary trie integrity problem when storing a response to a
|
||||
## full or partial entry request, re-queue the entry on the queue of partial
|
||||
## requests `env.fetchStoragePart` with the next partial range to fetch set to
## half of the current request.
|
||||
##
|
||||
## In general, if an error occurs, the entry that caused the error is moved
|
||||
## or re-stored onto the queue of partial requests `env.fetchStoragePart`.
|
||||
##
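# The following `when false:` block is an illustrative sketch only: it
# condenses the re-queueing policy described above, as implemented in
# `fetchStorageSlotsImpl()` further down in this file.
when false:
  proc requeueSketch(env: SnapPivotRef; acc: AccountSlotsHeader; splitOk: bool) =
    if splitOk:
      # Retry only half of the current range next time
      env.storageQueueAppendPartialSplit acc
    else:
      # Reset any partial result and re-fetch the complete slots trie
      env.storageQueueAppendFull acc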
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/sets,
|
||||
chronicles,
|
||||
chronos,
|
||||
eth/p2p,
|
||||
stew/[interval_set, keyed_queue],
|
||||
"../../.."/[constants, range_desc],
|
||||
../../get/[get_error, get_storage_ranges],
|
||||
../../db/[hexary_error, snapdb_storage_slots],
|
||||
./helper/storage_queue,
|
||||
./snap_pass_desc
|
||||
|
||||
logScope:
|
||||
topics = "snap-slot"
|
||||
|
||||
const
|
||||
extraTraceMessages = false # or true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private logging helpers
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
template logTxt(info: static[string]): static[string] =
|
||||
"Storage slots fetch " & info
|
||||
|
||||
proc fetchCtx(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
): string =
|
||||
"{" &
|
||||
"piv=" & env.stateHeader.blockNumber.toStr & "," &
|
||||
"ctl=" & $buddy.ctrl.state & "," &
|
||||
"nQuFull=" & $env.fetchStorageFull.len & "," &
|
||||
"nQuPart=" & $env.fetchStoragePart.len & "," &
|
||||
"nParked=" & $env.parkedStorage.len & "," &
|
||||
"nSlotLists=" & $env.nSlotLists & "}"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Private functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc fetchStorageSlotsImpl(
|
||||
buddy: SnapBuddyRef;
|
||||
req: seq[AccountSlotsHeader];
|
||||
env: SnapPivotRef;
|
||||
): Future[Result[HashSet[NodeKey],void]]
|
||||
{.async.} =
|
||||
## Fetch account storage slots and store them in the database. Returns the
## set of rejected account keys, or an error if the whole request failed.
|
||||
let
|
||||
ctx = buddy.ctx
|
||||
peer = buddy.peer
|
||||
stateRoot = env.stateHeader.stateRoot
|
||||
pivot = env.stateHeader.blockNumber.toStr # logging in `getStorageRanges()`
|
||||
|
||||
# Get storage slots data from the network
|
||||
var stoRange = block:
|
||||
let rc = await buddy.getStorageRanges(stateRoot, req, pivot)
|
||||
if rc.isErr:
|
||||
if await buddy.ctrl.getErrorStopAfterSeriousOne(
|
||||
rc.error, buddy.only.errors):
|
||||
trace logTxt "fetch error", peer, ctx=buddy.fetchCtx(env),
|
||||
nReq=req.len, error=rc.error
|
||||
return err() # all of `req` failed
|
||||
rc.value
|
||||
|
||||
# Reset error counts for detecting repeated timeouts, network errors, etc.
|
||||
buddy.only.errors.getErrorReset()
|
||||
|
||||
var
|
||||
nSlotLists = stoRange.data.storages.len
|
||||
reject: HashSet[NodeKey]
|
||||
|
||||
if 0 < nSlotLists:
|
||||
# Verify/process storage data and save it to disk
|
||||
let report = ctx.pool.snapDb.importStorageSlots(peer, stoRange.data)
|
||||
if 0 < report.len:
|
||||
if report[^1].slot.isNone:
|
||||
# Bad data, just try another peer
|
||||
buddy.ctrl.zombie = true
|
||||
# Failed to store on database, not much that can be done here
|
||||
error logTxt "import failed", peer, ctx=buddy.fetchCtx(env),
|
||||
nSlotLists=0, nReq=req.len, error=report[^1].error
|
||||
return err() # all of `req` failed
|
||||
|
||||
# Push back error entries to be processed later
|
||||
for w in report:
|
||||
# All entries except the last one always carry a slot index. The last
# item has already been checked for that, above.
|
||||
let
|
||||
inx = w.slot.get
|
||||
acc = stoRange.data.storages[inx].account
|
||||
splitOk = w.error in {RootNodeMismatch,RightBoundaryProofFailed}
|
||||
|
||||
reject.incl acc.accKey
|
||||
|
||||
if splitOk:
|
||||
# Some pathological cases need further investigation. For the
|
||||
# moment, provide partial split requeue. So a different range
|
||||
# will be unqueued and processed, next time.
|
||||
env.storageQueueAppendPartialSplit acc
|
||||
|
||||
else:
|
||||
# Reset any partial result (which would be the last entry) to
|
||||
# requesting the full interval. So all the storage slots are
|
||||
# re-fetched completely for this account.
|
||||
env.storageQueueAppendFull acc
|
||||
|
||||
error logTxt "import error", peer, ctx=buddy.fetchCtx(env), splitOk,
|
||||
nSlotLists, nRejected=reject.len, nReqInx=inx, nReq=req.len,
|
||||
nDangling=w.dangling.len, error=w.error
|
||||
|
||||
# Return unprocessed left overs to batch queue. The `req[^1].subRange` is
|
||||
# the original range requested for the last item (if any.)
|
||||
let (_,removed) = env.storageQueueUpdate(stoRange.leftOver, reject)
|
||||
|
||||
# Update statistics. The variable removed is set if the queue for a partial
|
||||
# slot range was logically removed. A partial slot range list has one entry.
|
||||
# So the correction factor for the slot lists statistics is `removed - 1`.
|
||||
env.nSlotLists.inc(nSlotLists - reject.len + (removed - 1))
|
||||
|
||||
# Clean up, un-park successful slots (if any)
|
||||
for w in stoRange.data.storages:
|
||||
env.parkedStorage.excl w.account.accKey
|
||||
|
||||
return ok(reject)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc rangeFetchStorageSlots*(
|
||||
buddy: SnapBuddyRef;
|
||||
env: SnapPivotRef;
|
||||
) {.async.} =
|
||||
## Fetch some account storage slots and store them in the database. If left
## alone (i.e. no parallel activated processes) this function tries to fetch
## each work item on the queue at least once. For partial slot range items
## this means in case of success that the outstanding range has at least
## become smaller.
|
||||
trace logTxt "start", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||
|
||||
# Fetch storage data and save it on disk. Storage requests are managed by
|
||||
# request queues for handling full/partial replies and re-fetch issues. For
|
||||
# all practical purposes, this request queue should mostly be empty.
|
||||
for (fetchFn, failMax) in [
|
||||
(storageQueueFetchFull, storageSlotsFetchFailedFullMax),
|
||||
(storageQueueFetchPartial, storageSlotsFetchFailedPartialMax)]:
|
||||
|
||||
var
|
||||
ignored: HashSet[NodeKey]
|
||||
rc = Result[HashSet[NodeKey],void].ok(ignored) # set ok() start value
|
||||
|
||||
# Run batch even if `archived` flag is set in order to shrink the queues.
|
||||
while buddy.ctrl.running and
|
||||
rc.isOk and
|
||||
ignored.len <= failMax:
|
||||
|
||||
# Pull out the next request list from the queue
|
||||
let reqList = buddy.ctx.fetchFn(env, ignored)
|
||||
if reqList.len == 0:
|
||||
when extraTraceMessages:
|
||||
trace logTxt "queue exhausted", peer=buddy.peer,
|
||||
ctx=buddy.fetchCtx(env),
|
||||
isPartQueue=(fetchFn==storageQueueFetchPartial)
|
||||
break
|
||||
|
||||
# Process list, store in database. The `reqList` is re-queued accordingly
|
||||
# in the `fetchStorageSlotsImpl()` function unless there is an error. In
|
||||
# the error case, the whole argument list `reqList` is left untouched.
|
||||
rc = await buddy.fetchStorageSlotsImpl(reqList, env)
|
||||
if rc.isOk:
|
||||
for w in rc.value:
|
||||
ignored.incl w # Ignoring bogus response items
|
||||
else:
|
||||
# Push back unprocessed jobs after error
|
||||
env.storageQueueAppendPartialSplit reqList
|
||||
|
||||
when extraTraceMessages:
|
||||
trace logTxt "processed", peer=buddy.peer, ctx=buddy.fetchCtx(env),
|
||||
isPartQueue=(fetchFn==storageQueueFetchPartial),
|
||||
nReqList=reqList.len,
|
||||
nIgnored=ignored.len,
|
||||
subRange0=reqList[0].subRange.get(otherwise=FullNodeTagRange),
|
||||
account0=reqList[0].accKey,
|
||||
rc=(if rc.isOk: rc.value.len else: -1)
|
||||
# End `while`
|
||||
# End `for`
|
||||
|
||||
trace logTxt "done", peer=buddy.peer, ctx=buddy.fetchCtx(env)
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,232 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
std/[hashes, sets],
|
||||
eth/common,
|
||||
stew/[interval_set, keyed_queue, sorted_set],
|
||||
"../../.."/[range_desc, worker_desc],
|
||||
../../db/snapdb_pivot
|
||||
|
||||
export
|
||||
worker_desc # base descriptor
|
||||
|
||||
type
|
||||
AccountsList* = SortedSet[NodeTag,Hash256]
|
||||
## Sorted list of `(account,state-root)` pairs
|
||||
|
||||
SlotsQueue* = KeyedQueue[Hash256,SlotsQueueItemRef]
|
||||
## Handles list of storage slots data to fetch, indexed by storage root.
|
||||
##
|
||||
## Typically, storage data requests cover the full storage slots trie. If
|
||||
## there is only a partial list of slots to fetch, the queue entry is
|
||||
## stored left-most for easy access.
|
||||
|
||||
SlotsQueueItemRef* = ref object
|
||||
## Storage slots request data. This entry is similar to `AccountSlotsHeader`
|
||||
## where the optional `subRange` interval has been replaced by an interval
|
||||
## range + healing support.
|
||||
accKey*: NodeKey ## Owner account
|
||||
slots*: RangeBatchRef           ## Slots to fetch, nil => all slots
|
||||
|
||||
ContractsQueue* = KeyedQueue[Hash256,NodeKey]
|
||||
## Handles hash key list of contract data to fetch with accounts associated
|
||||
|
||||
UnprocessedRanges* = array[2,NodeTagRangeSet]
|
||||
## Pair of sets of ``unprocessed`` node ranges that need to be fetched and
|
||||
## integrated. The ranges in the first set must be handled with priority.
|
||||
##
|
||||
## This data structure is used for coordinating peers that run quasi
|
||||
## parallel.
|
||||
|
||||
RangeBatchRef* = ref object
|
||||
## `NodeTag` ranges to fetch, healing support
|
||||
unprocessed*: UnprocessedRanges ## Range of slots to be fetched
|
||||
processed*: NodeTagRangeSet ## Node ranges definitely processed
|
||||
|
||||
SnapPivotRef* = ref object
|
||||
## Per-state root cache for particular snap data environment
|
||||
stateHeader*: BlockHeader       ## Pivot state, containing state root
|
||||
|
||||
# Accounts download coverage
|
||||
fetchAccounts*: RangeBatchRef ## Set of accounts ranges to fetch
|
||||
|
||||
# Contract code queue
|
||||
fetchContracts*: ContractsQueue ## Contracts to fetch & store
|
||||
|
||||
# Storage slots download
|
||||
fetchStorageFull*: SlotsQueue ## Fetch storage trie for these accounts
|
||||
fetchStoragePart*: SlotsQueue   ## Partial storage trie to complete
|
||||
parkedStorage*: HashSet[NodeKey] ## Storage batch items in use
|
||||
|
||||
# Info
|
||||
nAccounts*: uint64 ## Imported # of accounts
|
||||
nSlotLists*: uint64 ## Imported # of account storage tries
|
||||
nContracts*: uint64 ## Imported # of contract code sets
|
||||
|
||||
# Checkpointing
|
||||
savedFullPivotOk*: bool ## This fully completed pivot was saved
|
||||
|
||||
# Mothballing, ready to be swapped into newer pivot record
|
||||
storageAccounts*: AccountsList ## Accounts with missing storage slots
|
||||
archived*: bool ## Not latest pivot, anymore
|
||||
|
||||
PivotTable* = KeyedQueue[Hash256,SnapPivotRef]
|
||||
## LRU table, indexed by state root
|
||||
|
||||
RecoveryRef* = ref object
|
||||
## Recovery context
|
||||
state*: SnapDbPivotRegistry ## Saved recovery context state
|
||||
level*: int ## top level is zero
|
||||
|
||||
SnapPassCtxRef* = ref object of RootRef
|
||||
## Global context extension, snap sync parameters, pivot table
|
||||
pivotTable*: PivotTable ## Per state root environment
|
||||
completedPivot*: SnapPivotRef ## Start full sync from here
|
||||
coveredAccounts*: NodeTagRangeSet ## Derived from all available accounts
|
||||
covAccTimesFull*: uint ## # of 100% coverages
|
||||
recovery*: RecoveryRef ## Current recovery checkpoint/context
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public getter/setter
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc pass*(pool: SnapCtxData): auto =
|
||||
## Getter, pass local descriptor
|
||||
pool.snap.SnapPassCtxRef
|
||||
|
||||
proc `pass=`*(pool: var SnapCtxData; val: SnapPassCtxRef) =
|
||||
## Setter, pass local descriptor
|
||||
pool.snap = val
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public functions
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc hash*(a: SlotsQueueItemRef): Hash =
|
||||
## Table/KeyedQueue mixin
|
||||
cast[pointer](a).hash
|
||||
|
||||
proc hash*(a: Hash256): Hash =
|
||||
## Table/KeyedQueue mixin
|
||||
a.data.hash
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public helpers: UnprocessedRanges
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
proc init*(q: var UnprocessedRanges; clear = false) =
|
||||
## Populate node range sets with maximal range in the first range set. This
|
||||
## kind of pair of interval sets is managed as follows:
|
||||
## * As long as possible, fetch and merge back intervals on the first set.
|
||||
## * If the first set is empty and some intervals are to be fetched, swap
|
||||
## first and second interval lists.
|
||||
## That way, intervals from the first set are prioritised while the rest
## is considered after the prioritised intervals are exhausted.
|
||||
q[0] = NodeTagRangeSet.init()
|
||||
q[1] = NodeTagRangeSet.init()
|
||||
if not clear:
|
||||
discard q[0].merge FullNodeTagRange
|
||||
|
||||
proc clear*(q: var UnprocessedRanges) =
|
||||
## Reset the argument range sets to empty.
|
||||
q[0].clear()
|
||||
q[1].clear()
|
||||
|
||||
|
||||
proc merge*(q: var UnprocessedRanges; iv: NodeTagRange) =
|
||||
## Unconditionally merge the node range into the account ranges list.
|
||||
discard q[0].merge(iv)
|
||||
discard q[1].reduce(iv)
|
||||
|
||||
proc mergeSplit*(q: var UnprocessedRanges; iv: NodeTagRange) =
|
||||
## Ditto w/priorities partially reversed
|
||||
if iv.len == 1:
|
||||
discard q[0].reduce iv
|
||||
discard q[1].merge iv
|
||||
else:
|
||||
let
|
||||
# note that (`iv.len` == 0) => (`iv` == `FullNodeTagRange`)
|
||||
midPt = iv.minPt + ((iv.maxPt - iv.minPt) shr 1)
|
||||
iv1 = NodeTagRange.new(iv.minPt, midPt)
|
||||
iv2 = NodeTagRange.new(midPt + 1.u256, iv.maxPt)
|
||||
discard q[0].reduce iv1
|
||||
discard q[1].merge iv1
|
||||
discard q[0].merge iv2
|
||||
discard q[1].reduce iv2
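# Example (added remark): for `iv = [0x00, 0xFF]` the split point is
# `midPt = 0x7F`, so the lower half `[0x00, 0x7F]` is moved to the second
# (deferred) set while the upper half `[0x80, 0xFF]` goes to the first
# (priority) set.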
|
||||
|
||||
|
||||
proc reduce*(q: var UnprocessedRanges; iv: NodeTagRange) =
|
||||
## Unconditionally remove the node range from the account ranges list
|
||||
discard q[0].reduce(iv)
|
||||
discard q[1].reduce(iv)
|
||||
|
||||
|
||||
iterator ivItems*(q: var UnprocessedRanges): NodeTagRange =
|
||||
## Iterator over all list entries
|
||||
for ivSet in q:
|
||||
for iv in ivSet.increasing:
|
||||
yield iv
|
||||
|
||||
|
||||
proc fetch*(
|
||||
q: var UnprocessedRanges;
|
||||
maxLen = 0.u256;
|
||||
): Result[NodeTagRange,void] =
|
||||
## Fetch interval from node ranges with maximal size `maxLen`, where
|
||||
## `0.u256` is interpreted as `2^256`.
|
||||
|
||||
# Swap batch queues if the first one is empty
|
||||
if q[0].isEmpty:
|
||||
swap(q[0], q[1])
|
||||
|
||||
# Fetch from first range list
|
||||
let rc = q[0].ge()
|
||||
if rc.isErr:
|
||||
return err()
|
||||
|
||||
let
|
||||
jv = rc.value
|
||||
iv = block:
|
||||
if maxLen.isZero or (0 < jv.len and jv.len <= maxLen):
|
||||
jv
|
||||
else:
|
||||
# Note that either:
|
||||
# (`jv.len` == 0) => (`jv` == `FullNodeTagRange`) => `jv.minPt` == 0
|
||||
# or
|
||||
# (`maxLen` < `jv.len`) => (`jv.minPt`+`maxLen` <= `jv.maxPt`)
|
||||
NodeTagRange.new(jv.minPt, jv.minPt + maxLen)
|
||||
|
||||
discard q[0].reduce(iv)
|
||||
ok(iv)
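# Illustrative usage sketch only: how a worker would typically drive the
# queue pair above (cf. `accountsRangefetchImpl()` in the accounts fetch
# pass module.)
when false:
  proc unprocessedUsageSketch(maxLen: UInt256): bool =
    var q: UnprocessedRanges
    q.init()                     # first set starts out holding the full range
    let rc = q.fetch(maxLen)     # carve out at most `maxLen` node tags
    if rc.isErr:
      return false               # nothing left to fetch
    # ... request the interval `rc.value` from a peer; on failure it goes
    # back to the pool with partially reversed priorities:
    q.mergeSplit rc.value
    true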
|
||||
|
||||
# -----------------
|
||||
|
||||
proc verify*(q: var UnprocessedRanges): bool =
|
||||
## Verify consistency, i.e. that the two sets of ranges have no overlap.
|
||||
if q[0].chunks == 0 or q[1].chunks == 0:
|
||||
# At least one set is empty
|
||||
return true
|
||||
# So neither set is empty
|
||||
if q[0].total.isZero or q[1].total.isZero:
|
||||
# At least one set is maximal and the other non-empty
|
||||
return false
|
||||
# So neither set is empty, not full
|
||||
let (a,b) = if q[0].chunks < q[1].chunks: (0,1) else: (1,0)
|
||||
for iv in q[a].increasing:
|
||||
if 0 < q[b].covered(iv):
|
||||
return false
|
||||
true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|
@ -1,71 +0,0 @@
|
||||
# Nimbus
|
||||
# Copyright (c) 2021-2024 Status Research & Development GmbH
|
||||
# Licensed under either of
|
||||
# * Apache License, version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
# http://www.apache.org/licenses/LICENSE-2.0)
|
||||
# * MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
# http://opensource.org/licenses/MIT)
|
||||
# at your option. This file may not be copied, modified, or distributed
|
||||
# except according to those terms.
|
||||
|
||||
{.push raises: [].}
|
||||
|
||||
import
|
||||
eth/[common, p2p],
|
||||
../misc/ticker,
|
||||
../sync_desc,
|
||||
./worker/get/get_error,
|
||||
./worker/db/[snapdb_desc]
|
||||
|
||||
export
|
||||
sync_desc # worker desc prototype
|
||||
|
||||
type
|
||||
SnapBuddyData* = object
|
||||
## Peer-worker local descriptor data extension
|
||||
errors*: GetErrorStatsRef ## For error handling
|
||||
full*: RootRef ## Peer local full sync descriptor
|
||||
# snap*: RootRef ## Peer local snap sync descriptor
|
||||
|
||||
SnapSyncPassType* = enum
|
||||
## Current sync mode. After a snapshot has been downloaded, the system
|
||||
## proceeds with full sync.
|
||||
SnapSyncMode = 0 ## Start mode
|
||||
FullSyncMode
|
||||
|
||||
SnapSyncPass* = object
|
||||
## Full specs for all sync modes. This table must be held in the main
|
||||
## descriptor and initialised at run time. The table values are opaque
|
||||
## and will be specified in the worker module(s).
|
||||
active*: SnapSyncPassType
|
||||
tab*: array[SnapSyncPassType,RootRef]
|
||||
|
||||
SnapCtxData* = object
|
||||
## Globally shared data extension
|
||||
rng*: ref HmacDrbgContext ## Random generator
|
||||
snapDb*: SnapDbRef ## Accounts snapshot DB
|
||||
|
||||
# Info
|
||||
beaconHeader*: BlockHeader ## Running on beacon chain
|
||||
enableTicker*: bool             ## Advisory, extra level of gossip
|
||||
ticker*: TickerRef ## Ticker, logger descriptor
|
||||
|
||||
# Snap/full mode multiplexing
|
||||
syncMode*: SnapSyncPass ## Sync mode methods & data
|
||||
|
||||
# Snap sync parameters, pivot table
|
||||
snap*: RootRef ## Global snap sync descriptor
|
||||
|
||||
# Full sync continuation parameters
|
||||
fullHeader*: Option[BlockHeader] ## Start full sync from here
|
||||
full*: RootRef ## Global full sync descriptor
|
||||
|
||||
SnapBuddyRef* = BuddyRef[SnapCtxData,SnapBuddyData]
|
||||
## Extended worker peer descriptor
|
||||
|
||||
SnapCtxRef* = CtxRef[SnapCtxData]
|
||||
## Extended global descriptor
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# End
|
||||
# ------------------------------------------------------------------------------
|