Silence some compiler gossip -- part 8, sync (#1467)

details:
  Adding some missing exception annotations
Jordan Hrycaj 2023-02-15 00:38:33 +01:00 committed by GitHub
parent df1217b7ca
commit 880313d7a4
26 changed files with 237 additions and 315 deletions
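
The pattern applied throughout the changed modules is to enable `{.push raises: [].}` at the top of each file and to give callback/proc types an explicit `raises` list, so that catch-all `except Defect`/`except Exception` branches can be dropped. A minimal sketch of that pattern (module contents are invented for illustration, not taken from this commit):

{.push raises: [].}        # procs below must not leak a CatchableError

import
  std/tables

type
  GetFn = proc(key: string): string {.gcsafe, raises: [CatchableError].}
    ## A callback type with an explicit raises list, mirroring `HexaryGetFn`

proc lookup(tab: Table[string,string]; key: string): string =
  ## Compiles under `raises: []` because the only catchable error is handled
  try:
    result = tab[key]
  except KeyError:
    result = ""

{.pop.}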

View File

@@ -135,16 +135,16 @@ proc processStaged(buddy: FullBuddyRef): bool =
   except CatchableError as e:
     error "Storing persistent blocks failed", peer, range=($wi.blocks),
       error = $e.name, msg = e.msg
-  except Defect as e:
-    # Pass through
-    raise e
-  except Exception as e:
-    # Notorious case where the `Chain` reference applied to
-    # `persistBlocks()` has the compiler traced a possible `Exception`
-    # (i.e. `ctx.chain` could be uninitialised.)
-    error "Exception while storing persistent blocks", peer,
-      range=($wi.blocks), error=($e.name), msg=e.msg
-    raise (ref Defect)(msg: $e.name & ": " & e.msg)
+  #except Defect as e:
+  #  # Pass through
+  #  raise e
+  #except Exception as e:
+  #  # Notorious case where the `Chain` reference applied to
+  #  # `persistBlocks()` has the compiler traced a possible `Exception`
+  #  # (i.e. `ctx.chain` could be uninitialised.)
+  #  error "Exception while storing persistent blocks", peer,
+  #    range=($wi.blocks), error=($e.name), msg=e.msg
+  #  raise (ref Defect)(msg: $e.name & ": " & e.msg)
 
   # Something went wrong. Recycle work item (needs to be re-fetched, anyway)
   let
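
The commented-out branches above rely on Nim's exception hierarchy: `Defect` and `CatchableError` both derive from `Exception`, but only `CatchableError` is tracked by `raises` and caught by `except CatchableError`. With `{.push raises: [].}` in force the compiler no longer traces a bare `Exception` out of `persistBlocks()`, so a single catchable branch is enough. A small stand-alone sketch (the proc name is invented):

proc store(blocks: seq[int]): bool {.raises: [].} =
  try:
    if blocks.len == 0:
      raise newException(ValueError, "nothing to store")  # a CatchableError
    result = true
  except CatchableError:
    result = false   # Defects (e.g. failed asserts) are not caught here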

View File

@@ -118,9 +118,8 @@ proc blockHeader(db: ChainDBRef,
 # Private functions: peers related functions
 # ------------------------------------------------------------------------------
 
-when isMainModule:
-  proc hash(peer: Peer): hashes.Hash =
-    hash(peer.remote)
+proc hash(peer: Peer): hashes.Hash {.used.} =
+  hash(peer.remote)
 
 proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] =
   # do not send back tx or txhash to thisPeer

View File

@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+{.push raises: [].}
+
 import
   chronicles,
   chronos,
@@ -16,8 +18,6 @@ import
   ../protocol/snap/snap_types,
   ../../core/chain
 
-{.push raises: [].}
-
 logScope:
   topics = "wire-protocol"

View File

@@ -480,19 +480,8 @@ proc persistWorkItem(ctx: LegacySyncRef, wi: var WantedBlocks): ValidationResult
   try:
     result = ctx.chain.persistBlocks(wi.headers, wi.bodies)
   except CatchableError as e:
-    error "storing persistent blocks failed",
-      error = $e.name, msg = e.msg
+    error "storing persistent blocks failed", error = $e.name, msg = e.msg
     result = ValidationResult.Error
-  except Defect as e:
-    # Pass through
-    raise e
-  except Exception as e:
-    # Notorious case where the `Chain` reference applied to `persistBlocks()`
-    # has the compiler traced a possible `Exception` (i.e. `ctx.chain` could
-    # be uninitialised.)
-    error "exception while storing persistent blocks",
-      error = $e.name, msg = e.msg
-    raise (ref Defect)(msg: $e.name & ": " & e.msg)
 
   case result
   of ValidationResult.OK:
     ctx.finalizedBlock = wi.endIndex

View File

@@ -172,7 +172,7 @@ proc getBestHeader(
 proc agreesOnChain(
     bp: BestPivotWorkerRef;
-    other: Peer
+    other: Peer;
       ): Future[Result[void,bool]]
       {.async.} =
   ## Returns `true` if one of the peers `bp.peer` or `other` acknowledges

View File

@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+{.push raises: [].}
+
 import
   eth/[common, p2p],
   chronicles,
@@ -17,8 +19,6 @@ import
   ./snap/[worker, worker_desc],
   "."/[protocol, sync_desc, sync_sched]
 
-{.push raises: [].}
-
 logScope:
   topics = "snap-sync"

View File

@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or
 # distributed except according to those terms.
 
+{.push raises: [].}
+
 import
   std/[math, sequtils, strutils, hashes],
   eth/common,
@@ -17,8 +19,6 @@ import
   ../protocol,
   ../types
 
-{.push raises: [].}
-
 type
   ByteArray32* = array[32,byte]
     ## Used for 32 byte database keys

View File

@@ -40,11 +40,8 @@ template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
   except CatchableError as e:
-    raiseAssert "Inconveivable (" & info & ": name=" & $e.name & " msg=" & e.msg
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Private functions
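
Such guard templates let code that the compiler flags as potentially raising be called from procs that promise `raises: []`: any catchable error that "cannot happen" is converted into a Defect via `raiseAssert`. An illustrative stand-alone version (the template and proc names here are hypothetical):

import std/strutils

template cannotFail(info: static[string]; code: untyped) =
  try:
    code
  except CatchableError as e:
    raiseAssert "Inconceivable (" & info & "): " & $e.name & " -- " & e.msg

proc parsePort(s: string): int {.raises: [].} =
  # `parseInt` may raise `ValueError`; the guard turns that into a Defect
  cannotFail("parsePort"):
    result = s.parseInt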

View File

@@ -18,8 +18,9 @@ import
 {.push raises: [].}
 
 type
-  HexaryPpFn* = proc(key: RepairKey): string {.gcsafe.}
-    ## For testing/debugging: key pretty printer function
+  HexaryPpFn* =
+    proc(key: RepairKey): string {.gcsafe, raises: [CatchableError].}
+      ## For testing/debugging: key pretty printer function
 
   ByteArray33* = array[33,byte]
     ## Used for 31 byte database keys, i.e. <marker> + <32-byte-key>
@@ -140,10 +141,11 @@ type
     repairKeyGen*: uint64 ## Unique tmp key generator
     keyPp*: HexaryPpFn ## For debugging, might go away
 
-  HexaryGetFn* = proc(key: openArray[byte]): Blob {.gcsafe.}
-    ## Persistent database `get()` function. For read-only cases, this function
-    ## can be seen as the persistent alternative to ``tab[]` on a
-    ## `HexaryTreeDbRef` descriptor.
+  HexaryGetFn* = proc(key: openArray[byte]): Blob
+    {.gcsafe, raises: [CatchableError].}
+      ## Persistent database `get()` function. For read-only cases, this
+      ## function can be seen as the persistent alternative to ``tab[]` on
+      ## a `HexaryTreeDbRef` descriptor.
 
   HexaryNodeReport* = object
     ## Return code for single node operations
@@ -208,7 +210,7 @@ proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string =
   try:
     if not disablePrettyKeys and not db.keyPp.isNil:
       return db.keyPp(key)
-  except:
+  except CatchableError:
     discard
   key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii
@@ -276,17 +278,16 @@ proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] =
       result = result or (1u64 shl 63)
   proc cmpIt(x, y: (uint64,string)): int =
     cmp(x[0],y[0])
-  try:
-    var accu: seq[(uint64,string)]
-    if root.ByteArray32 != ByteArray32.default:
-      accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")]
-    for key,node in db.tab.pairs:
-      accu.add (
-        key.ppImpl(db).tokey,
-        "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")")
-    result = accu.sorted(cmpIt).mapIt(it[1])
-  except Exception as e:
-    result &= " ! Ooops ppImpl(): name=" & $e.name & " msg=" & e.msg
+  var accu: seq[(uint64,string)]
+  if root.ByteArray32 != ByteArray32.default:
+    accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")]
+  for key,node in db.tab.pairs:
+    accu.add (
+      key.ppImpl(db).tokey,
+      "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")")
+  accu.sorted(cmpIt).mapIt(it[1])
 
 # ------------------------------------------------------------------------------
 # Public debugging helpers
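
Annotating the callback types (`HexaryPpFn`, `HexaryGetFn`) with `raises: [CatchableError]` is what lets the calling modules compile under `{.push raises: [].}`: a call site then only has to handle, or explicitly convert, that one exception class. A simplified sketch with invented names:

type
  BlobGetFn = proc(key: seq[byte]): seq[byte]
    {.gcsafe, raises: [CatchableError].}

proc fetch(getFn: BlobGetFn; key: seq[byte]): seq[byte] {.raises: [].} =
  try:
    result = getFn(key)
  except CatchableError:
    result = @[]          # treat any backend failure as "not found"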

View File

@@ -93,6 +93,10 @@ proc `==`(a, b: XNodeObj): bool =
   of Branch:
     return a.bLink == b.bLink
 
+proc eq(a, b: XPathStep|RPathStep): bool =
+  a.key == b.key and a.nibble == b.nibble and a.node == b.node
+
 proc isZeroLink(a: Blob): bool =
   ## Persistent database has `Blob` as key
   a.len == 0
@@ -168,7 +172,7 @@ proc doDecomposeLeft(
   var collect: seq[NodeSpecs]
   block rightCurbEnvelope:
     for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
-      if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
+      if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]):
         #
         # At this point, the `node` entries of either `.path[n]` step are
         # the same. This is so because the predecessor steps were the same
@@ -218,7 +222,7 @@ proc doDecomposeRight(
   var collect: seq[NodeSpecs]
   block leftCurbEnvelope:
     for n in 0 ..< min(envQ.path.len+1, ivQ.path.len):
-      if n == envQ.path.len or envQ.path[n] != ivQ.path[n]:
+      if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]):
         for m in n ..< ivQ.path.len:
           let
             pfx = ivQ.getNibbles(0, m) # common path segment
@@ -241,7 +245,7 @@ proc decomposeLeftImpl(
     iv: NodeTagRange; # Proofed range of leaf paths
     db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
       ): Result[seq[NodeSpecs],HexaryError]
-      {.gcsafe, raises: [RlpError,KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
   var nodeSpex: seq[NodeSpecs]
@@ -272,7 +276,7 @@ proc decomposeRightImpl(
     iv: NodeTagRange; # Proofed range of leaf paths
     db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
       ): Result[seq[NodeSpecs],HexaryError]
-      {.gcsafe, raises: [RlpError,KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Database agnostic implementation of `hexaryEnvelopeDecompose()`.
   var nodeSpex: seq[NodeSpecs]
   if iv.maxPt < env.maxPt:
@@ -455,12 +459,12 @@ proc hexaryEnvelopeTouchedBy*(
 # ------------------------------------------------------------------------------
 
 proc hexaryEnvelopeDecompose*(
     partialPath: Blob; # Hex encoded partial path
     rootKey: NodeKey; # State root
     iv: NodeTagRange; # Proofed range of leaf paths
-    db: HexaryTreeDbRef; # Database
+    db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
       ): Result[seq[NodeSpecs],HexaryError]
-      {.gcsafe, raises: [KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## This function computes the decomposition of the argument `partialPath`
   ## relative to the argument range `iv`.
   ##
@@ -502,7 +506,7 @@ proc hexaryEnvelopeDecompose*(
   if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
     return err(DecomposeDisjunct) # empty result
 
-  noRlpErrorOops("in-memory hexaryEnvelopeDecompose"):
+  noRlpErrorOops("hexaryEnvelopeDecompose"):
     let left = block:
       let rc = env.decomposeLeftImpl(rootKey, iv, db)
       if rc.isErr:
@@ -517,33 +521,6 @@ proc hexaryEnvelopeDecompose*(
   # Notreached
 
-proc hexaryEnvelopeDecompose*(
-    partialPath: Blob; # Hex encoded partial path
-    rootKey: NodeKey; # State root
-    iv: NodeTagRange; # Proofed range of leaf paths
-    getFn: HexaryGetFn; # Database abstraction
-      ): Result[seq[NodeSpecs],HexaryError]
-      {.gcsafe, raises: [RlpError].} =
-  ## Variant of `hexaryEnvelopeDecompose()` for persistent database.
-  let env = partialPath.hexaryEnvelope
-  if iv.maxPt < env.minPt or env.maxPt < iv.minPt:
-    return err(DecomposeDisjunct) # empty result
-
-  noKeyErrorOops("persistent hexaryEnvelopeDecompose"):
-    let left = block:
-      let rc = env.decomposeLeftImpl(rootKey, iv, getFn)
-      if rc.isErr:
-        return rc
-      rc.value
-    let right = block:
-      let rc = env.decomposeRightImpl(rootKey, iv, getFn)
-      if rc.isErr:
-        return rc
-      rc.value
-    return ok(left & right)
-  # Notreached
-
 proc hexaryEnvelopeDecompose*(
     partialPath: Blob; # Hex encoded partial path
     ranges: NodeTagRangeSet; # To be complemented
@@ -641,7 +618,7 @@ proc hexaryEnvelopeDecompose*(
       let rc = env.decomposeRightImpl(rootKey, iv, db)
       if rc.isOk:
         delayed &= rc.value # Queue right side for next lap
-    except RlpError, KeyError:
+    except CatchableError:
       # Cannot decompose `w`, so just drop it
       discard
@@ -659,7 +636,8 @@ proc hexaryEnvelopeDecompose*(
     ranges: NodeTagRangeSet; # To be complemented
     rootKey: NodeKey; # State root
     db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
-      ): Result[seq[NodeSpecs],HexaryError] =
+      ): Result[seq[NodeSpecs],HexaryError]
+      {.gcsafe, raises: [CatchableError].} =
   ## Variant of `hexaryEnvelopeDecompose()` for ranges and a `NodeSpecs`
   ## argument rather than a partial path.
   node.partialPath.hexaryEnvelopeDecompose(ranges, rootKey, db)
@@ -668,7 +646,8 @@ proc hexaryEnvelopeDecompose*(
     ranges: NodeTagRangeSet; # To be complemented
     rootKey: NodeKey; # State root
     db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
-      ): Result[seq[NodeSpecs],HexaryError] =
+      ): Result[seq[NodeSpecs],HexaryError]
+      {.gcsafe, raises: [CatchableError].} =
   ## Variant of `hexaryEnvelopeDecompose()` for ranges and an implicit maximal
   ## partial path envelope.
   ## argument rather than a partial path.
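
The `HexaryTreeDbRef|HexaryGetFn` parameter above is how the separate in-memory and persistent variants get merged: one generic proc over a type union, with the body specialised at compile time. A reduced sketch of the same idea (types and helper are simplified stand-ins, not the real ones):

import std/tables

type
  MemDb = Table[string,string]
  GetFn = proc(key: string): string {.gcsafe, raises: [CatchableError].}

proc load(db: MemDb|GetFn; key: string): string
    {.raises: [CatchableError].} =
  when db is MemDb:
    result = db.getOrDefault(key)   # in-memory table lookup
  else:
    result = db(key)                # persistent backend callback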

View File

@@ -19,6 +19,7 @@ type
     LowerBoundProofError
     NodeNotFound
     RlpEncoding
+    SlotsNotFound
     SlotsNotSrictlyIncreasing
     TrieLoopAlert
     TrieIsEmpty
@@ -69,6 +70,8 @@ type
     NoRocksDbBackend
     UnresolvedRepairNode
     OSErrorException
+    IOErrorException
+    ExceptionError
     StateRootNotFound
 
   # End
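
Error codes like `IOErrorException` and `ExceptionError` exist so that exceptions caught at the database boundary can be mapped onto a `Result` error value instead of propagating. A stand-alone sketch of that mapping (uses nim-results, imported as `results` here, `stew/results` in older code; the enum and proc are invented):

import std/[strutils, tables], results

type
  DemoError = enum
    NothingSerious
    KeyNotFound
    ExceptionError

proc lookupPort(tab: Table[string,string]; name: string): Result[int,DemoError] =
  try:
    return ok(tab[name].parseInt)   # may raise KeyError or ValueError
  except KeyError:
    return err(KeyNotFound)
  except CatchableError:
    return err(ExceptionError)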

View File

@@ -99,7 +99,7 @@ proc processLink(
     inspect: var seq[(NodeKey,NibblesSeq)];
     trail: NibblesSeq;
     child: Rlp;
-      ) {.gcsafe, raises: [RlpError]} =
+      ) {.gcsafe, raises: [CatchableError]} =
   ## Ditto
   if not child.isEmpty:
     let childBlob = child.toBytes
@@ -267,7 +267,7 @@ proc hexaryInspectTrie*(
     stopAtLevel = 64u8; # Width-first depth level
     maxDangling = high(int); # Maximal number of dangling results
       ): TrieNodeStat
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryInspectTrie()` for persistent database.
   when extraTraceMessages:
     let nPaths = paths.len

View File

@@ -34,7 +34,7 @@ type
 # Private debugging helpers
 # ------------------------------------------------------------------------------
 
-when true:
+when false:
   import std/[sequtils, strutils]
 
   proc pp(w: RPathXStep; db: HexaryTreeDbRef): string =

View File

@@ -8,6 +8,7 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+{.push raises: [].}
 
 import
   std/tables,
@@ -16,13 +17,11 @@ import
   ../../range_desc,
   "."/[hexary_desc, hexary_error, hexary_paths]
 
-{.push raises: [].}
-
 proc hexaryNearbyRight*(path: RPath; db: HexaryTreeDbRef;
                        ): Result[RPath,HexaryError] {.gcsafe, raises: [KeyError]}
 
 proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn;
-                       ): Result[XPath,HexaryError] {.gcsafe, raises: [RlpError]}
+                       ): Result[XPath,HexaryError] {.gcsafe, raises: [CatchableError]}
 
 # ------------------------------------------------------------------------------
 # Private helpers
@@ -91,7 +90,7 @@ proc hexaryNearbyRightImpl(
     rootKey: NodeKey; # State root
     db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
       ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [KeyError,RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Wrapper
   let path = block:
     let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyRight(db)
@@ -111,7 +110,7 @@ proc hexaryNearbyLeftImpl(
     rootKey: NodeKey; # State root
     db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
       ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [KeyError,RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Wrapper
   let path = block:
     let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db)
@@ -184,7 +183,7 @@ proc completeLeast(
     getFn: HexaryGetFn;
     pathLenMax = 64;
       ): Result[XPath,HexaryError]
-      {.gcsafe, raises: [RlpError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Variant of `completeLeast()` for persistent database
   var xPath = XPath(path: path.path)
@@ -290,7 +289,7 @@ proc completeMost(
     getFn: HexaryGetFn;
     pathLenMax = 64;
       ): Result[XPath,HexaryError]
-      {.gcsafe, raises: [RlpError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Variant of `completeLeast()` for persistent database
   var xPath = XPath(path: path.path)
@@ -350,7 +349,7 @@ proc hexaryNearbyRight*(
     path: RPath; # Partially expanded path
     db: HexaryTreeDbRef; # Database
       ): Result[RPath,HexaryError]
-      {.gcsafe, raises: [Defect,KeyError]} =
+      {.gcsafe, raises: [KeyError]} =
   ## Extends the maximally extended argument nodes `path` to the right (i.e.
   ## with non-decreasing path value). This is similar to the
   ## `hexary_path.next()` function, only that this algorithm does not
@@ -448,7 +447,7 @@ proc hexaryNearbyRight*(
     path: XPath; # Partially expanded path
     getFn: HexaryGetFn; # Database abstraction
       ): Result[XPath,HexaryError]
-      {.gcsafe, raises: [Defect,RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryNearbyRight()` for persistant database
 
   # Some easy cases
@@ -672,7 +671,7 @@ proc hexaryNearbyLeft*(
     path: XPath; # Partially expanded path
     getFn: HexaryGetFn; # Database abstraction
       ): Result[XPath,HexaryError]
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryNearbyLeft()` for persistant database
 
   # Some easy cases
@@ -765,45 +764,25 @@ proc hexaryNearbyLeft*(
 proc hexaryNearbyRight*(
     baseTag: NodeTag; # Some node
     rootKey: NodeKey; # State root
-    db: HexaryTreeDbRef; # Database
+    db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
       ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [KeyError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather
-  ## than `RPath()` ones.
+  ## than `RPath` or `XPath` ones.
   noRlpErrorOops("hexaryNearbyRight"):
     return baseTag.hexaryNearbyRightImpl(rootKey, db)
 
-proc hexaryNearbyRight*(
-    baseTag: NodeTag; # Some node
-    rootKey: NodeKey; # State root
-    getFn: HexaryGetFn; # Database abstraction
-      ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [RlpError]} =
-  ## Variant of `hexaryNearbyRight()` for persistant database
-  noKeyErrorOops("hexaryNearbyRight"):
-    return baseTag.hexaryNearbyRightImpl(rootKey, getFn)
-
 proc hexaryNearbyLeft*(
     baseTag: NodeTag; # Some node
     rootKey: NodeKey; # State root
-    db: HexaryTreeDbRef; # Database
+    db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction
      ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [KeyError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Similar to `hexaryNearbyRight()` for `NodeKey` arguments.
   noRlpErrorOops("hexaryNearbyLeft"):
     return baseTag.hexaryNearbyLeftImpl(rootKey, db)
 
-proc hexaryNearbyLeft*(
-    baseTag: NodeTag; # Some node
-    rootKey: NodeKey; # State root
-    getFn: HexaryGetFn; # Database abstraction
-      ): Result[NodeTag,HexaryError]
-      {.gcsafe, raises: [RlpError]} =
-  ## Variant of `hexaryNearbyLeft()` for persistant database
-  noKeyErrorOops("hexaryNearbyLeft"):
-    return baseTag.hexaryNearbyLeftImpl(rootKey, getFn)
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

View File

@@ -10,6 +10,8 @@
 ## Find node paths in hexary tries.
 
+{.push raises: [].}
+
 import
   std/[sequtils, sets, tables],
   eth/[common, trie/nibbles],
@@ -17,8 +19,6 @@ import
   ../../range_desc,
   ./hexary_desc
 
-{.push raises: [].}
-
 # ------------------------------------------------------------------------------
 # Private debugging helpers
 # ------------------------------------------------------------------------------
@@ -132,7 +132,7 @@ proc pathExtend(
     key: Blob;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Ditto for `XPath` rather than `RPath`
   result = path
   var key = key
@@ -193,7 +193,7 @@ proc pathLeast(
     key: Blob;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## For the partial path given, extend by branch nodes with least node
   ## indices.
   result = path
@@ -283,7 +283,7 @@ proc pathMost(
     key: Blob;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## For the partial path given, extend by branch nodes with greatest node
   ## indices.
   result = path
@@ -457,7 +457,7 @@ proc hexaryPath*(
     rootKey: NodeKey|RepairKey;
     db: HexaryTreeDbRef;
       ): RPath
-      {.gcsafe, raises: [Defect,KeyError]} =
+      {.gcsafe, raises: [KeyError]} =
   ## Variant of `hexaryPath` for a hex encoded partial path.
   partialPath.hexPrefixDecode[1].hexaryPath(rootKey, db)
@@ -467,7 +467,7 @@ proc hexaryPath*(
     rootKey: NodeKey; # State root
     getFn: HexaryGetFn; # Database abstraction
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Compute the longest possible path on an arbitrary hexary trie.
   XPath(tail: partialPath).pathExtend(rootKey.to(Blob), getFn)
@@ -476,7 +476,7 @@ proc hexaryPath*(
     rootKey: NodeKey;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryPath` for a node key..
   nodeKey.to(NibblesSeq).hexaryPath(rootKey, getFn)
@@ -485,7 +485,7 @@ proc hexaryPath*(
     rootKey: NodeKey;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryPath` for a node tag..
   nodeTag.to(NodeKey).hexaryPath(rootKey, getFn)
@@ -494,7 +494,7 @@ proc hexaryPath*(
     rootKey: NodeKey;
     getFn: HexaryGetFn;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryPath` for a hex encoded partial path.
   partialPath.hexPrefixDecode[1].hexaryPath(rootKey, getFn)
@@ -543,7 +543,7 @@ proc hexaryPathNodeKey*(
     getFn: HexaryGetFn; # Database abstraction
     missingOk = false; # Also return key for missing node
       ): Result[NodeKey,void]
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryPathNodeKey()` for persistent database.
   let steps = partialPath.hexaryPath(rootKey, getFn)
   if 0 < steps.path.len and steps.tail.len == 0:
@@ -564,19 +564,18 @@ proc hexaryPathNodeKey*(
     getFn: HexaryGetFn; # Database abstraction
     missingOk = false; # Also return key for missing node
       ): Result[NodeKey,void]
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Variant of `hexaryPathNodeKey()` for persistent database and
   ## hex encoded partial path.
   partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, getFn, missingOk)
 
 proc hexaryPathNodeKeys*(
     partialPaths: seq[Blob]; # Partial paths segments
     rootKey: NodeKey|RepairKey; # State root
     db: HexaryTreeDbRef; # Database
     missingOk = false; # Also return key for missing node
       ): HashSet[NodeKey]
-      {.gcsafe, raises: [Defect,KeyError]} =
+      {.gcsafe, raises: [KeyError]} =
   ## Convert a list of path segments to a set of node keys
   partialPaths.toSeq
     .mapIt(it.hexaryPathNodeKey(rootKey, db, missingOk))
@@ -593,7 +592,7 @@ proc next*(
     getFn: HexaryGetFn;
     minDepth = 64;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Advance the argument `path` to the next leaf node (if any.). The
   ## `minDepth` argument requires the result of `next()` to satisfy
   ## `minDepth <= next().getNibbles.len`.
@@ -624,7 +623,7 @@ proc prev*(
     getFn: HexaryGetFn;
     minDepth = 64;
       ): XPath
-      {.gcsafe, raises: [RlpError]} =
+      {.gcsafe, raises: [CatchableError]} =
   ## Advance the argument `path` to the previous leaf node (if any.) The
   ## `minDepth` argument requires the result of `next()` to satisfy
   ## `minDepth <= next().getNibbles.len`.

View File

@@ -11,7 +11,7 @@
 import
   std/[sequtils, sets, tables],
   chronicles,
-  eth/[common, p2p, rlp, trie/nibbles],
+  eth/[common, p2p, trie/nibbles],
   stew/[byteutils, interval_set],
   ../../range_desc,
   "."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths]
@@ -51,12 +51,12 @@ template collectLeafs(
   block body:
     var
-      nodeTag = iv.minPt
+      nodeTag = minPt(iv)
       prevTag: NodeTag
       rls: seq[RangeLeaf]
 
     # Fill at most `nLeafs` leaf nodes from interval range
-    while rls.len < nLeafs and nodeTag <= iv.maxPt:
+    while rls.len < nLeafs and nodeTag <= maxPt(iv):
       # The following logic might be sub-optimal. A strict version of the
       # `next()` function that stops with an error at dangling links could
       # be faster if the leaf nodes are not too far apart on the hexary trie.
@@ -119,13 +119,38 @@ template updateProof(
 # Public functions
 # ------------------------------------------------------------------------------
 
+#proc hexaryRangeLeafsProof*(
+#    db: HexaryTreeDbRef; # Database abstraction
+#    rootKey: NodeKey; # State root
+#    iv: NodeTagRange; # Proofed range of leaf paths
+#    nLeafs = high(int); # Implies maximal data size
+#      ): Result[RangeProof,HexaryError]
+#      {.gcsafe, raises: [KeyError]} =
+#  ## Collect trie database leafs prototype and add proof.
+#  let rc = db.collectLeafs(rootKey, iv, nLeafs)
+#  if rc.isErr:
+#    err(rc.error)
+#  else:
+#    ok(db.updateProof(rootKey, iv.minPt, rc.value))
+#
+#proc hexaryRangeLeafsProof*(
+#    db: HexaryTreeDbRef; # Database abstraction
+#    rootKey: NodeKey; # State root
+#    baseTag: NodeTag; # Left boundary
+#    leafList: seq[RangeLeaf]; # Set of already collected leafs
+#      ): RangeProof
+#      {.gcsafe, raises: [KeyError]} =
+#  ## Complement leafs list by adding proof nodes to the argument list
+#  ## `leafList`.
+#  db.updateProof(rootKey, baseTag, leafList)
+
 proc hexaryRangeLeafsProof*(
-    db: HexaryTreeDbRef; # Database abstraction
+    db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
     rootKey: NodeKey; # State root
     iv: NodeTagRange; # Proofed range of leaf paths
     nLeafs = high(int); # Implies maximal data size
      ): Result[RangeProof,HexaryError]
-      {.gcsafe, raises: [Defect,KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Collect trie database leafs prototype and add proof.
   let rc = db.collectLeafs(rootKey, iv, nLeafs)
   if rc.isErr:
@@ -134,40 +159,16 @@ proc hexaryRangeLeafsProof*(
     ok(db.updateProof(rootKey, iv.minPt, rc.value))
 
 proc hexaryRangeLeafsProof*(
-    db: HexaryTreeDbRef; # Database abstraction
+    db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction
     rootKey: NodeKey; # State root
     baseTag: NodeTag; # Left boundary
     leafList: seq[RangeLeaf]; # Set of already collected leafs
       ): RangeProof
-      {.gcsafe, raises: [Defect,KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Complement leafs list by adding proof nodes to the argument list
   ## `leafList`.
   db.updateProof(rootKey, baseTag, leafList)
 
-proc hexaryRangeLeafsProof*(
-    db: HexaryGetFn; # Database abstraction
-    rootKey: NodeKey; # State root
-    iv: NodeTagRange; # Proofed range of leaf paths
-    nLeafs = high(int); # Implies maximal data size
-      ): Result[RangeProof,HexaryError]
-      {.gcsafe, raises: [Defect,RlpError]} =
-  ## Variant of `hexaryRangeLeafsProof()` for persistent database.
-  let rc = db.collectLeafs(rootKey, iv, nLeafs)
-  if rc.isErr:
-    err(rc.error)
-  else:
-    ok(db.updateProof(rootKey, iv.minPt, rc.value))
-
-proc hexaryRangeLeafsProof*(
-    db: HexaryGetFn; # Database abstraction
-    rootKey: NodeKey; # State root
-    baseTag: NodeTag; # Left boundary
-    leafList: seq[RangeLeaf]; # Set of already collected leafs
-      ): RangeProof
-      {.gcsafe, raises: [Defect,RlpError]} =
-  ## Variant of `hexaryRangeLeafsProof()` for persistent database.
-  db.updateProof(rootKey, baseTag, leafList)
-
 # ------------------------------------------------------------------------------
 # End
 # ------------------------------------------------------------------------------

View File

@@ -159,7 +159,7 @@ proc add*(
 proc finish*(
     rbl: RockyBulkLoadRef
       ): Result[int64,void]
-      {.gcsafe, raises: [OSError].} =
+      {.gcsafe, raises: [OSError, IOError].} =
   ## Commit collected and cached data to the database. This function implies
   ## `destroy()` if successful. Otherwise `destroy()` must be called
   ## explicitely, e.g. after error analysis.
@@ -177,14 +177,12 @@ proc finish*(
       addr csError)
 
   if csError.isNil:
-    var size: int64
-    try:
-      var f: File
-      if f.open(rbl.filePath):
-        size = f.getFileSize
-        f.close
-    except:
-      discard
+    var
+      size: int64
+      f: File
+    if f.open(rbl.filePath):
+      size = f.getFileSize
+      f.close
     rbl.destroy()
     return ok(size)
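
`IOError` joins the `raises` list above because `getFileSize` on a std `File` can raise it, and with the blanket `try/except: discard` gone the annotation has to say so. A stand-alone sketch of the same idea, not the library code:

proc fileSize(path: string): int64 {.raises: [IOError].} =
  var f: File
  if f.open(path):          # `open` reports failure via its bool result
    result = f.getFileSize  # may raise IOError
    f.close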

View File

@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+{.push raises: [].}
+
 import
   std/[algorithm, sequtils, tables],
   chronicles,
@@ -18,8 +20,6 @@ import
   hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc,
   snapdb_persistent]
 
-{.push raises: [].}
-
 logScope:
   topics = "snap-db"
@@ -34,6 +34,8 @@ type
 const
   extraTraceMessages = false or true
 
+proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn
+
 # ------------------------------------------------------------------------------
 # Private helpers
 # ------------------------------------------------------------------------------
@@ -50,17 +52,15 @@ template noKeyError(info: static[string]; code: untyped) =
   except KeyError as e:
     raiseAssert "Not possible (" & info & "): " & e.msg
 
-template noRlpExceptionOops(info: static[string]; code: untyped) =
+template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
+  except KeyError as e:
+    raiseAssert "Not possible -- " & info & ": " & e.msg
   except RlpError:
     return err(RlpEncoding)
-  except KeyError as e:
-    raiseAssert "Not possible (" & info & "): " & e.msg
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    return err(AccountNotFound)
 
 # ------------------------------------------------------------------------------
 # Private functions
@@ -70,7 +70,7 @@ proc persistentAccounts(
     db: HexaryTreeDbRef; ## Current table
     ps: SnapDbAccountsRef; ## For persistent database
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [OSError,KeyError].} =
+      {.gcsafe, raises: [OSError,IOError,KeyError].} =
   ## Store accounts trie table on databse
   if ps.rockDb.isNil:
     let rc = db.persistentAccountsPut(ps.kvDb)
@@ -300,7 +300,7 @@ proc importAccounts*(
   except OSError as e:
     error "Import Accounts exception", peer=ps.peer, name=($e.name), msg=e.msg
     return err(OSErrorException)
-  except Exception as e:
+  except CatchableError as e:
     raiseAssert "Not possible @ importAccounts(" & $e.name & "):" & e.msg
 
   #when extraTraceMessages:
@@ -330,7 +330,8 @@ proc importRawAccountsNodes*(
     nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
     reportNodes = {Leaf}; ## Additional node types to report
     persistent = false; ## store data on disk
-      ): seq[HexaryNodeReport] =
+      ): seq[HexaryNodeReport]
+      {.gcsafe, raises: [IOError].} =
   ## Store data nodes given as argument `nodes` on the persistent database.
   ##
   ## If there were an error when processing a particular argument `notes` item,
@@ -396,7 +397,8 @@ proc importRawAccountsNodes*(
     peer: Peer, ## For log messages, only
     nodes: openArray[NodeSpecs]; ## List of `(key,data)` records
     reportNodes = {Leaf}; ## Additional node types to report
-      ): seq[HexaryNodeReport] =
+      ): seq[HexaryNodeReport]
+      {.gcsafe, raises: [IOError].} =
   ## Variant of `importRawNodes()` for persistent storage.
   SnapDbAccountsRef.init(
     pv, Hash256(), peer).importRawAccountsNodes(
@@ -409,7 +411,7 @@ proc getAccountsNodeKey*(
      ): Result[NodeKey,HexaryError] =
   ## For a partial node path argument `path`, return the raw node key.
   var rc: Result[NodeKey,void]
-  noRlpExceptionOops("getAccountsNodeKey()"):
+  noExceptionOops("getAccountsNodeKey()"):
     if persistent:
       rc = path.hexaryPathNodeKey(ps.root, ps.getAccountFn)
     else:
@@ -438,7 +440,7 @@ proc getAccountsData*(
   ## Caveat: There is no unit test yet for the non-persistent version
   var acc: Account
 
-  noRlpExceptionOops("getAccountData()"):
+  noExceptionOops("getAccountData()"):
     var leaf: Blob
     if persistent:
       leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData
@@ -499,7 +501,7 @@ proc nextAccountsChainDbKey*(
      ): Result[NodeKey,HexaryError] =
   ## Fetch the account path on the `ChainDBRef`, the one next to the
   ## argument account key.
-  noRlpExceptionOops("getChainDbAccount()"):
+  noExceptionOops("getChainDbAccount()"):
     let path = accKey
       .hexaryPath(ps.root, ps.getAccountFn)
       .next(ps.getAccountFn)
@@ -515,7 +517,7 @@ proc prevAccountsChainDbKey*(
      ): Result[NodeKey,HexaryError] =
   ## Fetch the account path on the `ChainDBRef`, the one before to the
   ## argument account.
-  noRlpExceptionOops("getChainDbAccount()"):
+  noExceptionOops("getChainDbAccount()"):
     let path = accKey
      .hexaryPath(ps.root, ps.getAccountFn)
      .prev(ps.getAccountFn)

View File

@@ -52,9 +52,7 @@ template noPpError(info: static[string]; code: untyped) =
     raiseAssert "Inconveivable (" & info & "): " & e.msg
   except KeyError as e:
     raiseAssert "Not possible (" & info & "): " & e.msg
-  except Defect as e:
-    raise e
-  except Exception as e:
+  except CatchableError as e:
     raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg
 
 proc toKey(a: RepairKey; pv: SnapDbRef): uint =
@@ -255,7 +253,7 @@ proc verifyLowerBound*(
     base: NodeTag; ## Before or at first account entry in `data`
     first: NodeTag; ## First account key
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Verify that `base` is to the left of the first leaf entry and there is
   ## nothing in between.
   var error: HexaryError
@@ -279,7 +277,7 @@ proc verifyNoMoreRight*(
     peer: Peer; ## For log messages
     base: NodeTag; ## Before or at first account entry in `data`
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [KeyError].} =
+      {.gcsafe, raises: [CatchableError].} =
   ## Verify that there is are no more leaf entries to the right of and
   ## including `base`.
   let

View File

@@ -8,6 +8,8 @@
 # at your option. This file may not be copied, modified, or distributed
 # except according to those terms.
 
+{.push raises: [].}
+
 import
   std/[algorithm, tables],
   chronicles,
@@ -16,17 +18,18 @@ import
   ../../range_desc,
   "."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc]
 
-{.push raises: [].}
-
 logScope:
   topics = "snap-db"
 
 type
-  AccountsGetFn* = proc(key: openArray[byte]): Blob {.gcsafe, raises:[Defect].}
-    ## The `get()` function for the accounts trie
+  AccountsGetFn* = proc(key: openArray[byte]): Blob
+    {.gcsafe, raises:[].}
+      ## The `get()` function for the accounts trie
 
-  StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob {.gcsafe, raises: [Defect].}
-    ## The `get()` function for the storage trie depends on the current account
+  StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob
+    {.gcsafe, raises: [].}
+      ## The `get()` function for the storage trie depends on the current
+      ## account
 
   StateRootRegistry* = object
     ## State root record. A table of these kind of records is organised as
@@ -180,7 +183,7 @@ proc persistentAccountsPut*(
     db: HexaryTreeDbRef;
     rocky: RocksStoreRef
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [OSError,KeyError].} =
+      {.gcsafe, raises: [OSError,IOError,KeyError].} =
   ## SST based bulk load on `rocksdb`.
   if rocky.isNil:
     return err(NoRocksDbBackend)
@@ -229,7 +232,7 @@ proc persistentStorageSlotsPut*(
     db: HexaryTreeDbRef;
     rocky: RocksStoreRef
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [OSError,KeyError].} =
+      {.gcsafe, raises: [OSError,IOError,KeyError].} =
   ## SST based bulk load on `rocksdb`.
   if rocky.isNil:
     return err(NoRocksDbBackend)

View File

@@ -42,34 +42,13 @@ proc to(h: Hash256; T: type NodeKey): T =
 #proc convertTo(data: openArray[byte]; T: type Hash256): T =
 #  discard result.data.NodeKey.init(data) # size error => zero
 
-#template noKeyError(info: static[string]; code: untyped) =
-#  try:
-#    code
-#  except KeyError as e:
-#    raiseAssert "Not possible (" & info & "): " & e.msg
-
-template noRlpExceptionOops(info: static[string]; code: untyped) =
+template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
   except RlpError:
     return err(RlpEncoding)
-  except KeyError as e:
-    raiseAssert "Not possible (" & info & "): " & e.msg
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
-
-#template noGenericExOrKeyError(info: static[string]; code: untyped) =
-#  try:
-#    code
-#  except KeyError as e:
-#    raiseAssert "Not possible (" & info & "): " & e.msg
-#  except Defect as e:
-#    raise e
-#  except Exception as e:
-#    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    return err(SlotsNotFound)
 
 # ------------------------------------------------------------------------------
 # Private functions
@@ -79,7 +58,7 @@ proc persistentStorageSlots(
     db: HexaryTreeDbRef; ## Current table
     ps: SnapDbStorageSlotsRef; ## For persistent database
      ): Result[void,HexaryError]
-      {.gcsafe, raises: [OSError,KeyError].} =
+      {.gcsafe, raises: [OSError,IOError,KeyError].} =
   ## Store accounts trie table on databse
   if ps.rockDb.isNil:
     let rc = db.persistentStorageSlotsPut(ps.kvDb)
@@ -322,6 +301,10 @@ proc importStorageSlots*(
     result.add HexaryNodeReport(slot: itemInx, error: OSErrorException)
     error "Import storage slots exception", peer, itemInx, nItems,
       name=($e.name), msg=e.msg, nErrors=result.len
+  except IOError as e:
+    result.add HexaryNodeReport(slot: itemInx, error: IOErrorException)
+    error "Import storage slots exception", peer, itemInx, nItems,
+      name=($e.name), msg=e.msg, nErrors=result.len
 
   #when extraTraceMessages:
   #  if result.len == 0:
@@ -401,6 +384,11 @@ proc importRawStorageSlotsNodes*(
       nErrors.inc
       error "Import storage slots nodes exception", peer, slot, nItems,
        name=($e.name), msg=e.msg, nErrors
+    except IOError as e:
+      result.add HexaryNodeReport(slot: slot, error: IOErrorException)
+      nErrors.inc
+      error "Import storage slots nodes exception", peer, slot, nItems,
+        name=($e.name), msg=e.msg, nErrors
 
   when extraTraceMessages:
     if nErrors == 0:
@@ -445,7 +433,7 @@ proc inspectStorageSlotsTrie*(
   ##
   let peer {.used.} = ps.peer
   var stats: TrieNodeStat
-  noRlpExceptionOops("inspectStorageSlotsTrie()"):
+  noExceptionOops("inspectStorageSlotsTrie()"):
     if persistent:
       stats = ps.getStorageSlotsFn.hexaryInspectTrie(
         ps.root, pathList, resumeCtx, suspendAfter=suspendAfter)
@@ -498,7 +486,7 @@ proc getStorageSlotsData*(
   let peer {.used.} = ps.peer
   var acc: Account
 
-  noRlpExceptionOops("getStorageSlotsData()"):
+  noExceptionOops("getStorageSlotsData()"):
     var leaf: Blob
     if persistent:
       leaf = path.hexaryPath(ps.root, ps.getStorageSlotsFn).leafData
@@ -506,7 +494,7 @@
       leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData
 
     if leaf.len == 0:
-      return err(AccountNotFound)
+      return err(SlotsNotFound)
     acc = rlp.decode(leaf,Account)
 
   return ok(acc)

View File

@@ -69,10 +69,9 @@ import
 template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Public functions
@@ -125,7 +124,7 @@ proc findMissingNodes*(
       stopAtLevel = planBLevelMax,
       maxDangling = fetchRequestTrieNodesMax)
     result = (stats.dangling, stats.level, stats.count)
-  except:
+  except CatchableError:
    discard
 
 # ------------------------------------------------------------------------------

View File

@@ -101,10 +101,9 @@ template discardRlpError(info: static[string]; code: untyped) =
 template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Private functions

View File

@@ -37,6 +37,9 @@
 ## must be solved by fetching and storing more storage slots and running this
 ## healing algorithm again.
 ##
+
+# ###### --- CHECK DEADLOCK ---- ####
+
 import
   std/[math, sequtils, tables],
   chronicles,
@@ -105,19 +108,12 @@ proc healingCtx(
 # Private helpers
 # ------------------------------------------------------------------------------
 
-#template discardRlpError(info: static[string]; code: untyped) =
-#  try:
-#    code
-#  except RlpError as e:
-#    discard
-
 template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Private functions
@@ -205,7 +201,7 @@ proc slotKey(node: NodeSpecs): (bool,NodeKey) =
      nibbles = prefix & segment
    if nibbles.len == 64:
      return (true, nibbles.getBytes.convertTo(NodeKey))
-  except:
+  except CatchableError:
    discard
 
 # ------------------------------------------------------------------------------

View File

@@ -24,10 +24,9 @@ import
 template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Private functions

View File

@@ -99,6 +99,8 @@ proc existsInTrie(
       return rc.value == node.nodeKey
   except RlpError:
     error = RlpEncoding
+  except CatchableError:
+    error = ExceptionError
 
   when extraTraceMessages:
     if error != NothingSerious:
@@ -107,23 +109,12 @@ proc existsInTrie(
   false
 
-template noKeyErrorOrExceptionOops(info: static[string]; code: untyped) =
-  try:
-    code
-  except KeyError as e:
-    raiseAssert "Not possible (" & info & "): " & e.msg
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
-
 template noExceptionOops(info: static[string]; code: untyped) =
   try:
     code
-  except Defect as e:
-    raise e
-  except Exception as e:
-    raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg
+  except CatchableError as e:
+    raiseAssert "Inconveivable (" &
+      info & "): name=" & $e.name & " msg=" & e.msg
 
 # ------------------------------------------------------------------------------
 # Private functions
@@ -138,13 +129,14 @@ proc uncoveredEnvelopes(
   ## express this complement as a list of envelopes of sub-tries.
   ##
   var decomposed = "n/a"
-  let rc = processed.hexaryEnvelopeDecompose(rootKey, getFn)
-  if rc.isOk:
-    # Return allocated nodes only
-    result = rc.value.filterIt(0 < it.nodeKey.ByteArray32.getFn().len)
-
-    when extraTraceMessages:
-      decomposed = rc.value.toPC
+  noExceptionOops("swapIn"):
+    let rc = processed.hexaryEnvelopeDecompose(rootKey, getFn)
+    if rc.isOk:
+      # Return allocated nodes only
+      result = rc.value.filterIt(0 < it.nodeKey.ByteArray32.getFn().len)
+
+      when extraTraceMessages:
+        decomposed = rc.value.toPC
 
   when extraTraceMessages:
     trace logTxt "unprocessed envelopes", processed,
@@ -216,36 +208,37 @@ proc swapIn(
   for n in 0 ..< swappedIn.len:
     swappedIn[n] = NodeTagRangeSet.init()
 
-  # Swap in node ranges from other pivots
-  while lapCount < loopMax:
-    var merged = 0.u256 # Loop control
-    let checkNodes = processed.uncoveredEnvelopes(rootKey, getFn)
-    for node in checkNodes:
-      # Process table of sets from other pivots with ranges intersecting
-      # with the `node` envelope.
-      for n,rngSet in node.otherProcessedRanges(otherPivots, rootKey, getFn):
-        # Merge `rngSet` into `swappedIn[n]` and `pivot.processed`,
-        # and remove `rngSet` from ` pivot.unprocessed`
-        for iv in rngSet.increasing:
-          discard swappedIn[n].merge iv # Imported range / other pivot
-          merged += processed.merge iv # Import range as processed
-          unprocessed.reduce iv # No need to re-fetch
-    if merged == 0: # Loop control
-      break
-    lapCount.inc
-    allMerged += merged # Statistics, logging
-    when extraTraceMessages:
-      trace logTxt "inherited ranges", lapCount, nCheckNodes=checkNodes.len,
-        merged=((merged.to(float) / (2.0^256)).toPC(3)),
-        allMerged=((allMerged.to(float) / (2.0^256)).toPC(3))
-    # End while()
+  noExceptionOops("swapIn"):
+    # Swap in node ranges from other pivots
+    while lapCount < loopMax:
+      var merged = 0.u256 # Loop control
+      let checkNodes = processed.uncoveredEnvelopes(rootKey, getFn)
+      for node in checkNodes:
+        # Process table of sets from other pivots with ranges intersecting
+        # with the `node` envelope.
+        for n,rngSet in node.otherProcessedRanges(otherPivots, rootKey, getFn):
+          # Merge `rngSet` into `swappedIn[n]` and `pivot.processed`,
+          # and remove `rngSet` from ` pivot.unprocessed`
+          for iv in rngSet.increasing:
+            discard swappedIn[n].merge iv # Imported range / other pivot
+            merged += processed.merge iv # Import range as processed
+            unprocessed.reduce iv # No need to re-fetch
+      if merged == 0: # Loop control
+        break
+      lapCount.inc
+      allMerged += merged # Statistics, logging
+      when extraTraceMessages:
+        trace logTxt "inherited ranges", lapCount, nCheckNodes=checkNodes.len,
+          merged=((merged.to(float) / (2.0^256)).toPC(3)),
+          allMerged=((allMerged.to(float) / (2.0^256)).toPC(3))
+      # End while()
 
   (swappedIn,lapCount)
 
@@ -290,7 +283,7 @@ proc swapInAccounts*(
     nSlotAccounts = 0 # Logging & debugging
     swappedIn: seq[NodeTagRangeSet]
 
-  noKeyErrorOrExceptionOops("swapInAccounts"):
+  noExceptionOops("swapInAccounts"):
    (swappedIn, nLaps) = swapIn(
      fa.processed, fa.unprocessed, others, rootKey, getFn, loopMax)