From 880313d7a456dd56c7bce5767cdfa25ce3cd90f8 Mon Sep 17 00:00:00 2001 From: Jordan Hrycaj Date: Wed, 15 Feb 2023 00:38:33 +0100 Subject: [PATCH] Silence some compiler gossip -- part 8, sync (#1467) details: Adding some missing exception annotation --- nimbus/sync/full/worker.nim | 20 ++--- nimbus/sync/handlers/eth.nim | 5 +- nimbus/sync/handlers/snap.nim | 4 +- nimbus/sync/legacy.nim | 13 +-- nimbus/sync/misc/best_pivot.nim | 2 +- nimbus/sync/snap.nim | 4 +- nimbus/sync/snap/range_desc.nim | 4 +- nimbus/sync/snap/worker.nim | 7 +- nimbus/sync/snap/worker/db/hexary_desc.nim | 37 ++++----- .../sync/snap/worker/db/hexary_envelope.nim | 59 +++++--------- nimbus/sync/snap/worker/db/hexary_error.nim | 3 + nimbus/sync/snap/worker/db/hexary_inspect.nim | 4 +- .../snap/worker/db/hexary_interpolate.nim | 2 +- nimbus/sync/snap/worker/db/hexary_nearby.nim | 49 ++++------- nimbus/sync/snap/worker/db/hexary_paths.nim | 31 ++++--- nimbus/sync/snap/worker/db/hexary_range.nim | 63 ++++++++------- .../sync/snap/worker/db/rocky_bulk_load.nim | 16 ++-- .../sync/snap/worker/db/snapdb_accounts.nim | 36 +++++---- nimbus/sync/snap/worker/db/snapdb_desc.nim | 8 +- .../sync/snap/worker/db/snapdb_persistent.nim | 19 +++-- .../snap/worker/db/snapdb_storage_slots.nim | 44 ++++------ .../snap/worker/pivot/find_missing_nodes.nim | 9 +-- .../sync/snap/worker/pivot/heal_accounts.nim | 7 +- .../snap/worker/pivot/heal_storage_slots.nim | 18 ++--- .../worker/pivot/storage_queue_helper.nim | 7 +- nimbus/sync/snap/worker/pivot/swap_in.nim | 81 +++++++++---------- 26 files changed, 237 insertions(+), 315 deletions(-) diff --git a/nimbus/sync/full/worker.nim b/nimbus/sync/full/worker.nim index 36b9f2319..1917b0569 100644 --- a/nimbus/sync/full/worker.nim +++ b/nimbus/sync/full/worker.nim @@ -135,16 +135,16 @@ proc processStaged(buddy: FullBuddyRef): bool = except CatchableError as e: error "Storing persistent blocks failed", peer, range=($wi.blocks), error = $e.name, msg = e.msg - except Defect as e: - # Pass through - raise e - except Exception as e: - # Notorious case where the `Chain` reference applied to - # `persistBlocks()` has the compiler traced a possible `Exception` - # (i.e. `ctx.chain` could be uninitialised.) - error "Exception while storing persistent blocks", peer, - range=($wi.blocks), error=($e.name), msg=e.msg - raise (ref Defect)(msg: $e.name & ": " & e.msg) + #except Defect as e: + # # Pass through + # raise e + #except Exception as e: + # # Notorious case where the `Chain` reference applied to + # # `persistBlocks()` has the compiler traced a possible `Exception` + # # (i.e. `ctx.chain` could be uninitialised.) + # error "Exception while storing persistent blocks", peer, + # range=($wi.blocks), error=($e.name), msg=e.msg + # raise (ref Defect)(msg: $e.name & ": " & e.msg) # Something went wrong. 
Recycle work item (needs to be re-fetched, anyway) let diff --git a/nimbus/sync/handlers/eth.nim b/nimbus/sync/handlers/eth.nim index 2a71c7833..59577cf11 100644 --- a/nimbus/sync/handlers/eth.nim +++ b/nimbus/sync/handlers/eth.nim @@ -118,9 +118,8 @@ proc blockHeader(db: ChainDBRef, # Private functions: peers related functions # ------------------------------------------------------------------------------ -when isMainModule: - proc hash(peer: Peer): hashes.Hash = - hash(peer.remote) +proc hash(peer: Peer): hashes.Hash {.used.} = + hash(peer.remote) proc getPeers(ctx: EthWireRef, thisPeer: Peer): seq[Peer] = # do not send back tx or txhash to thisPeer diff --git a/nimbus/sync/handlers/snap.nim b/nimbus/sync/handlers/snap.nim index c364f6cb7..4e0affd88 100644 --- a/nimbus/sync/handlers/snap.nim +++ b/nimbus/sync/handlers/snap.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. +{.push raises: [].} + import chronicles, chronos, @@ -16,8 +18,6 @@ import ../protocol/snap/snap_types, ../../core/chain -{.push raises: [].} - logScope: topics = "wire-protocol" diff --git a/nimbus/sync/legacy.nim b/nimbus/sync/legacy.nim index 88609fbd6..3b7d7936c 100644 --- a/nimbus/sync/legacy.nim +++ b/nimbus/sync/legacy.nim @@ -480,19 +480,8 @@ proc persistWorkItem(ctx: LegacySyncRef, wi: var WantedBlocks): ValidationResult try: result = ctx.chain.persistBlocks(wi.headers, wi.bodies) except CatchableError as e: - error "storing persistent blocks failed", - error = $e.name, msg = e.msg + error "storing persistent blocks failed", error = $e.name, msg = e.msg result = ValidationResult.Error - except Defect as e: - # Pass through - raise e - except Exception as e: - # Notorious case where the `Chain` reference applied to `persistBlocks()` - # has the compiler traced a possible `Exception` (i.e. `ctx.chain` could - # be uninitialised.) - error "exception while storing persistent blocks", - error = $e.name, msg = e.msg - raise (ref Defect)(msg: $e.name & ": " & e.msg) case result of ValidationResult.OK: ctx.finalizedBlock = wi.endIndex diff --git a/nimbus/sync/misc/best_pivot.nim b/nimbus/sync/misc/best_pivot.nim index 1481d5809..57542e779 100644 --- a/nimbus/sync/misc/best_pivot.nim +++ b/nimbus/sync/misc/best_pivot.nim @@ -172,7 +172,7 @@ proc getBestHeader( proc agreesOnChain( bp: BestPivotWorkerRef; - other: Peer + other: Peer; ): Future[Result[void,bool]] {.async.} = ## Returns `true` if one of the peers `bp.peer` or `other` acknowledges diff --git a/nimbus/sync/snap.nim b/nimbus/sync/snap.nim index 02b7d871f..6c0bd8117 100644 --- a/nimbus/sync/snap.nim +++ b/nimbus/sync/snap.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. +{.push raises: [].} + import eth/[common, p2p], chronicles, @@ -17,8 +19,6 @@ import ./snap/[worker, worker_desc], "."/[protocol, sync_desc, sync_sched] -{.push raises: [].} - logScope: topics = "snap-sync" diff --git a/nimbus/sync/snap/range_desc.nim b/nimbus/sync/snap/range_desc.nim index afac4d970..67eecc580 100644 --- a/nimbus/sync/snap/range_desc.nim +++ b/nimbus/sync/snap/range_desc.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or # distributed except according to those terms. 
+{.push raises: [].} + import std/[math, sequtils, strutils, hashes], eth/common, @@ -17,8 +19,6 @@ import ../protocol, ../types -{.push raises: [].} - type ByteArray32* = array[32,byte] ## Used for 32 byte database keys diff --git a/nimbus/sync/snap/worker.nim b/nimbus/sync/snap/worker.nim index de70a23bc..076e3b9b1 100644 --- a/nimbus/sync/snap/worker.nim +++ b/nimbus/sync/snap/worker.nim @@ -40,11 +40,8 @@ template noExceptionOops(info: static[string]; code: untyped) = try: code except CatchableError as e: - raiseAssert "Inconveivable (" & info & ": name=" & $e.name & " msg=" & e.msg - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Private functions diff --git a/nimbus/sync/snap/worker/db/hexary_desc.nim b/nimbus/sync/snap/worker/db/hexary_desc.nim index ea79d1347..98992c733 100644 --- a/nimbus/sync/snap/worker/db/hexary_desc.nim +++ b/nimbus/sync/snap/worker/db/hexary_desc.nim @@ -18,8 +18,9 @@ import {.push raises: [].} type - HexaryPpFn* = proc(key: RepairKey): string {.gcsafe.} - ## For testing/debugging: key pretty printer function + HexaryPpFn* = + proc(key: RepairKey): string {.gcsafe, raises: [CatchableError].} + ## For testing/debugging: key pretty printer function ByteArray33* = array[33,byte] ## Used for 31 byte database keys, i.e. + <32-byte-key> @@ -140,10 +141,11 @@ type repairKeyGen*: uint64 ## Unique tmp key generator keyPp*: HexaryPpFn ## For debugging, might go away - HexaryGetFn* = proc(key: openArray[byte]): Blob {.gcsafe.} - ## Persistent database `get()` function. For read-only cases, this function - ## can be seen as the persistent alternative to ``tab[]` on a - ## `HexaryTreeDbRef` descriptor. + HexaryGetFn* = proc(key: openArray[byte]): Blob + {.gcsafe, raises: [CatchableError].} + ## Persistent database `get()` function. For read-only cases, this + ## function can be seen as the persistent alternative to ``tab[]` on + ## a `HexaryTreeDbRef` descriptor. HexaryNodeReport* = object ## Return code for single node operations @@ -208,7 +210,7 @@ proc ppImpl(key: RepairKey; db: HexaryTreeDbRef): string = try: if not disablePrettyKeys and not db.keyPp.isNil: return db.keyPp(key) - except: + except CatchableError: discard key.ByteArray33.toSeq.mapIt(it.toHex(2)).join.toLowerAscii @@ -276,17 +278,16 @@ proc ppImpl(db: HexaryTreeDbRef; root: NodeKey): seq[string] = result = result or (1u64 shl 63) proc cmpIt(x, y: (uint64,string)): int = cmp(x[0],y[0]) - try: - var accu: seq[(uint64,string)] - if root.ByteArray32 != ByteArray32.default: - accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")] - for key,node in db.tab.pairs: - accu.add ( - key.ppImpl(db).tokey, - "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")") - result = accu.sorted(cmpIt).mapIt(it[1]) - except Exception as e: - result &= " ! 
Ooops ppImpl(): name=" & $e.name & " msg=" & e.msg + + var accu: seq[(uint64,string)] + if root.ByteArray32 != ByteArray32.default: + accu.add @[(0u64, "($0" & "," & root.ppImpl(db) & ")")] + for key,node in db.tab.pairs: + accu.add ( + key.ppImpl(db).tokey, + "(" & key.ppImpl(db) & "," & node.ppImpl(db) & ")") + + accu.sorted(cmpIt).mapIt(it[1]) # ------------------------------------------------------------------------------ # Public debugging helpers diff --git a/nimbus/sync/snap/worker/db/hexary_envelope.nim b/nimbus/sync/snap/worker/db/hexary_envelope.nim index 4f90021e6..c3b2e02e1 100644 --- a/nimbus/sync/snap/worker/db/hexary_envelope.nim +++ b/nimbus/sync/snap/worker/db/hexary_envelope.nim @@ -93,6 +93,10 @@ proc `==`(a, b: XNodeObj): bool = of Branch: return a.bLink == b.bLink +proc eq(a, b: XPathStep|RPathStep): bool = + a.key == b.key and a.nibble == b.nibble and a.node == b.node + + proc isZeroLink(a: Blob): bool = ## Persistent database has `Blob` as key a.len == 0 @@ -168,7 +172,7 @@ proc doDecomposeLeft( var collect: seq[NodeSpecs] block rightCurbEnvelope: for n in 0 ..< min(envQ.path.len+1, ivQ.path.len): - if n == envQ.path.len or envQ.path[n] != ivQ.path[n]: + if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]): # # At this point, the `node` entries of either `.path[n]` step are # the same. This is so because the predecessor steps were the same @@ -218,7 +222,7 @@ proc doDecomposeRight( var collect: seq[NodeSpecs] block leftCurbEnvelope: for n in 0 ..< min(envQ.path.len+1, ivQ.path.len): - if n == envQ.path.len or envQ.path[n] != ivQ.path[n]: + if n == envQ.path.len or not envQ.path[n].eq(ivQ.path[n]): for m in n ..< ivQ.path.len: let pfx = ivQ.getNibbles(0, m) # common path segment @@ -241,7 +245,7 @@ proc decomposeLeftImpl( iv: NodeTagRange; # Proofed range of leaf paths db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction ): Result[seq[NodeSpecs],HexaryError] - {.gcsafe, raises: [RlpError,KeyError].} = + {.gcsafe, raises: [CatchableError].} = ## Database agnostic implementation of `hexaryEnvelopeDecompose()`. var nodeSpex: seq[NodeSpecs] @@ -272,7 +276,7 @@ proc decomposeRightImpl( iv: NodeTagRange; # Proofed range of leaf paths db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction ): Result[seq[NodeSpecs],HexaryError] - {.gcsafe, raises: [RlpError,KeyError].} = + {.gcsafe, raises: [CatchableError].} = ## Database agnostic implementation of `hexaryEnvelopeDecompose()`. var nodeSpex: seq[NodeSpecs] if iv.maxPt < env.maxPt: @@ -455,12 +459,12 @@ proc hexaryEnvelopeTouchedBy*( # ------------------------------------------------------------------------------ proc hexaryEnvelopeDecompose*( - partialPath: Blob; # Hex encoded partial path - rootKey: NodeKey; # State root - iv: NodeTagRange; # Proofed range of leaf paths - db: HexaryTreeDbRef; # Database + partialPath: Blob; # Hex encoded partial path + rootKey: NodeKey; # State root + iv: NodeTagRange; # Proofed range of leaf paths + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[seq[NodeSpecs],HexaryError] - {.gcsafe, raises: [KeyError].} = + {.gcsafe, raises: [CatchableError].} = ## This function computes the decomposition of the argument `partialPath` ## relative to the argument range `iv`. 
## @@ -502,7 +506,7 @@ proc hexaryEnvelopeDecompose*( if iv.maxPt < env.minPt or env.maxPt < iv.minPt: return err(DecomposeDisjunct) # empty result - noRlpErrorOops("in-memory hexaryEnvelopeDecompose"): + noRlpErrorOops("hexaryEnvelopeDecompose"): let left = block: let rc = env.decomposeLeftImpl(rootKey, iv, db) if rc.isErr: @@ -517,33 +521,6 @@ proc hexaryEnvelopeDecompose*( # Notreached -proc hexaryEnvelopeDecompose*( - partialPath: Blob; # Hex encoded partial path - rootKey: NodeKey; # State root - iv: NodeTagRange; # Proofed range of leaf paths - getFn: HexaryGetFn; # Database abstraction - ): Result[seq[NodeSpecs],HexaryError] - {.gcsafe, raises: [RlpError].} = - ## Variant of `hexaryEnvelopeDecompose()` for persistent database. - let env = partialPath.hexaryEnvelope - if iv.maxPt < env.minPt or env.maxPt < iv.minPt: - return err(DecomposeDisjunct) # empty result - - noKeyErrorOops("persistent hexaryEnvelopeDecompose"): - let left = block: - let rc = env.decomposeLeftImpl(rootKey, iv, getFn) - if rc.isErr: - return rc - rc.value - let right = block: - let rc = env.decomposeRightImpl(rootKey, iv, getFn) - if rc.isErr: - return rc - rc.value - return ok(left & right) - # Notreached - - proc hexaryEnvelopeDecompose*( partialPath: Blob; # Hex encoded partial path ranges: NodeTagRangeSet; # To be complemented @@ -641,7 +618,7 @@ proc hexaryEnvelopeDecompose*( let rc = env.decomposeRightImpl(rootKey, iv, db) if rc.isOk: delayed &= rc.value # Queue right side for next lap - except RlpError, KeyError: + except CatchableError: # Cannot decompose `w`, so just drop it discard @@ -659,7 +636,8 @@ proc hexaryEnvelopeDecompose*( ranges: NodeTagRangeSet; # To be complemented rootKey: NodeKey; # State root db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction - ): Result[seq[NodeSpecs],HexaryError] = + ): Result[seq[NodeSpecs],HexaryError] + {.gcsafe, raises: [CatchableError].} = ## Variant of `hexaryEnvelopeDecompose()` for ranges and a `NodeSpecs` ## argument rather than a partial path. node.partialPath.hexaryEnvelopeDecompose(ranges, rootKey, db) @@ -668,7 +646,8 @@ proc hexaryEnvelopeDecompose*( ranges: NodeTagRangeSet; # To be complemented rootKey: NodeKey; # State root db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction - ): Result[seq[NodeSpecs],HexaryError] = + ): Result[seq[NodeSpecs],HexaryError] + {.gcsafe, raises: [CatchableError].} = ## Variant of `hexaryEnvelopeDecompose()` for ranges and an implicit maximal ## partial path envelope. ## argument rather than a partial path. 
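The hunks above collapse the separate in-memory (`HexaryTreeDbRef`) and persistent (`HexaryGetFn`) overloads of `hexaryEnvelopeDecompose()` into a single proc that is generic over `HexaryTreeDbRef|HexaryGetFn` and declares `raises: [CatchableError]`. A minimal, self-contained sketch of that consolidation pattern follows; `MemDb`, `GetFn` and `lookup` are hypothetical stand-ins, not identifiers from the patch.

```nim
# Sketch only: `MemDb`, `GetFn` and `lookup` are hypothetical stand-ins for
# `HexaryTreeDbRef`, `HexaryGetFn` and the consolidated decompose/path procs.
import std/tables

type
  MemDb = ref object                      # in-memory table, akin to `HexaryTreeDbRef`
    tab: Table[string, seq[byte]]
  GetFn =                                 # persistent `get()`, akin to `HexaryGetFn`
    proc(key: string): seq[byte] {.gcsafe, raises: [CatchableError].}

proc lookup(db: MemDb|GetFn; key: string): seq[byte]
    {.gcsafe, raises: [CatchableError].} =
  ## One generic body replaces the former in-memory/persistent overload pair;
  ## `when` picks the backend at compile time, the `raises` bound covers both.
  when db is MemDb:
    db.tab.getOrDefault(key)
  else:
    db(key)                               # persistent get() may raise `CatchableError`

when isMainModule:
  let mem = MemDb(tab: {"acc": @[1u8, 2, 3]}.toTable)
  let getFn: GetFn = proc(key: string): seq[byte] = @[]
  echo lookup(mem, "acc").len             # 3
  echo lookup(getFn, "acc").len           # 0
```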
diff --git a/nimbus/sync/snap/worker/db/hexary_error.nim b/nimbus/sync/snap/worker/db/hexary_error.nim index b32be2950..1751b13f9 100644 --- a/nimbus/sync/snap/worker/db/hexary_error.nim +++ b/nimbus/sync/snap/worker/db/hexary_error.nim @@ -19,6 +19,7 @@ type LowerBoundProofError NodeNotFound RlpEncoding + SlotsNotFound SlotsNotSrictlyIncreasing TrieLoopAlert TrieIsEmpty @@ -69,6 +70,8 @@ type NoRocksDbBackend UnresolvedRepairNode OSErrorException + IOErrorException + ExceptionError StateRootNotFound # End diff --git a/nimbus/sync/snap/worker/db/hexary_inspect.nim b/nimbus/sync/snap/worker/db/hexary_inspect.nim index 5d312c839..eddcbb3cb 100644 --- a/nimbus/sync/snap/worker/db/hexary_inspect.nim +++ b/nimbus/sync/snap/worker/db/hexary_inspect.nim @@ -99,7 +99,7 @@ proc processLink( inspect: var seq[(NodeKey,NibblesSeq)]; trail: NibblesSeq; child: Rlp; - ) {.gcsafe, raises: [RlpError]} = + ) {.gcsafe, raises: [CatchableError]} = ## Ditto if not child.isEmpty: let childBlob = child.toBytes @@ -267,7 +267,7 @@ proc hexaryInspectTrie*( stopAtLevel = 64u8; # Width-first depth level maxDangling = high(int); # Maximal number of dangling results ): TrieNodeStat - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryInspectTrie()` for persistent database. when extraTraceMessages: let nPaths = paths.len diff --git a/nimbus/sync/snap/worker/db/hexary_interpolate.nim b/nimbus/sync/snap/worker/db/hexary_interpolate.nim index f839edba5..e0a813c05 100644 --- a/nimbus/sync/snap/worker/db/hexary_interpolate.nim +++ b/nimbus/sync/snap/worker/db/hexary_interpolate.nim @@ -34,7 +34,7 @@ type # Private debugging helpers # ------------------------------------------------------------------------------ -when true: +when false: import std/[sequtils, strutils] proc pp(w: RPathXStep; db: HexaryTreeDbRef): string = diff --git a/nimbus/sync/snap/worker/db/hexary_nearby.nim b/nimbus/sync/snap/worker/db/hexary_nearby.nim index 079207ba4..c180459a1 100644 --- a/nimbus/sync/snap/worker/db/hexary_nearby.nim +++ b/nimbus/sync/snap/worker/db/hexary_nearby.nim @@ -8,6 +8,7 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. 
+{.push raises: [].} import std/tables, @@ -16,13 +17,11 @@ import ../../range_desc, "."/[hexary_desc, hexary_error, hexary_paths] -{.push raises: [].} - proc hexaryNearbyRight*(path: RPath; db: HexaryTreeDbRef; ): Result[RPath,HexaryError] {.gcsafe, raises: [KeyError]} proc hexaryNearbyRight*(path: XPath; getFn: HexaryGetFn; - ): Result[XPath,HexaryError] {.gcsafe, raises: [RlpError]} + ): Result[XPath,HexaryError] {.gcsafe, raises: [CatchableError]} # ------------------------------------------------------------------------------ # Private helpers @@ -91,7 +90,7 @@ proc hexaryNearbyRightImpl( rootKey: NodeKey; # State root db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [KeyError,RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Wrapper let path = block: let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyRight(db) @@ -111,7 +110,7 @@ proc hexaryNearbyLeftImpl( rootKey: NodeKey; # State root db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [KeyError,RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Wrapper let path = block: let rc = baseTag.hexaryPath(rootKey, db).hexaryNearbyLeft(db) @@ -184,7 +183,7 @@ proc completeLeast( getFn: HexaryGetFn; pathLenMax = 64; ): Result[XPath,HexaryError] - {.gcsafe, raises: [RlpError].} = + {.gcsafe, raises: [CatchableError].} = ## Variant of `completeLeast()` for persistent database var xPath = XPath(path: path.path) @@ -290,7 +289,7 @@ proc completeMost( getFn: HexaryGetFn; pathLenMax = 64; ): Result[XPath,HexaryError] - {.gcsafe, raises: [RlpError].} = + {.gcsafe, raises: [CatchableError].} = ## Variant of `completeLeast()` for persistent database var xPath = XPath(path: path.path) @@ -350,7 +349,7 @@ proc hexaryNearbyRight*( path: RPath; # Partially expanded path db: HexaryTreeDbRef; # Database ): Result[RPath,HexaryError] - {.gcsafe, raises: [Defect,KeyError]} = + {.gcsafe, raises: [KeyError]} = ## Extends the maximally extended argument nodes `path` to the right (i.e. ## with non-decreasing path value). This is similar to the ## `hexary_path.next()` function, only that this algorithm does not @@ -448,7 +447,7 @@ proc hexaryNearbyRight*( path: XPath; # Partially expanded path getFn: HexaryGetFn; # Database abstraction ): Result[XPath,HexaryError] - {.gcsafe, raises: [Defect,RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryNearbyRight()` for persistant database # Some easy cases @@ -672,7 +671,7 @@ proc hexaryNearbyLeft*( path: XPath; # Partially expanded path getFn: HexaryGetFn; # Database abstraction ): Result[XPath,HexaryError] - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryNearbyLeft()` for persistant database # Some easy cases @@ -765,45 +764,25 @@ proc hexaryNearbyLeft*( proc hexaryNearbyRight*( baseTag: NodeTag; # Some node rootKey: NodeKey; # State root - db: HexaryTreeDbRef; # Database + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [KeyError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryNearbyRight()` working with `NodeTag` arguments rather - ## than `RPath()` ones. + ## than `RPath` or `XPath` ones. 
noRlpErrorOops("hexaryNearbyRight"): return baseTag.hexaryNearbyRightImpl(rootKey, db) -proc hexaryNearbyRight*( - baseTag: NodeTag; # Some node - rootKey: NodeKey; # State root - getFn: HexaryGetFn; # Database abstraction - ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [RlpError]} = - ## Variant of `hexaryNearbyRight()` for persistant database - noKeyErrorOops("hexaryNearbyRight"): - return baseTag.hexaryNearbyRightImpl(rootKey, getFn) - proc hexaryNearbyLeft*( baseTag: NodeTag; # Some node rootKey: NodeKey; # State root - db: HexaryTreeDbRef; # Database + db: HexaryTreeDbRef|HexaryGetFn; # Database abstraction ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [KeyError]} = + {.gcsafe, raises: [CatchableError]} = ## Similar to `hexaryNearbyRight()` for `NodeKey` arguments. noRlpErrorOops("hexaryNearbyLeft"): return baseTag.hexaryNearbyLeftImpl(rootKey, db) -proc hexaryNearbyLeft*( - baseTag: NodeTag; # Some node - rootKey: NodeKey; # State root - getFn: HexaryGetFn; # Database abstraction - ): Result[NodeTag,HexaryError] - {.gcsafe, raises: [RlpError]} = - ## Variant of `hexaryNearbyLeft()` for persistant database - noKeyErrorOops("hexaryNearbyLeft"): - return baseTag.hexaryNearbyLeftImpl(rootKey, getFn) - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/hexary_paths.nim b/nimbus/sync/snap/worker/db/hexary_paths.nim index 8849a03b4..3c82cbb98 100644 --- a/nimbus/sync/snap/worker/db/hexary_paths.nim +++ b/nimbus/sync/snap/worker/db/hexary_paths.nim @@ -10,6 +10,8 @@ ## Find node paths in hexary tries. +{.push raises: [].} + import std/[sequtils, sets, tables], eth/[common, trie/nibbles], @@ -17,8 +19,6 @@ import ../../range_desc, ./hexary_desc -{.push raises: [].} - # ------------------------------------------------------------------------------ # Private debugging helpers # ------------------------------------------------------------------------------ @@ -132,7 +132,7 @@ proc pathExtend( key: Blob; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Ditto for `XPath` rather than `RPath` result = path var key = key @@ -193,7 +193,7 @@ proc pathLeast( key: Blob; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## For the partial path given, extend by branch nodes with least node ## indices. result = path @@ -283,7 +283,7 @@ proc pathMost( key: Blob; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## For the partial path given, extend by branch nodes with greatest node ## indices. result = path @@ -457,7 +457,7 @@ proc hexaryPath*( rootKey: NodeKey|RepairKey; db: HexaryTreeDbRef; ): RPath - {.gcsafe, raises: [Defect,KeyError]} = + {.gcsafe, raises: [KeyError]} = ## Variant of `hexaryPath` for a hex encoded partial path. partialPath.hexPrefixDecode[1].hexaryPath(rootKey, db) @@ -467,7 +467,7 @@ proc hexaryPath*( rootKey: NodeKey; # State root getFn: HexaryGetFn; # Database abstraction ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Compute the longest possible path on an arbitrary hexary trie. 
XPath(tail: partialPath).pathExtend(rootKey.to(Blob), getFn) @@ -476,7 +476,7 @@ proc hexaryPath*( rootKey: NodeKey; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryPath` for a node key.. nodeKey.to(NibblesSeq).hexaryPath(rootKey, getFn) @@ -485,7 +485,7 @@ proc hexaryPath*( rootKey: NodeKey; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryPath` for a node tag.. nodeTag.to(NodeKey).hexaryPath(rootKey, getFn) @@ -494,7 +494,7 @@ proc hexaryPath*( rootKey: NodeKey; getFn: HexaryGetFn; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryPath` for a hex encoded partial path. partialPath.hexPrefixDecode[1].hexaryPath(rootKey, getFn) @@ -543,7 +543,7 @@ proc hexaryPathNodeKey*( getFn: HexaryGetFn; # Database abstraction missingOk = false; # Also return key for missing node ): Result[NodeKey,void] - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryPathNodeKey()` for persistent database. let steps = partialPath.hexaryPath(rootKey, getFn) if 0 < steps.path.len and steps.tail.len == 0: @@ -564,19 +564,18 @@ proc hexaryPathNodeKey*( getFn: HexaryGetFn; # Database abstraction missingOk = false; # Also return key for missing node ): Result[NodeKey,void] - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Variant of `hexaryPathNodeKey()` for persistent database and ## hex encoded partial path. partialPath.hexPrefixDecode[1].hexaryPathNodeKey(rootKey, getFn, missingOk) - proc hexaryPathNodeKeys*( partialPaths: seq[Blob]; # Partial paths segments rootKey: NodeKey|RepairKey; # State root db: HexaryTreeDbRef; # Database missingOk = false; # Also return key for missing node ): HashSet[NodeKey] - {.gcsafe, raises: [Defect,KeyError]} = + {.gcsafe, raises: [KeyError]} = ## Convert a list of path segments to a set of node keys partialPaths.toSeq .mapIt(it.hexaryPathNodeKey(rootKey, db, missingOk)) @@ -593,7 +592,7 @@ proc next*( getFn: HexaryGetFn; minDepth = 64; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Advance the argument `path` to the next leaf node (if any.). The ## `minDepth` argument requires the result of `next()` to satisfy ## `minDepth <= next().getNibbles.len`. @@ -624,7 +623,7 @@ proc prev*( getFn: HexaryGetFn; minDepth = 64; ): XPath - {.gcsafe, raises: [RlpError]} = + {.gcsafe, raises: [CatchableError]} = ## Advance the argument `path` to the previous leaf node (if any.) The ## `minDepth` argument requires the result of `next()` to satisfy ## `minDepth <= next().getNibbles.len`. 
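The `hexary_paths.nim` hunks above widen the traversal helpers from `raises: [RlpError]` to `raises: [CatchableError]`, matching the widened `HexaryGetFn` callback type. The sketch below, with hypothetical names (`GetFn`, `walk`, `walkSafe`), illustrates why the widening propagates: once the callback may raise `CatchableError`, a caller must either declare at least that effect or handle it locally under the module-wide `{.push raises: [].}`.

```nim
# Sketch under the same module-wide default; `GetFn`, `walk` and `walkSafe`
# are illustrative names, not identifiers from the patch.
{.push raises: [].}                 # nothing may escape by default

type
  GetFn = proc(key: string): string {.gcsafe, raises: [CatchableError].}

proc walk(getFn: GetFn; key: string): string
    {.gcsafe, raises: [CatchableError].} =
  ## Propagates the callback's effect, like the widened `hexaryPath()` variants.
  getFn(key)

proc walkSafe(getFn: GetFn; key: string): string =
  ## Stays within the pushed `raises: []` by handling the effect locally,
  ## the way the sync workers wrap calls in `noExceptionOops()`.
  try:
    result = walk(getFn, key)
  except CatchableError:
    result = ""

{.pop.}

when isMainModule:
  let getFn: GetFn = proc(key: string): string = key & "-value"
  echo walkSafe(getFn, "abc")       # abc-value
```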
diff --git a/nimbus/sync/snap/worker/db/hexary_range.nim b/nimbus/sync/snap/worker/db/hexary_range.nim index 17e9a27a7..d11fee72c 100644 --- a/nimbus/sync/snap/worker/db/hexary_range.nim +++ b/nimbus/sync/snap/worker/db/hexary_range.nim @@ -11,7 +11,7 @@ import std/[sequtils, sets, tables], chronicles, - eth/[common, p2p, rlp, trie/nibbles], + eth/[common, p2p, trie/nibbles], stew/[byteutils, interval_set], ../../range_desc, "."/[hexary_desc, hexary_error, hexary_nearby, hexary_paths] @@ -51,12 +51,12 @@ template collectLeafs( block body: var - nodeTag = iv.minPt + nodeTag = minPt(iv) prevTag: NodeTag rls: seq[RangeLeaf] # Fill at most `nLeafs` leaf nodes from interval range - while rls.len < nLeafs and nodeTag <= iv.maxPt: + while rls.len < nLeafs and nodeTag <= maxPt(iv): # The following logic might be sub-optimal. A strict version of the # `next()` function that stops with an error at dangling links could # be faster if the leaf nodes are not too far apart on the hexary trie. @@ -119,13 +119,38 @@ template updateProof( # Public functions # ------------------------------------------------------------------------------ +#proc hexaryRangeLeafsProof*( +# db: HexaryTreeDbRef; # Database abstraction +# rootKey: NodeKey; # State root +# iv: NodeTagRange; # Proofed range of leaf paths +# nLeafs = high(int); # Implies maximal data size +# ): Result[RangeProof,HexaryError] +# {.gcsafe, raises: [KeyError]} = +# ## Collect trie database leafs prototype and add proof. +# let rc = db.collectLeafs(rootKey, iv, nLeafs) +# if rc.isErr: +# err(rc.error) +# else: +# ok(db.updateProof(rootKey, iv.minPt, rc.value)) +# +#proc hexaryRangeLeafsProof*( +# db: HexaryTreeDbRef; # Database abstraction +# rootKey: NodeKey; # State root +# baseTag: NodeTag; # Left boundary +# leafList: seq[RangeLeaf]; # Set of already collected leafs +# ): RangeProof +# {.gcsafe, raises: [KeyError]} = +# ## Complement leafs list by adding proof nodes to the argument list +# ## `leafList`. +# db.updateProof(rootKey, baseTag, leafList) + proc hexaryRangeLeafsProof*( - db: HexaryTreeDbRef; # Database abstraction + db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction rootKey: NodeKey; # State root iv: NodeTagRange; # Proofed range of leaf paths nLeafs = high(int); # Implies maximal data size ): Result[RangeProof,HexaryError] - {.gcsafe, raises: [Defect,KeyError]} = + {.gcsafe, raises: [CatchableError]} = ## Collect trie database leafs prototype and add proof. let rc = db.collectLeafs(rootKey, iv, nLeafs) if rc.isErr: @@ -134,40 +159,16 @@ proc hexaryRangeLeafsProof*( ok(db.updateProof(rootKey, iv.minPt, rc.value)) proc hexaryRangeLeafsProof*( - db: HexaryTreeDbRef; # Database abstraction + db: HexaryGetFn|HexaryTreeDbRef; # Database abstraction rootKey: NodeKey; # State root baseTag: NodeTag; # Left boundary leafList: seq[RangeLeaf]; # Set of already collected leafs ): RangeProof - {.gcsafe, raises: [Defect,KeyError]} = + {.gcsafe, raises: [CatchableError]} = ## Complement leafs list by adding proof nodes to the argument list ## `leafList`. db.updateProof(rootKey, baseTag, leafList) -proc hexaryRangeLeafsProof*( - db: HexaryGetFn; # Database abstraction - rootKey: NodeKey; # State root - iv: NodeTagRange; # Proofed range of leaf paths - nLeafs = high(int); # Implies maximal data size - ): Result[RangeProof,HexaryError] - {.gcsafe, raises: [Defect,RlpError]} = - ## Variant of `hexaryRangeLeafsProof()` for persistent database. 
- let rc = db.collectLeafs(rootKey, iv, nLeafs) - if rc.isErr: - err(rc.error) - else: - ok(db.updateProof(rootKey, iv.minPt, rc.value)) - -proc hexaryRangeLeafsProof*( - db: HexaryGetFn; # Database abstraction - rootKey: NodeKey; # State root - baseTag: NodeTag; # Left boundary - leafList: seq[RangeLeaf]; # Set of already collected leafs - ): RangeProof - {.gcsafe, raises: [Defect,RlpError]} = - ## Variant of `hexaryRangeLeafsProof()` for persistent database. - db.updateProof(rootKey, baseTag, leafList) - # ------------------------------------------------------------------------------ # End # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/db/rocky_bulk_load.nim b/nimbus/sync/snap/worker/db/rocky_bulk_load.nim index 55ada4f5e..536be8cb6 100644 --- a/nimbus/sync/snap/worker/db/rocky_bulk_load.nim +++ b/nimbus/sync/snap/worker/db/rocky_bulk_load.nim @@ -159,7 +159,7 @@ proc add*( proc finish*( rbl: RockyBulkLoadRef ): Result[int64,void] - {.gcsafe, raises: [OSError].} = + {.gcsafe, raises: [OSError, IOError].} = ## Commit collected and cached data to the database. This function implies ## `destroy()` if successful. Otherwise `destroy()` must be called ## explicitely, e.g. after error analysis. @@ -177,14 +177,12 @@ proc finish*( addr csError) if csError.isNil: - var size: int64 - try: - var f: File - if f.open(rbl.filePath): - size = f.getFileSize - f.close - except: - discard + var + size: int64 + f: File + if f.open(rbl.filePath): + size = f.getFileSize + f.close rbl.destroy() return ok(size) diff --git a/nimbus/sync/snap/worker/db/snapdb_accounts.nim b/nimbus/sync/snap/worker/db/snapdb_accounts.nim index c472a7f18..cb5a59784 100644 --- a/nimbus/sync/snap/worker/db/snapdb_accounts.nim +++ b/nimbus/sync/snap/worker/db/snapdb_accounts.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. 
+{.push raises: [].} + import std/[algorithm, sequtils, tables], chronicles, @@ -18,8 +20,6 @@ import hexary_interpolate, hexary_inspect, hexary_paths, snapdb_desc, snapdb_persistent] -{.push raises: [].} - logScope: topics = "snap-db" @@ -34,6 +34,8 @@ type const extraTraceMessages = false or true +proc getAccountFn*(ps: SnapDbAccountsRef): HexaryGetFn + # ------------------------------------------------------------------------------ # Private helpers # ------------------------------------------------------------------------------ @@ -50,17 +52,15 @@ template noKeyError(info: static[string]; code: untyped) = except KeyError as e: raiseAssert "Not possible (" & info & "): " & e.msg -template noRlpExceptionOops(info: static[string]; code: untyped) = +template noExceptionOops(info: static[string]; code: untyped) = try: code + except KeyError as e: + raiseAssert "Not possible -- " & info & ": " & e.msg except RlpError: return err(RlpEncoding) - except KeyError as e: - raiseAssert "Not possible (" & info & "): " & e.msg - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + return err(AccountNotFound) # ------------------------------------------------------------------------------ # Private functions @@ -70,7 +70,7 @@ proc persistentAccounts( db: HexaryTreeDbRef; ## Current table ps: SnapDbAccountsRef; ## For persistent database ): Result[void,HexaryError] - {.gcsafe, raises: [OSError,KeyError].} = + {.gcsafe, raises: [OSError,IOError,KeyError].} = ## Store accounts trie table on databse if ps.rockDb.isNil: let rc = db.persistentAccountsPut(ps.kvDb) @@ -300,7 +300,7 @@ proc importAccounts*( except OSError as e: error "Import Accounts exception", peer=ps.peer, name=($e.name), msg=e.msg return err(OSErrorException) - except Exception as e: + except CatchableError as e: raiseAssert "Not possible @ importAccounts(" & $e.name & "):" & e.msg #when extraTraceMessages: @@ -330,7 +330,8 @@ proc importRawAccountsNodes*( nodes: openArray[NodeSpecs]; ## List of `(key,data)` records reportNodes = {Leaf}; ## Additional node types to report persistent = false; ## store data on disk - ): seq[HexaryNodeReport] = + ): seq[HexaryNodeReport] + {.gcsafe, raises: [IOError].} = ## Store data nodes given as argument `nodes` on the persistent database. ## ## If there were an error when processing a particular argument `notes` item, @@ -396,7 +397,8 @@ proc importRawAccountsNodes*( peer: Peer, ## For log messages, only nodes: openArray[NodeSpecs]; ## List of `(key,data)` records reportNodes = {Leaf}; ## Additional node types to report - ): seq[HexaryNodeReport] = + ): seq[HexaryNodeReport] + {.gcsafe, raises: [IOError].} = ## Variant of `importRawNodes()` for persistent storage. SnapDbAccountsRef.init( pv, Hash256(), peer).importRawAccountsNodes( @@ -409,7 +411,7 @@ proc getAccountsNodeKey*( ): Result[NodeKey,HexaryError] = ## For a partial node path argument `path`, return the raw node key. 
var rc: Result[NodeKey,void] - noRlpExceptionOops("getAccountsNodeKey()"): + noExceptionOops("getAccountsNodeKey()"): if persistent: rc = path.hexaryPathNodeKey(ps.root, ps.getAccountFn) else: @@ -438,7 +440,7 @@ proc getAccountsData*( ## Caveat: There is no unit test yet for the non-persistent version var acc: Account - noRlpExceptionOops("getAccountData()"): + noExceptionOops("getAccountData()"): var leaf: Blob if persistent: leaf = path.hexaryPath(ps.root, ps.getAccountFn).leafData @@ -499,7 +501,7 @@ proc nextAccountsChainDbKey*( ): Result[NodeKey,HexaryError] = ## Fetch the account path on the `ChainDBRef`, the one next to the ## argument account key. - noRlpExceptionOops("getChainDbAccount()"): + noExceptionOops("getChainDbAccount()"): let path = accKey .hexaryPath(ps.root, ps.getAccountFn) .next(ps.getAccountFn) @@ -515,7 +517,7 @@ proc prevAccountsChainDbKey*( ): Result[NodeKey,HexaryError] = ## Fetch the account path on the `ChainDBRef`, the one before to the ## argument account. - noRlpExceptionOops("getChainDbAccount()"): + noExceptionOops("getChainDbAccount()"): let path = accKey .hexaryPath(ps.root, ps.getAccountFn) .prev(ps.getAccountFn) diff --git a/nimbus/sync/snap/worker/db/snapdb_desc.nim b/nimbus/sync/snap/worker/db/snapdb_desc.nim index 09a70ea05..077933204 100644 --- a/nimbus/sync/snap/worker/db/snapdb_desc.nim +++ b/nimbus/sync/snap/worker/db/snapdb_desc.nim @@ -52,9 +52,7 @@ template noPpError(info: static[string]; code: untyped) = raiseAssert "Inconveivable (" & info & "): " & e.msg except KeyError as e: raiseAssert "Not possible (" & info & "): " & e.msg - except Defect as e: - raise e - except Exception as e: + except CatchableError as e: raiseAssert "Ooops (" & info & ") " & $e.name & ": " & e.msg proc toKey(a: RepairKey; pv: SnapDbRef): uint = @@ -255,7 +253,7 @@ proc verifyLowerBound*( base: NodeTag; ## Before or at first account entry in `data` first: NodeTag; ## First account key ): Result[void,HexaryError] - {.gcsafe, raises: [KeyError].} = + {.gcsafe, raises: [CatchableError].} = ## Verify that `base` is to the left of the first leaf entry and there is ## nothing in between. var error: HexaryError @@ -279,7 +277,7 @@ proc verifyNoMoreRight*( peer: Peer; ## For log messages base: NodeTag; ## Before or at first account entry in `data` ): Result[void,HexaryError] - {.gcsafe, raises: [KeyError].} = + {.gcsafe, raises: [CatchableError].} = ## Verify that there is are no more leaf entries to the right of and ## including `base`. let diff --git a/nimbus/sync/snap/worker/db/snapdb_persistent.nim b/nimbus/sync/snap/worker/db/snapdb_persistent.nim index 6d34b9c74..e3296949a 100644 --- a/nimbus/sync/snap/worker/db/snapdb_persistent.nim +++ b/nimbus/sync/snap/worker/db/snapdb_persistent.nim @@ -8,6 +8,8 @@ # at your option. This file may not be copied, modified, or distributed # except according to those terms. 
+{.push raises: [].} + import std/[algorithm, tables], chronicles, @@ -16,17 +18,18 @@ import ../../range_desc, "."/[hexary_desc, hexary_error, rocky_bulk_load, snapdb_desc] -{.push raises: [].} - logScope: topics = "snap-db" type - AccountsGetFn* = proc(key: openArray[byte]): Blob {.gcsafe, raises:[Defect].} - ## The `get()` function for the accounts trie + AccountsGetFn* = proc(key: openArray[byte]): Blob + {.gcsafe, raises:[].} + ## The `get()` function for the accounts trie - StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob {.gcsafe, raises: [Defect].} - ## The `get()` function for the storage trie depends on the current account + StorageSlotsGetFn* = proc(acc: NodeKey; key: openArray[byte]): Blob + {.gcsafe, raises: [].} + ## The `get()` function for the storage trie depends on the current + ## account StateRootRegistry* = object ## State root record. A table of these kind of records is organised as @@ -180,7 +183,7 @@ proc persistentAccountsPut*( db: HexaryTreeDbRef; rocky: RocksStoreRef ): Result[void,HexaryError] - {.gcsafe, raises: [OSError,KeyError].} = + {.gcsafe, raises: [OSError,IOError,KeyError].} = ## SST based bulk load on `rocksdb`. if rocky.isNil: return err(NoRocksDbBackend) @@ -229,7 +232,7 @@ proc persistentStorageSlotsPut*( db: HexaryTreeDbRef; rocky: RocksStoreRef ): Result[void,HexaryError] - {.gcsafe, raises: [OSError,KeyError].} = + {.gcsafe, raises: [OSError,IOError,KeyError].} = ## SST based bulk load on `rocksdb`. if rocky.isNil: return err(NoRocksDbBackend) diff --git a/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim b/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim index a0932d853..75dd13609 100644 --- a/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim +++ b/nimbus/sync/snap/worker/db/snapdb_storage_slots.nim @@ -42,34 +42,13 @@ proc to(h: Hash256; T: type NodeKey): T = #proc convertTo(data: openArray[byte]; T: type Hash256): T = # discard result.data.NodeKey.init(data) # size error => zero - -#template noKeyError(info: static[string]; code: untyped) = -# try: -# code -# except KeyError as e: -# raiseAssert "Not possible (" & info & "): " & e.msg - -template noRlpExceptionOops(info: static[string]; code: untyped) = +template noExceptionOops(info: static[string]; code: untyped) = try: code except RlpError: return err(RlpEncoding) - except KeyError as e: - raiseAssert "Not possible (" & info & "): " & e.msg - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg - -#template noGenericExOrKeyError(info: static[string]; code: untyped) = -# try: -# code -# except KeyError as e: -# raiseAssert "Not possible (" & info & "): " & e.msg -# except Defect as e: -# raise e -# except Exception as e: -# raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + return err(SlotsNotFound) # ------------------------------------------------------------------------------ # Private functions @@ -79,7 +58,7 @@ proc persistentStorageSlots( db: HexaryTreeDbRef; ## Current table ps: SnapDbStorageSlotsRef; ## For persistent database ): Result[void,HexaryError] - {.gcsafe, raises: [OSError,KeyError].} = + {.gcsafe, raises: [OSError,IOError,KeyError].} = ## Store accounts trie table on databse if ps.rockDb.isNil: let rc = db.persistentStorageSlotsPut(ps.kvDb) @@ -322,6 +301,10 @@ proc importStorageSlots*( result.add HexaryNodeReport(slot: itemInx, error: OSErrorException) error "Import storage slots exception", peer, itemInx, nItems, 
name=($e.name), msg=e.msg, nErrors=result.len + except IOError as e: + result.add HexaryNodeReport(slot: itemInx, error: IOErrorException) + error "Import storage slots exception", peer, itemInx, nItems, + name=($e.name), msg=e.msg, nErrors=result.len #when extraTraceMessages: # if result.len == 0: @@ -401,6 +384,11 @@ proc importRawStorageSlotsNodes*( nErrors.inc error "Import storage slots nodes exception", peer, slot, nItems, name=($e.name), msg=e.msg, nErrors + except IOError as e: + result.add HexaryNodeReport(slot: slot, error: IOErrorException) + nErrors.inc + error "Import storage slots nodes exception", peer, slot, nItems, + name=($e.name), msg=e.msg, nErrors when extraTraceMessages: if nErrors == 0: @@ -445,7 +433,7 @@ proc inspectStorageSlotsTrie*( ## let peer {.used.} = ps.peer var stats: TrieNodeStat - noRlpExceptionOops("inspectStorageSlotsTrie()"): + noExceptionOops("inspectStorageSlotsTrie()"): if persistent: stats = ps.getStorageSlotsFn.hexaryInspectTrie( ps.root, pathList, resumeCtx, suspendAfter=suspendAfter) @@ -498,7 +486,7 @@ proc getStorageSlotsData*( let peer {.used.} = ps.peer var acc: Account - noRlpExceptionOops("getStorageSlotsData()"): + noExceptionOops("getStorageSlotsData()"): var leaf: Blob if persistent: leaf = path.hexaryPath(ps.root, ps.getStorageSlotsFn).leafData @@ -506,7 +494,7 @@ proc getStorageSlotsData*( leaf = path.hexaryPath(ps.root, ps.hexaDb).leafData if leaf.len == 0: - return err(AccountNotFound) + return err(SlotsNotFound) acc = rlp.decode(leaf,Account) return ok(acc) diff --git a/nimbus/sync/snap/worker/pivot/find_missing_nodes.nim b/nimbus/sync/snap/worker/pivot/find_missing_nodes.nim index c11e9c52e..0d7830590 100644 --- a/nimbus/sync/snap/worker/pivot/find_missing_nodes.nim +++ b/nimbus/sync/snap/worker/pivot/find_missing_nodes.nim @@ -69,10 +69,9 @@ import template noExceptionOops(info: static[string]; code: untyped) = try: code - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Public functions @@ -125,7 +124,7 @@ proc findMissingNodes*( stopAtLevel = planBLevelMax, maxDangling = fetchRequestTrieNodesMax) result = (stats.dangling, stats.level, stats.count) - except: + except CatchableError: discard # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/pivot/heal_accounts.nim b/nimbus/sync/snap/worker/pivot/heal_accounts.nim index f9d080925..793b0cd7e 100644 --- a/nimbus/sync/snap/worker/pivot/heal_accounts.nim +++ b/nimbus/sync/snap/worker/pivot/heal_accounts.nim @@ -101,10 +101,9 @@ template discardRlpError(info: static[string]; code: untyped) = template noExceptionOops(info: static[string]; code: untyped) = try: code - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Private functions diff --git a/nimbus/sync/snap/worker/pivot/heal_storage_slots.nim b/nimbus/sync/snap/worker/pivot/heal_storage_slots.nim index 3bf77eba5..c73b45a5a 100644 --- a/nimbus/sync/snap/worker/pivot/heal_storage_slots.nim +++ 
b/nimbus/sync/snap/worker/pivot/heal_storage_slots.nim @@ -37,6 +37,9 @@ ## must be solved by fetching and storing more storage slots and running this ## healing algorithm again. ## + +# ###### --- CHECK DEADLOCK ---- #### + import std/[math, sequtils, tables], chronicles, @@ -105,19 +108,12 @@ proc healingCtx( # Private helpers # ------------------------------------------------------------------------------ -#template discardRlpError(info: static[string]; code: untyped) = -# try: -# code -# except RlpError as e: -# discard - template noExceptionOops(info: static[string]; code: untyped) = try: code - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Private functions @@ -205,7 +201,7 @@ proc slotKey(node: NodeSpecs): (bool,NodeKey) = nibbles = prefix & segment if nibbles.len == 64: return (true, nibbles.getBytes.convertTo(NodeKey)) - except: + except CatchableError: discard # ------------------------------------------------------------------------------ diff --git a/nimbus/sync/snap/worker/pivot/storage_queue_helper.nim b/nimbus/sync/snap/worker/pivot/storage_queue_helper.nim index a654f85bc..272f175e8 100644 --- a/nimbus/sync/snap/worker/pivot/storage_queue_helper.nim +++ b/nimbus/sync/snap/worker/pivot/storage_queue_helper.nim @@ -24,10 +24,9 @@ import template noExceptionOops(info: static[string]; code: untyped) = try: code - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Private functions diff --git a/nimbus/sync/snap/worker/pivot/swap_in.nim b/nimbus/sync/snap/worker/pivot/swap_in.nim index 7a692126a..85c02cbfb 100644 --- a/nimbus/sync/snap/worker/pivot/swap_in.nim +++ b/nimbus/sync/snap/worker/pivot/swap_in.nim @@ -99,6 +99,8 @@ proc existsInTrie( return rc.value == node.nodeKey except RlpError: error = RlpEncoding + except CatchableError: + error = ExceptionError when extraTraceMessages: if error != NothingSerious: @@ -107,23 +109,12 @@ proc existsInTrie( false -template noKeyErrorOrExceptionOops(info: static[string]; code: untyped) = - try: - code - except KeyError as e: - raiseAssert "Not possible (" & info & "): " & e.msg - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg - template noExceptionOops(info: static[string]; code: untyped) = try: code - except Defect as e: - raise e - except Exception as e: - raiseAssert "Ooops " & info & ": name=" & $e.name & " msg=" & e.msg + except CatchableError as e: + raiseAssert "Inconveivable (" & + info & "): name=" & $e.name & " msg=" & e.msg # ------------------------------------------------------------------------------ # Private functions @@ -138,13 +129,14 @@ proc uncoveredEnvelopes( ## express this complement as a list of envelopes of sub-tries. 
## var decomposed = "n/a" - let rc = processed.hexaryEnvelopeDecompose(rootKey, getFn) - if rc.isOk: - # Return allocated nodes only - result = rc.value.filterIt(0 < it.nodeKey.ByteArray32.getFn().len) + noExceptionOops("swapIn"): + let rc = processed.hexaryEnvelopeDecompose(rootKey, getFn) + if rc.isOk: + # Return allocated nodes only + result = rc.value.filterIt(0 < it.nodeKey.ByteArray32.getFn().len) - when extraTraceMessages: - decomposed = rc.value.toPC + when extraTraceMessages: + decomposed = rc.value.toPC when extraTraceMessages: trace logTxt "unprocessed envelopes", processed, @@ -216,36 +208,37 @@ proc swapIn( for n in 0 ..< swappedIn.len: swappedIn[n] = NodeTagRangeSet.init() - # Swap in node ranges from other pivots - while lapCount < loopMax: - var merged = 0.u256 # Loop control + noExceptionOops("swapIn"): + # Swap in node ranges from other pivots + while lapCount < loopMax: + var merged = 0.u256 # Loop control - let checkNodes = processed.uncoveredEnvelopes(rootKey, getFn) - for node in checkNodes: + let checkNodes = processed.uncoveredEnvelopes(rootKey, getFn) + for node in checkNodes: - # Process table of sets from other pivots with ranges intersecting - # with the `node` envelope. - for n,rngSet in node.otherProcessedRanges(otherPivots, rootKey, getFn): + # Process table of sets from other pivots with ranges intersecting + # with the `node` envelope. + for n,rngSet in node.otherProcessedRanges(otherPivots, rootKey, getFn): - # Merge `rngSet` into `swappedIn[n]` and `pivot.processed`, - # and remove `rngSet` from ` pivot.unprocessed` - for iv in rngSet.increasing: - discard swappedIn[n].merge iv # Imported range / other pivot - merged += processed.merge iv # Import range as processed - unprocessed.reduce iv # No need to re-fetch + # Merge `rngSet` into `swappedIn[n]` and `pivot.processed`, + # and remove `rngSet` from ` pivot.unprocessed` + for iv in rngSet.increasing: + discard swappedIn[n].merge iv # Imported range / other pivot + merged += processed.merge iv # Import range as processed + unprocessed.reduce iv # No need to re-fetch - if merged == 0: # Loop control - break + if merged == 0: # Loop control + break - lapCount.inc - allMerged += merged # Statistics, logging + lapCount.inc + allMerged += merged # Statistics, logging - when extraTraceMessages: - trace logTxt "inherited ranges", lapCount, nCheckNodes=checkNodes.len, - merged=((merged.to(float) / (2.0^256)).toPC(3)), - allMerged=((allMerged.to(float) / (2.0^256)).toPC(3)) + when extraTraceMessages: + trace logTxt "inherited ranges", lapCount, nCheckNodes=checkNodes.len, + merged=((merged.to(float) / (2.0^256)).toPC(3)), + allMerged=((allMerged.to(float) / (2.0^256)).toPC(3)) - # End while() + # End while() (swappedIn,lapCount) @@ -290,7 +283,7 @@ proc swapInAccounts*( nSlotAccounts = 0 # Logging & debugging swappedIn: seq[NodeTagRangeSet] - noKeyErrorOrExceptionOops("swapInAccounts"): + noExceptionOops("swapInAccounts"): (swappedIn, nLaps) = swapIn( fa.processed, fa.unprocessed, others, rootKey, getFn, loopMax)
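Taken together, the patch replaces the old `except Defect`/`except Exception` re-raise cascades with a single `except CatchableError` handler, usually via a `noExceptionOops()` template, under a module-wide `{.push raises: [].}`. The condensed sketch below restates that pattern; the template body copies the patched `noExceptionOops()`, while the module pragma, `mightFail()` and `demo()` are an illustrative frame around it.

```nim
# The template body mirrors the patched `noExceptionOops()`; `mightFail()`
# and `demo()` are hypothetical helpers for demonstration only.
{.push raises: [].}

template noExceptionOops(info: static[string]; code: untyped) =
  try:
    code
  except CatchableError as e:
    raiseAssert "Inconveivable (" &
      info & "): name=" & $e.name & " msg=" & e.msg

proc mightFail(n: int): int {.raises: [ValueError].} =
  if n < 0:
    raise newException(ValueError, "negative input")
  n * 2

proc demo(n: int): int =
  ## Satisfies the module-wide `raises: []` bound: any `CatchableError`
  ## escaping `mightFail()` becomes an assertion failure instead of a
  ## tracked exception.
  noExceptionOops("demo"):
    result = mightFail(n)

{.pop.}

when isMainModule:
  echo demo(21)                     # 42
```

Under the effect rules targeted here, `Defect` is not part of `raises` tracking, so the former `except Defect: raise e` pass-through branches add nothing once the catch-all `except Exception` handlers are removed; this is consistent with the patch dropping both wholesale.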