extend `ncli` exception handling (#5905)

Make raised exceptions explicit in `ncli_common.nim`, and handle more of
them in `ncli_db.nim`, for better UX when directories cannot be read or
file names do not match the expected format.
Etan Kissling, 2024-02-19 10:56:19 +01:00, committed by GitHub
parent e04e95167d, commit 5fdb06fcd1
2 changed files with 90 additions and 27 deletions
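
The pattern applied throughout this commit: a module-level {.push raises: [].} makes "raises nothing" the default, procs that can fail declare their exceptions in an explicit {.raises.} annotation, and callers either handle those exceptions or abort with a clear message. A minimal, self-contained sketch of the discipline (the proc and directory names are invented for illustration, not taken from the commit):

{.push raises: [].}

import std/[os, strutils]

proc lastEpochInDir(dir: string): uint {.raises: [OSError, ValueError].} =
  # Largest numeric file-name prefix in `dir`. walkDir can raise OSError,
  # parseUInt can raise ValueError; both are part of the declared contract.
  for (_, name) in walkDir(dir, relative = true):
    let epoch = parseUInt(name.split('.')[0])
    if epoch > result:
      result = epoch

when isMainModule:
  try:
    echo lastEpochInDir("validator_db_unaggregated")
  except OSError as e:
    echo "cannot read directory: ", e.msg
  except ValueError as e:
    echo "unexpected file name: ", e.msg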

ncli_common.nim

@@ -5,6 +5,8 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [].}
+
 import
   std/[os, strutils],
   stew/bitops2,
@@ -124,8 +126,9 @@ static:
       "15227487_86601706.echop"]: # Wrong extension
     doAssert not filename.matchFilenameAggregatedFiles
 
-proc getUnaggregatedFilesEpochRange*(dir: string):
-    tuple[firstEpoch, lastEpoch: Epoch] =
+proc getUnaggregatedFilesEpochRange*(
+    dir: string
+): tuple[firstEpoch, lastEpoch: Epoch] {.raises: [OSError, ValueError].} =
   var smallestEpochFileName =
     '9'.repeat(epochInfoFileNameDigitsCount) & epochFileNameExtension
   var largestEpochFileName =
@@ -141,10 +144,12 @@ proc getUnaggregatedFilesEpochRange*(dir: string):
   result.lastEpoch = parseUInt(
     largestEpochFileName[0 ..< epochInfoFileNameDigitsCount]).Epoch
 
-proc getUnaggregatedFilesLastEpoch*(dir: string): Epoch =
+proc getUnaggregatedFilesLastEpoch*(
+    dir: string): Epoch {.raises: [OSError, ValueError].} =
   dir.getUnaggregatedFilesEpochRange.lastEpoch
 
-proc getAggregatedFilesLastEpoch*(dir: string): Epoch =
+proc getAggregatedFilesLastEpoch*(
+    dir: string): Epoch {.raises: [OSError, ValueError].} =
   var largestEpochInFileName = 0'u
   for (_, fn) in walkDir(dir.string, relative = true):
     if fn.matchFilenameAggregatedFiles:
@@ -403,9 +408,15 @@ proc collectFromDeposits(
     var index = findValidatorIndex(forkyState.data, pubkey)
     if index.isNone:
       if pubkey in pubkeyToIndex:
-        index = Opt[ValidatorIndex].ok(pubkeyToIndex[pubkey])
+        try:
+          index = Opt[ValidatorIndex].ok(pubkeyToIndex[pubkey])
+        except KeyError as e:
+          raiseAssert "pubkey was checked to exist: " & e.msg
     if index.isSome:
-      rewardsAndPenalties[index.get()].deposits += amount
+      try:
+        rewardsAndPenalties[index.get()].deposits += amount
+      except KeyError as e:
+        raiseAssert "rewardsAndPenalties lacks expected index " & $index.get()
     elif verify_deposit_signature(cfg, deposit.data):
       pubkeyToIndex[pubkey] = ValidatorIndex(rewardsAndPenalties.len)
       rewardsAndPenalties.add(
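
Where an exception is provably impossible (here, the key was checked on the line above), the commit converts it into a Defect via raiseAssert instead of widening the proc's raises list. That idiom in isolation, with invented names (a sketch, not the commit's code):

{.push raises: [].}

import std/tables

proc lookupChecked(t: Table[string, int], key: string): int =
  # Table's `[]` declares KeyError even when the key is known to exist,
  # so the effect checker is satisfied by turning it into a Defect.
  if key in t:
    try:
      result = t[key]
    except KeyError as e:
      raiseAssert "key was checked to exist: " & e.msg
  else:
    result = -1  # sentinel meaning "absent", for this toy example only

when isMainModule:
  let t = {"a": 1}.toTable
  doAssert t.lookupChecked("a") == 1
  doAssert t.lookupChecked("b") == -1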

ncli_db.nim

@@ -5,8 +5,10 @@
 # * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.
 
+{.push raises: [].}
+
 import
-  std/[os, stats, strformat, tables],
+  std/[os, stats, tables],
   snappy,
   chronicles, confutils, stew/[byteutils, io2], eth/db/kvstore_sqlite3,
   ../beacon_chain/networking/network_metadata,
@@ -240,7 +242,8 @@ proc cmdBench(conf: DbConf, cfg: RuntimeConfig) =
       seq[capella.TrustedSignedBeaconBlock],
       seq[deneb.TrustedSignedBeaconBlock])
 
-  echo &"Loaded head slot {dag.head.slot}, selected {blockRefs.len} blocks"
+  echo "Loaded head slot ", dag.head.slot,
+    " selected ", blockRefs.len, " blocks"
   doAssert blockRefs.len() > 0, "Must select at least one block"
 
   for b in 0 ..< blockRefs.len:
@@ -390,8 +393,18 @@ proc cmdPutState(conf: DbConf, cfg: RuntimeConfig) =
   for file in conf.stateFile:
     if shouldShutDown: quit QuitSuccess
 
-    let state = newClone(readSszForkedHashedBeaconState(
-      cfg, readAllBytes(file).tryGet()))
+    let state =
+      try:
+        newClone(readSszForkedHashedBeaconState(
+          cfg, readAllBytes(file).tryGet()))
+      except ResultError[IoErrorCode] as e:
+        echo "Couldn't load ", file, ": ", e.msg
+        continue
+      except SerializationError as e:
+        echo "Malformed ", file, ": ", e.msg
+        continue
 
     withState(state[]):
       db.putState(forkyState)
@@ -427,8 +440,16 @@ proc cmdPutBlock(conf: DbConf, cfg: RuntimeConfig) =
   for file in conf.blckFile:
     if shouldShutDown: quit QuitSuccess
 
-    let blck = readSszForkedSignedBeaconBlock(
-      cfg, readAllBytes(file).tryGet())
+    let blck =
+      try:
+        readSszForkedSignedBeaconBlock(
+          cfg, readAllBytes(file).tryGet())
+      except ResultError[IoErrorCode] as e:
+        echo "Couldn't load ", file, ": ", e.msg
+        continue
+      except SerializationError as e:
+        echo "Malformed ", file, ": ", e.msg
+        continue
 
     withBlck(blck.asTrusted()):
       db.putBlock(forkyBlck)
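
Both cmdPutState and cmdPutBlock rely on Nim's try being an expression: the let binding is initialized by the success branch, while each handler reports the problem and continues to the next file. A stand-alone sketch of that load-or-skip shape, assuming stew's io2/byteutils/results modules behave as described (the integer parsing stands in for the commit's SSZ decoding):

{.push raises: [].}

import std/strutils
import stew/[byteutils, io2, results]

for file in ["a.txt", "b.txt"]:
  let value =
    try:
      # readAllBytes returns a Result; tryGet raises ResultError[IoErrorCode]
      # if the read failed, and parseInt raises ValueError on bad content.
      parseInt(string.fromBytes(readAllBytes(file).tryGet()).strip())
    except ResultError[IoErrorCode] as e:
      echo "Couldn't load ", file, ": ", e.msg
      continue
    except ValueError as e:
      echo "Malformed ", file, ": ", e.msg
      continue
  echo file, ": ", value
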
@@ -576,7 +597,13 @@ proc cmdExportEra(conf: DbConf, cfg: RuntimeConfig) =
     try:
       moveFile(tmpName, name)
     except IOError as e:
-      warn "Failed to rename era file to its final name",
+      warn "Failed to rename era file to its final name (IOError)",
+        name, tmpName, error = e.msg
+    except OSError as e:
+      warn "Failed to rename era file to its final name (OSError)",
+        name, tmpName, error = e.msg
+    except Exception as e:
+      warn "Failed to rename era file to its final name (Exception)",
         name, tmpName, error = e.msg
     else:
       if (let e = io2.removeFile(name); e.isErr):
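
The catch-all Exception branch above exists because moveFile's declared exception list has historically been broader than IOError and OSError; catching it keeps the surrounding {.push raises: [].} satisfied without aborting era export. The same three-branch shape in isolation, assuming chronicles provides warn (a sketch, not the commit's code):

{.push raises: [].}

import std/os
import chronicles

proc finalizeFile(tmpName, name: string) =
  # Report rename failures without propagating; the proc raises nothing.
  try:
    moveFile(tmpName, name)
  except IOError as e:
    warn "Failed to rename file (IOError)", name, tmpName, error = e.msg
  except OSError as e:
    warn "Failed to rename file (OSError)", name, tmpName, error = e.msg
  except Exception as e:
    warn "Failed to rename file (Exception)", name, tmpName, error = e.msg

when isMainModule:
  finalizeFile("out.era.tmp", "out.era")
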
@@ -915,18 +942,25 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
       if conf.startEpoch.isSome:
         Epoch(conf.startEpoch.get)
       else:
-        let unaggregatedFilesNextEpoch = getUnaggregatedFilesLastEpoch(
-          unaggregatedFilesOutputDir) + 1
-        let aggregatedFilesNextEpoch = getAggregatedFilesLastEpoch(
-          aggregatedFilesOutputDir) + 1
-        if conf.writeUnaggregatedFiles and conf.writeAggregatedFiles:
-          min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
-        elif conf.writeUnaggregatedFiles:
-          unaggregatedFilesNextEpoch
-        elif conf.writeAggregatedFiles:
-          aggregatedFilesNextEpoch
-        else:
-          min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
+        try:
+          let unaggregatedFilesNextEpoch = getUnaggregatedFilesLastEpoch(
+            unaggregatedFilesOutputDir) + 1
+          let aggregatedFilesNextEpoch = getAggregatedFilesLastEpoch(
+            aggregatedFilesOutputDir) + 1
+          if conf.writeUnaggregatedFiles and conf.writeAggregatedFiles:
+            min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
+          elif conf.writeUnaggregatedFiles:
+            unaggregatedFilesNextEpoch
+          elif conf.writeAggregatedFiles:
+            aggregatedFilesNextEpoch
+          else:
+            min(unaggregatedFilesNextEpoch, aggregatedFilesNextEpoch)
+        except OSError as e:
+          fatal "Failed to iterate epoch files", e = e.msg
+          quit QuitFailure
+        except ValueError as e:
+          fatal "Failed to parse epoch file name", e = e.msg
+          quit QuitFailure
     endEpoch =
       if conf.endEpoch.isSome:
         Epoch(conf.endEpoch.get)
@@ -947,10 +981,28 @@ proc cmdValidatorDb(conf: DbConf, cfg: RuntimeConfig) =
   blockRefs = dag.getBlockRange(startSlot, endSlot)
 
   if not unaggregatedFilesOutputDir.dirExists:
-    unaggregatedFilesOutputDir.createDir
+    try:
+      unaggregatedFilesOutputDir.createDir()
+    except IOError as e:
+      fatal "Failed to create unaggregated files directory (IOError)",
+        dirName = unaggregatedFilesOutputDir, e = e.msg
+      quit QuitFailure
+    except OSError as e:
+      fatal "Failed to create unaggregated files directory (OSError)",
+        dirName = unaggregatedFilesOutputDir, e = e.msg
+      quit QuitFailure
 
   if not aggregatedFilesOutputDir.dirExists:
-    aggregatedFilesOutputDir.createDir
+    try:
+      aggregatedFilesOutputDir.createDir()
+    except IOError as e:
+      fatal "Failed to create aggregated files directory (IOError)",
+        dirName = aggregatedFilesOutputDir, e = e.msg
+      quit QuitFailure
+    except OSError as e:
+      fatal "Failed to create aggregated files directory (OSError)",
+        dirName = aggregatedFilesOutputDir, e = e.msg
+      quit QuitFailure
 
   let tmpState = newClone(dag.headState)
   var cache = StateCache()
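
The CLI convention visible in these hunks: per-item failures (one bad state or block file) are reported with echo and skipped, while an unusable environment (output directory cannot be created, epoch files cannot be listed) is fatal and quits. A compact sketch of the directory-preparation step under that convention, assuming chronicles' fatal is available (the helper name is invented):

{.push raises: [].}

import std/os
import chronicles

proc ensureDir(dirName: string) =
  # createDir is documented to raise OSError or IOError; either one means
  # the tool cannot produce its output, so quit instead of continuing.
  if not dirName.dirExists:
    try:
      dirName.createDir()
    except IOError as e:
      fatal "Failed to create directory (IOError)", dirName, e = e.msg
      quit QuitFailure
    except OSError as e:
      fatal "Failed to create directory (OSError)", dirName, e = e.msg
      quit QuitFailure

when isMainModule:
  ensureDir("validator_db_aggregated")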