Reduce declared but not used warnings (#2822)

andri lim, 2024-11-03 07:11:24 +07:00 (committed by GitHub)
parent 4ffe056a25
commit 89fac051cd
15 changed files with 11 additions and 54 deletions
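
All of the changes below silence one of two Nim diagnostics: `XDeclaredButNotUsed`, for a binding that is never read, and `UnusedImport`, for a module that is never referenced. Where the right-hand side still has to run (for its side effect or its error check) the binding is renamed to the blank identifier `_`; where it does not, the declaration, proc, or import is deleted outright. A minimal sketch of the discard pattern, assuming a Nim version where `_` acts as the blank identifier (the `fetch` proc is hypothetical):

  proc fetch(): int =
    # stand-in for a call that must still run for its side effect
    42

  proc demo() =
    let unused = fetch() # Warning: 'unused' is declared but not used [XDeclaredButNotUsed]
    let _ = fetch()      # blank identifier: the call still runs, no warning emitted

  demo()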

View File

@@ -99,7 +99,7 @@ proc setBlock*(c: ChainRef; blk: Block): Result[void, string] =
   let
     vmState = c.getVmState(header).valueOr:
       return err("no vmstate")
-    stateRootChpt = vmState.parent.stateRoot # Check point
+    _ = vmState.parent.stateRoot # Check point
   ? vmState.processBlock(blk)

   if not c.db.persistHeader(

View File

@@ -13,7 +13,6 @@ import
   stew/[byteutils],
   json_rpc/[rpcserver, rpcclient],
   ../../../nimbus/[
-    config,
     constants,
     transaction,
     db/ledger,

View File

@@ -43,11 +43,6 @@ proc nonceAt*(client: RpcClient, address: Address): Future[AccountNonce] {.async
   let hex = await client.eth_getTransactionCount(address, blockId("latest"))
   result = hex.AccountNonce

-func toTopics(list: openArray[Hash32]): seq[eth_types.Topic] =
-  result = newSeqOfCap[eth_types.Topic](list.len)
-  for x in list:
-    result.add eth_types.Topic(x)
-
 func toLogs(list: openArray[LogObject]): seq[Log] =
   result = newSeqOfCap[Log](list.len)
   for x in list:

View File

@@ -11,7 +11,6 @@
 import
   std/[
     options,
-    strformat,
     strutils,
     times,
     os,

View File

@@ -52,7 +52,7 @@ proc checkBE*[T: RdbBackendRef|MemBackendRef|VoidBackendRef](
   for (rvid,key) in T.walkKeyBe db:
     if topVidBe.vid < rvid.vid:
       topVidBe = rvid
-    let vtx = db.getVtxBE(rvid).valueOr:
+    let _ = db.getVtxBE(rvid).valueOr:
       return err((rvid.vid,CheckBeVtxMissing))

   # Compare calculated `vTop` against database state

View File

@@ -96,19 +96,6 @@ proc retrieveMerkleHash(
       key
   ok key.to(Hash32)

-proc hasPayload(
-    db: AristoDbRef;
-    root: VertexID;
-    path: openArray[byte];
-      ): Result[bool,AristoError] =
-  let error = db.retrieveLeaf(root, path).errorOr:
-    return ok(true)
-  if error == FetchPathNotFound:
-    return ok(false)
-  err(error)
-
 proc hasAccountPayload(
     db: AristoDbRef;
     accPath: Hash32;

View File

@@ -21,7 +21,7 @@
 {.push raises: [].}

 import
-  std/[tables, typetraits],
+  std/[tables],
   eth/common,
   results,
   "."/[aristo_desc, aristo_fetch, aristo_get, aristo_hike, aristo_path]

View File

@@ -37,7 +37,7 @@ proc chainRlpNodes*(
       ): Result[void,AristoError] =
   ## Inspired by the `getBranchAux()` function from `hexary.nim`
   let
-    key = ? db.computeKey rvid
+    _ = ? db.computeKey rvid
     (vtx,_) = ? db.getVtxRc rvid
     node = vtx.toNode(rvid.root, db).valueOr:
       return err(PartChnNodeConvError)
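
`?` is the try-operator from `results`: on success it yields the unwrapped value, on failure it returns the error from the enclosing proc. Binding its result to `_` preserves `computeKey`'s error propagation (and any work it performs internally) while dropping the key that was never read. A sketch under the same assumptions, with a hypothetical `computeThing`:

  import results

  type CalcError = enum
    BadInput

  proc computeThing(x: int): Result[int, CalcError] =
    if x < 0:
      return err(BadInput)
    ok(x * 2)

  proc runForEffect(x: int): Result[void, CalcError] =
    # `?` propagates BadInput to the caller; `_` discards the doubled value.
    let _ = ? computeThing(x)
    ok()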

View File

@@ -230,21 +230,6 @@ proc jLogger(
       ) =
   tr.jLogger(EmptyBlob, ti)

-proc jLogger(
-    tr: TraceRecorderRef;
-    root: VertexID;
-    path: openArray[byte];
-    ti: TraceDataItemRef;
-      ) =
-  tr.jLogger(@[root.byte] & @path, ti)
-
-proc jLogger(
-    tr: TraceRecorderRef;
-    root: VertexID;
-    ti: TraceDataItemRef;
-      ) =
-  tr.jLogger(@[root.byte], ti)
-
 proc jLogger(
     tr: TraceRecorderRef;
     accPath: Hash32;

View File

@@ -156,8 +156,6 @@ proc runPeer*(buddy: BeaconBuddyRef; info: static[string]) {.async.} =
   ## This peer worker method is repeatedly invoked (exactly one per peer) while
   ## the `buddy.ctrl.poolMode` flag is set `false`.
   ##
-  let peer = buddy.peer
-
   if 0 < buddy.only.nMultiLoop: # statistics/debugging
     buddy.only.multiRunIdle = Moment.now() - buddy.only.stoppedMultiRun
   buddy.only.nMultiLoop.inc # statistics/debugging

View File

@@ -224,7 +224,7 @@ proc dbPeekParentHash*(ctx: BeaconCtxRef; num: BlockNumber): Opt[Hash32] =

 proc dbUnstashHeader*(ctx: BeaconCtxRef; bn: BlockNumber) =
   ## Remove header from temporary DB list
-  ctx.stash.withValue(bn, val):
+  ctx.stash.withValue(bn, _):
     ctx.stash.del bn
     return
   discard ctx.db.ctx.getKvt().del(beaconHeaderKey(bn).toOpenArray)
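
`withValue` from `std/tables` runs its block only when the key is present, injecting a pointer to the stored value under the name given as its last identifier argument; since only the key's presence matters here, that name becomes `_`. A runnable sketch of the idiom, again assuming the blank-identifier support used above:

  import std/tables

  var stash = initTable[uint64, string]()
  stash[7] = "header"

  # The block runs only if key 7 exists; `_` discards the injected binding.
  stash.withValue(7, _):
    stash.del 7

  doAssert 7 notin stash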

View File

@@ -246,7 +246,6 @@ proc dumpBlockStateImpl(
   let
     cc = activate CaptCtxRef.init(com, header)
-    parent = com.db.getBlockHeader(header.parentHash)
     # only need a stack dump when scanning for internal transaction address
     captureFlags = {DisableMemory, DisableStorage, EnableAccount}

View File

@@ -13,7 +13,7 @@ import
   eth/common,
   stew/endians2,
   ../../nimbus/db/aristo/[
-    aristo_debug, aristo_desc, aristo_hike, aristo_layers, aristo_merge,
+    aristo_debug, aristo_desc, aristo_hike, aristo_layers,
     aristo_tx],
   ../replay/pp,
   "."/[undump_accounts, undump_desc, undump_storages, test_samples_xx]

View File

@@ -348,7 +348,6 @@ proc runLedgerTransactionTests(noisy = true) =
         env.txi.add n

     test &"Run {env.txi.len} two-step trials with rollback":
-      let head = env.xdb.getCanonicalHead()
       for n in env.txi:
         let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
@@ -356,7 +355,6 @@ proc runLedgerTransactionTests(noisy = true) =
         env.runTrial2ok(ledger, n)

     test &"Run {env.txi.len} three-step trials with rollback":
-      let head = env.xdb.getCanonicalHead()
       for n in env.txi:
         let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
@@ -365,7 +363,6 @@ proc runLedgerTransactionTests(noisy = true) =
     test &"Run {env.txi.len} three-step trials with extra db frame rollback" &
         " throwing Exceptions":
-      let head = env.xdb.getCanonicalHead()
       for n in env.txi:
         let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
@@ -373,7 +370,6 @@ proc runLedgerTransactionTests(noisy = true) =
         env.runTrial3Survive(ledger, n, noisy)

     test &"Run {env.txi.len} tree-step trials without rollback":
-      let head = env.xdb.getCanonicalHead()
       for n in env.txi:
         let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()
@@ -381,7 +377,6 @@ proc runLedgerTransactionTests(noisy = true) =
         env.runTrial3(ledger, n, rollback = false)

     test &"Run {env.txi.len} four-step trials with rollback and db frames":
-      let head = env.xdb.getCanonicalHead()
       for n in env.txi:
         let dbTx = env.xdb.ctx.newTransaction()
         defer: dbTx.dispose()

View File

@@ -93,7 +93,7 @@ proc close(client: RpcHttpClient, server: RpcHttpServer) =
 # unless the base distance is reached. This is not the case for the tests, so we
 # have to manually persist the blocks to the db.
 # Main goal of the tests to check the RPC calls, can serve data persisted in the db
-# as data from memory blocks are easily tested via kurtosis or other tests
+# as data from memory blocks are easily tested via kurtosis or other tests
 proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv =
   var
     acc = ctx.am.getAccount(signer).tryGet()
@@ -201,7 +201,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv =
   doAssert com.db.persistHeader(header,
     com.pos.isNil, com.startOfHistory)
   com.db.persistFixtureBlock()
   com.db.persistent(header.number).isOkOr:
@@ -212,7 +212,7 @@ proc setupEnv(signer, ks2: Address, ctx: EthContext, com: CommonRef): TestEnv =
     txHash: signedTx1.rlpHash,
     blockHash: header.blockHash
   )

 proc rpcMain*() =
   suite "Remote Procedure Calls":
@@ -242,7 +242,7 @@ proc rpcMain*() =
       debugEcho unlock.error
     doAssert(unlock.isOk)

-    let
+    let
       env = setupEnv(signer, ks2, ctx, com)
       chain = ForkedChainRef.init(com)
       txPool = TxPoolRef.new(com)
@@ -370,7 +370,7 @@ proc rpcMain*() =
       let msgData = "\x19Ethereum Signed Message:\n" & $msg.len & msg
       let msgDataBytes = @(msgData.toOpenArrayByte(0, msgData.len-1))
       let msgHash = await client.web3_sha3(msgDataBytes)
-      let pubkey = recover(sig, SkMessage(msgHash.bytes)).tryGet()
+      let pubkey = recover(sig, SkMessage(msgHash.data)).tryGet()
      let recoveredAddr = pubkey.toCanonicalAddress()
       check recoveredAddr == signer # verified