fix XDeclaredButNotUsed warnings (#5638)

commit 1791b0ee0a (parent 11bbc4010e)
tersec authored 2023-12-04 21:14:35 +00:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
9 changed files with 43 additions and 58 deletions
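
For context: Nim emits the XDeclaredButNotUsed hint for any symbol that is declared but never read. A minimal sketch of the warning and of the two suppression techniques this commit uses, with hypothetical names that do not come from this repository:

proc fetchValue(): int =
  echo "called for its side effect"
  42

proc before() =
  let x = fetchValue()   # Hint: 'x' is declared but not used [XDeclaredButNotUsed]

proc after() =
  discard fetchValue()   # no binding, no hint; the call still runs

# When a symbol must stay declared (for example, because a template
# injects it), the {.used.} pragma marks it as intentionally unused:
const buildTag {.used.} = "demo"

before()
after()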


@@ -64,7 +64,6 @@ proc keymanagerApiError(status: HttpCode, msg: string): RestApiResponse =
   block:
     var default: string
     try:
-      var defstrings: seq[string]
       var stream = memoryOutput()
       var writer = JsonWriter[RestJson].init(stream)
       writer.beginRecord()


@@ -1024,27 +1024,27 @@ template withStateAndBlck*(
     body: untyped): untyped =
   case s.kind
   of ConsensusFork.Deneb:
-    const consensusFork {.inject.} = ConsensusFork.Deneb
+    const consensusFork {.inject, used.} = ConsensusFork.Deneb
     template forkyState: untyped {.inject.} = s.denebData
     template forkyBlck: untyped {.inject.} = b.denebData
     body
   of ConsensusFork.Capella:
-    const consensusFork {.inject.} = ConsensusFork.Capella
+    const consensusFork {.inject, used.} = ConsensusFork.Capella
     template forkyState: untyped {.inject.} = s.capellaData
     template forkyBlck: untyped {.inject.} = b.capellaData
     body
   of ConsensusFork.Bellatrix:
-    const consensusFork {.inject.} = ConsensusFork.Bellatrix
+    const consensusFork {.inject, used.} = ConsensusFork.Bellatrix
     template forkyState: untyped {.inject.} = s.bellatrixData
     template forkyBlck: untyped {.inject.} = b.bellatrixData
     body
   of ConsensusFork.Altair:
-    const consensusFork {.inject.} = ConsensusFork.Altair
+    const consensusFork {.inject, used.} = ConsensusFork.Altair
     template forkyState: untyped {.inject.} = s.altairData
     template forkyBlck: untyped {.inject.} = b.altairData
     body
   of ConsensusFork.Phase0:
-    const consensusFork {.inject.} = ConsensusFork.Phase0
+    const consensusFork {.inject, used.} = ConsensusFork.Phase0
     template forkyState: untyped {.inject, used.} = s.phase0Data
     template forkyBlck: untyped {.inject, used.} = b.phase0Data
     body
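
The {.used.} additions above follow from how the template is consumed: it injects consensusFork into every call site, but some bodies never read it, and an injected symbol cannot simply be deleted for those callers. A stripped-down sketch of the same pattern, using a hypothetical Fork enum rather than the real ConsensusFork:

type Fork = enum forkA, forkB

template withFork(f: Fork, body: untyped): untyped =
  case f
  of forkA:
    # {.used.} keeps bodies that ignore `fork` free of warnings
    const fork {.inject, used.} = forkA
    body
  of forkB:
    const fork {.inject, used.} = forkB
    body

withFork(forkA):
  echo "this body never mentions `fork`"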


@@ -2649,35 +2649,35 @@ proc getValidatorsLiveness*(
               activities_count = len(list),
               updated_count = updated
           else:
-            let failure = ApiNodeFailure.init(
+            discard ApiNodeFailure.init(
               ApiFailure.UnexpectedResponse, RequestName,
               apiResponse.node, response.status, $res.error)
             # We do not update beacon node's status anymore because of
             # issue #5377.
             continue
         of 400:
-          let failure = ApiNodeFailure.init(
+          discard ApiNodeFailure.init(
             ApiFailure.Invalid, RequestName,
             apiResponse.node, response.status, response.getErrorMessage())
           # We do not update beacon node's status anymore because of
           # issue #5377.
           continue
         of 500:
-          let failure = ApiNodeFailure.init(
+          discard ApiNodeFailure.init(
            ApiFailure.Internal, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
           # We do not update beacon node's status anymore because of
           # issue #5377.
           continue
         of 503:
-          let failure = ApiNodeFailure.init(
+          discard ApiNodeFailure.init(
            ApiFailure.NotSynced, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
           # We do not update beacon node's status anymore because of
           # issue #5377.
           continue
         else:
-          let failure = ApiNodeFailure.init(
+          discard ApiNodeFailure.init(
            ApiFailure.UnexpectedCode, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
           # We do not update beacon node's status anymore because of
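
In these branches the constructed failure value was never read, so each `let` becomes a `discard`: the expression is still evaluated, only the binding disappears. A sketch with a placeholder type, since ApiNodeFailure's definition is not part of this diff:

type DemoFailure = object
  code: int

proc init(T: type DemoFailure, code: int): DemoFailure =
  echo "failure recorded: ", code   # stands in for whatever the real init does
  DemoFailure(code: code)

# was: let failure = DemoFailure.init(500)   -- 'failure' never used
discard DemoFailure.init(500)                # evaluated for effect only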


@@ -914,7 +914,6 @@ proc currentSlot*(vc: ValidatorClientRef): Slot =

 proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
   let
-    slot = vc.currentSlot()
     withdrawalAddress =
       if vc.keymanagerHost.isNil:
         Opt.none Eth1Address


@@ -209,12 +209,6 @@ proc checkSync(
   node.syncInfo = Opt.some(syncInfo)
   let res =
     block:
-      let optimistic =
-        if syncInfo.is_optimistic.isNone():
-          "none"
-        else:
-          $syncInfo.is_optimistic.get()
-
       if not(syncInfo.is_syncing) or (syncInfo.sync_distance < SYNC_TOLERANCE):
         if not(syncInfo.is_optimistic.get(false)):
           RestBeaconNodeStatus.Synced


@@ -466,12 +466,6 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
       dump("./", forkyState)
   do: raiseAssert "withUpdatedState failed"

-func atCanonicalSlot(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] =
-  if slot == 0:
-    dag.getBlockIdAtSlot(GENESIS_SLOT)
-  else:
-    ok BlockSlotId.init((? dag.atSlot(bid, slot - 1)).bid, slot)
-
 proc cmdVerifyEra(conf: DbConf, cfg: RuntimeConfig) =
   let
     f = EraFile.open(conf.eraFile).valueOr:


@@ -296,7 +296,7 @@ proc openConnection*(address: TransportAddress, uri: Uri,
   let transp =
     try:
       await connect(address)
-    except TransportOsError as exc:
+    except TransportOsError:
       raise newException(ConnectionError, "Unable to establish connection")

   let treader = newAsyncStreamReader(transp)
@@ -753,7 +753,7 @@ proc jsonBody(body: openArray[byte]): Result[JsonNode, cstring] =
   let res =
     try:
       parseJson(sbody)
-    except CatchableError as exc:
+    except CatchableError:
       return err("Unable to parse json")
     except Exception as exc:
       raiseAssert exc.msg
@@ -871,7 +871,7 @@ proc runTest(conn: HttpConnectionRef, uri: Uri,
   debug "Running test", name = testName, test_index = testIndex,
         worker_index = workerIndex
-  let (requestUri, request) =
+  let (_, request) =
     block:
       let res = prepareRequest(uri, rule)
       if res.isErr():
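
For tuple unpacking, Nim accepts `_` as a throwaway name, so a component that is never read no longer creates a binding. A quick sketch with hypothetical values:

proc prepare(): (string, int) =
  ("http://example.com/test", 3)

# was: let (url, count) = prepare()   -- 'url' unused
let (_, count) = prepare()
echo count
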
@@ -1152,7 +1152,7 @@ proc run(conf: RestTesterConf): int =
     hostname = uri.hostname & ":" & uri.port
   try:
     waitFor(checkConnection(conf, uri))
-  except ConnectionError as exc:
+  except ConnectionError:
     return 1

   try:
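The exception-handler edits in this file apply the same idea: `except E as exc:` introduces a binding, and when the handler never reads `exc`, dropping the `as` clause suffices. A sketch:

import std/strutils

proc parsePortOrDefault(s: string): int =
  try:
    parseInt(s)
  except ValueError:   # was `except ValueError as exc:` with exc unused
    8080

echo parsePortOrDefault("not-a-number")   # prints 8080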


@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@@ -51,7 +51,7 @@ type
     participationEpochsCount: seq[uint]
     inclusionDelaysCount: seq[uint]

-proc init*(T: type ValidatorDbAggregator, outputDir: string,
+func init*(T: type ValidatorDbAggregator, outputDir: string,
            resolution: uint, endEpoch: Epoch): T =
   const initialCapacity = 1 shl 16
   ValidatorDbAggregator(
@@ -87,7 +87,7 @@ proc checkIntegrity(startEpoch, endEpoch: Epoch, dir: string) =
       fatal "File for epoch does not exist.", epoch = epoch, filePath = filePath
       quit QuitFailure

-proc parseRow(csvRow: CsvRow): RewardsAndPenalties =
+func parseRow(csvRow: CsvRow): RewardsAndPenalties =
   result = RewardsAndPenalties(
     source_outcome: parseBiggestInt(csvRow[0]),
     max_source_reward: parseBiggestUInt(csvRow[1]),
@@ -106,7 +106,7 @@ proc parseRow(csvRow: CsvRow): RewardsAndPenalties =
   if csvRow[14].len > 0:
     result.inclusion_delay = some(parseBiggestUInt(csvRow[14]))

-proc `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
+func `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
   lhs.source_outcome += rhs.source_outcome
   lhs.max_source_reward += rhs.max_source_reward
   lhs.target_outcome += rhs.target_outcome
@@ -128,7 +128,7 @@ proc `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
   if rhs.inclusion_delay.isSome:
     lhs.inclusion_delay = some(rhs.inclusion_delay.get)

-proc average(rp: var RewardsAndPenalties,
+func average(rp: var RewardsAndPenalties,
              averageInclusionDelay: var Option[float],
              epochsCount: uint, inclusionDelaysCount: uint64) =
   rp.source_outcome = rp.source_outcome div epochsCount.int64
@@ -153,7 +153,7 @@ proc average(rp: var RewardsAndPenalties,
     averageInclusionDelay = none(float)

-proc addValidatorData*(aggregator: var ValidatorDbAggregator,
+func addValidatorData*(aggregator: var ValidatorDbAggregator,
                        index: int, rp: RewardsAndPenalties) =
   if index >= aggregator.participationEpochsCount.len:
     aggregator.aggregatedRewardsAndPenalties.add rp
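
The proc-to-func conversions in this file are a related cleanup rather than a warning fix: in Nim, `func` is shorthand for a `proc` with {.noSideEffect.}, so the compiler now verifies that these helpers perform no hidden I/O or global mutation. The two equivalent spellings, sketched:

func double(x: int): int =
  x * 2

# the same contract written out explicitly:
proc doubleExplicit(x: int): int {.noSideEffect.} =
  x * 2

echo double(21), " ", doubleExplicit(21)   # 42 42
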
@@ -243,28 +243,28 @@ proc exitOnSigterm(signal: cint) {.noconv.} =
   notice "Shutting down after having received SIGTERM."
   shouldShutDown = true

-proc main =
-  setControlCHook(controlCHook)
-  when defined(posix):
-    c_signal(SIGTERM, exitOnSigterm)
-
-  let config = load AggregatorConf
-  let (startEpoch, endEpoch) = config.determineStartAndEndEpochs
-  if endEpoch == 0:
-    fatal "Not found epoch info files in the directory.",
-          inputDir = config.inputDir
-    quit QuitFailure
-
-  checkIntegrity(startEpoch, endEpoch, config.inputDir.string)
-
-  let outputDir =
-    if config.outputDir.string.len > 0:
-      config.outputDir
-    else:
-      config.inputDir
-
-  aggregateEpochs(startEpoch, endEpoch, config.resolution,
-                  config.inputDir.string, outputDir.string)
-
 when isMainModule:
+  proc main =
+    setControlCHook(controlCHook)
+    when defined(posix):
+      c_signal(SIGTERM, exitOnSigterm)
+
+    let config = load AggregatorConf
+    let (startEpoch, endEpoch) = config.determineStartAndEndEpochs
+    if endEpoch == 0:
+      fatal "Not found epoch info files in the directory.",
+            inputDir = config.inputDir
+      quit QuitFailure
+
+    checkIntegrity(startEpoch, endEpoch, config.inputDir.string)
+
+    let outputDir =
+      if config.outputDir.string.len > 0:
+        config.outputDir
+      else:
+        config.inputDir
+
+    aggregateEpochs(startEpoch, endEpoch, config.resolution,
+                    config.inputDir.string, outputDir.string)
+
   main()
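
Finally, moving `proc main` under `when isMainModule` means the symbol only exists when the file is compiled as a program; importing the module no longer declares a proc that is never called. The shape of the fix, reduced to a sketch:

proc greet(name: string): string =
  "hello, " & name

when isMainModule:
  # `main` exists only in the executable build, so importers of this
  # module never see an unused declaration
  proc main() =
    echo greet("world")
  main()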


@@ -49,7 +49,6 @@ cli do(validatorsDir: string, secretsDir: string,
     state =
       newClone(readSszForkedHashedBeaconState(
         cfg, readAllBytes(startState).tryGet()))
-    finalizedEpoch = getStateField(state[], finalized_checkpoint).epoch

   var
     clock = BeaconClock.init(getStateField(state[], genesis_time))