fix XDeclaredButNotUsed warnings (#5638)

parent 11bbc4010e
commit 1791b0ee0a

@@ -64,7 +64,6 @@ proc keymanagerApiError(status: HttpCode, msg: string): RestApiResponse =
     block:
       var default: string
       try:
-        var defstrings: seq[string]
         var stream = memoryOutput()
         var writer = JsonWriter[RestJson].init(stream)
         writer.beginRecord()

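The simplest class of fix in this commit: a binding whose right-hand side has no effects is deleted outright. A minimal reproduction of the hint, with an illustrative proc name not taken from the codebase:

proc buildMessage(msg: string): string =
  # Hint: 'defstrings' is declared but not used [XDeclaredButNotUsed];
  # the fix is simply to delete this line.
  var defstrings: seq[string]
  "{\"message\": \"" & msg & "\"}"

echo buildMessage("hello")
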
@@ -1024,27 +1024,27 @@ template withStateAndBlck*(
     body: untyped): untyped =
   case s.kind
   of ConsensusFork.Deneb:
-    const consensusFork {.inject.} = ConsensusFork.Deneb
+    const consensusFork {.inject, used.} = ConsensusFork.Deneb
     template forkyState: untyped {.inject.} = s.denebData
     template forkyBlck: untyped {.inject.} = b.denebData
     body
   of ConsensusFork.Capella:
-    const consensusFork {.inject.} = ConsensusFork.Capella
+    const consensusFork {.inject, used.} = ConsensusFork.Capella
     template forkyState: untyped {.inject.} = s.capellaData
     template forkyBlck: untyped {.inject.} = b.capellaData
     body
   of ConsensusFork.Bellatrix:
-    const consensusFork {.inject.} = ConsensusFork.Bellatrix
+    const consensusFork {.inject, used.} = ConsensusFork.Bellatrix
     template forkyState: untyped {.inject.} = s.bellatrixData
     template forkyBlck: untyped {.inject.} = b.bellatrixData
     body
   of ConsensusFork.Altair:
-    const consensusFork {.inject.} = ConsensusFork.Altair
+    const consensusFork {.inject, used.} = ConsensusFork.Altair
     template forkyState: untyped {.inject.} = s.altairData
     template forkyBlck: untyped {.inject.} = b.altairData
     body
   of ConsensusFork.Phase0:
-    const consensusFork {.inject.} = ConsensusFork.Phase0
+    const consensusFork {.inject, used.} = ConsensusFork.Phase0
     template forkyState: untyped {.inject, used.} = s.phase0Data
     template forkyBlck: untyped {.inject, used.} = b.phase0Data
     body

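The template injects `consensusFork` for every call site, but not every caller's `body` reads it; each such instantiation would trip XDeclaredButNotUsed. The `{.used.}` pragma marks the symbol as intentionally possibly-unused. A minimal sketch of the pattern, with a toy enum standing in for ConsensusFork:

type Fork = enum forkA, forkB

template withFork(kind: Fork, body: untyped): untyped =
  case kind
  of forkA:
    # {.used.} suppresses the hint when a caller's body ignores `fork`.
    const fork {.inject, used.} = forkA
    body
  of forkB:
    const fork {.inject, used.} = forkB
    body

withFork(forkA):
  echo "this body never reads `fork`"   # no hint, thanks to {.used.}
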
@@ -2649,35 +2649,35 @@ proc getValidatorsLiveness*(
              activities_count = len(list),
              updated_count = updated
          else:
-           let failure = ApiNodeFailure.init(
+           discard ApiNodeFailure.init(
              ApiFailure.UnexpectedResponse, RequestName,
              apiResponse.node, response.status, $res.error)
            # We do not update beacon node's status anymore because of
            # issue #5377.
            continue
        of 400:
-         let failure = ApiNodeFailure.init(
+         discard ApiNodeFailure.init(
            ApiFailure.Invalid, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
          # We do not update beacon node's status anymore because of
          # issue #5377.
          continue
        of 500:
-         let failure = ApiNodeFailure.init(
+         discard ApiNodeFailure.init(
            ApiFailure.Internal, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
          # We do not update beacon node's status anymore because of
          # issue #5377.
          continue
        of 503:
-         let failure = ApiNodeFailure.init(
+         discard ApiNodeFailure.init(
            ApiFailure.NotSynced, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
          # We do not update beacon node's status anymore because of
          # issue #5377.
          continue
        else:
-         let failure = ApiNodeFailure.init(
+         discard ApiNodeFailure.init(
            ApiFailure.UnexpectedCode, RequestName,
            apiResponse.node, response.status, response.getErrorMessage())
          # We do not update beacon node's status anymore because of

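Here the unused binding is replaced with `discard` rather than deleted: `discard` still evaluates the expression, so whatever the constructor does is preserved, and the error-handling branches keep their uniform shape, while the dead `let failure` binding goes away. A sketch with a hypothetical stand-in for ApiNodeFailure.init:

proc initFailure(code: int, reason: string): string =
  echo "recording failure: ", code, " ", reason   # side effect worth keeping
  $code & ": " & reason

# Before: `let failure = initFailure(...)` hints when `failure` is never read.
# After: the call still runs; only the unused binding is gone.
discard initFailure(500, "internal error")
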
@@ -914,7 +914,6 @@ proc currentSlot*(vc: ValidatorClientRef): Slot =

 proc addValidator*(vc: ValidatorClientRef, keystore: KeystoreData) =
   let
-    slot = vc.currentSlot()
     withdrawalAddress =
       if vc.keymanagerHost.isNil:
         Opt.none Eth1Address

@@ -209,12 +209,6 @@ proc checkSync(
   node.syncInfo = Opt.some(syncInfo)
   let res =
     block:
-      let optimistic =
-        if syncInfo.is_optimistic.isNone():
-          "none"
-        else:
-          $syncInfo.is_optimistic.get()
-
       if not(syncInfo.is_syncing) or (syncInfo.sync_distance < SYNC_TOLERANCE):
         if not(syncInfo.is_optimistic.get(false)):
           RestBeaconNodeStatus.Synced

@@ -466,12 +466,6 @@ proc cmdRewindState(conf: DbConf, cfg: RuntimeConfig) =
       dump("./", forkyState)
   do: raiseAssert "withUpdatedState failed"

-func atCanonicalSlot(dag: ChainDAGRef, bid: BlockId, slot: Slot): Opt[BlockSlotId] =
-  if slot == 0:
-    dag.getBlockIdAtSlot(GENESIS_SLOT)
-  else:
-    ok BlockSlotId.init((? dag.atSlot(bid, slot - 1)).bid, slot)
-
 proc cmdVerifyEra(conf: DbConf, cfg: RuntimeConfig) =
   let
     f = EraFile.open(conf.eraFile).valueOr:

@@ -296,7 +296,7 @@ proc openConnection*(address: TransportAddress, uri: Uri,
   let transp =
     try:
       await connect(address)
-    except TransportOsError as exc:
+    except TransportOsError:
       raise newException(ConnectionError, "Unable to establish connection")

   let treader = newAsyncStreamReader(transp)

@@ -753,7 +753,7 @@ proc jsonBody(body: openArray[byte]): Result[JsonNode, cstring] =
   let res =
     try:
       parseJson(sbody)
-    except CatchableError as exc:
+    except CatchableError:
       return err("Unable to parse json")
     except Exception as exc:
       raiseAssert exc.msg

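Note the contrast within this hunk: the `CatchableError` handler drops its `as exc` because the exception value is never inspected, while the `Exception` handler keeps the binding because `exc.msg` is read. A small self-contained illustration:

import std/json

proc parseBody(sbody: string): JsonNode =
  try:
    parseJson(sbody)
  except CatchableError:          # binding dropped: error value unused here
    raise newException(ValueError, "Unable to parse json")

try:
  discard parseBody("{not json")
except ValueError as exc:         # binding kept: `exc` is actually read
  echo exc.msg
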
@@ -871,7 +871,7 @@ proc runTest(conn: HttpConnectionRef, uri: Uri,
   debug "Running test", name = testName, test_index = testIndex,
         worker_index = workerIndex

-  let (requestUri, request) =
+  let (_, request) =
     block:
       let res = prepareRequest(uri, rule)
       if res.isErr():

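For tuple unpacking, the placeholder `_` discards an element without creating a named binding, so nothing is left for the hint to flag. A sketch with a hypothetical prepareRequest returning a (uri, request) pair:

proc prepareRequest(): (string, string) =
  ("http://127.0.0.1:8080/test", "GET / HTTP/1.1")

let (_, request) = prepareRequest()   # the URI is not needed at this call site
echo request
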
@@ -1152,7 +1152,7 @@ proc run(conf: RestTesterConf): int =
     hostname = uri.hostname & ":" & uri.port
   try:
     waitFor(checkConnection(conf, uri))
-  except ConnectionError as exc:
+  except ConnectionError:
     return 1

   try:

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2022 Status Research & Development GmbH
+# Copyright (c) 2022-2023 Status Research & Development GmbH
 # Licensed and distributed under either of
 #   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -51,7 +51,7 @@ type
     participationEpochsCount: seq[uint]
     inclusionDelaysCount: seq[uint]

-proc init*(T: type ValidatorDbAggregator, outputDir: string,
+func init*(T: type ValidatorDbAggregator, outputDir: string,
            resolution: uint, endEpoch: Epoch): T =
   const initialCapacity = 1 shl 16
   ValidatorDbAggregator(

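The remaining hunks in this file are a related cleanup: side-effect-free procs become func, which in Nim is shorthand for proc {.noSideEffect.}, so the compiler verifies the routine touches no global state and performs no IO. A sketch of the same proc-to-func conversion on an illustrative record type:

type Counter = object
  total: int

func init(T: type Counter, start: int): T =
  # Pure construction: no globals, no IO, so `func` is accepted.
  Counter(total: start)

func `+=`(lhs: var Counter, rhs: Counter) =
  lhs.total += rhs.total   # mutating a `var` parameter is still allowed

var c = Counter.init(1)
c += Counter.init(2)
echo c.total   # prints 3
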
@@ -87,7 +87,7 @@ proc checkIntegrity(startEpoch, endEpoch: Epoch, dir: string) =
       fatal "File for epoch does not exist.", epoch = epoch, filePath = filePath
       quit QuitFailure

-proc parseRow(csvRow: CsvRow): RewardsAndPenalties =
+func parseRow(csvRow: CsvRow): RewardsAndPenalties =
   result = RewardsAndPenalties(
     source_outcome: parseBiggestInt(csvRow[0]),
     max_source_reward: parseBiggestUInt(csvRow[1]),

@@ -106,7 +106,7 @@ proc parseRow(csvRow: CsvRow): RewardsAndPenalties =
   if csvRow[14].len > 0:
     result.inclusion_delay = some(parseBiggestUInt(csvRow[14]))

-proc `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
+func `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
   lhs.source_outcome += rhs.source_outcome
   lhs.max_source_reward += rhs.max_source_reward
   lhs.target_outcome += rhs.target_outcome

@@ -128,7 +128,7 @@ proc `+=`(lhs: var RewardsAndPenalties, rhs: RewardsAndPenalties) =
   if rhs.inclusion_delay.isSome:
     lhs.inclusion_delay = some(rhs.inclusion_delay.get)

-proc average(rp: var RewardsAndPenalties,
+func average(rp: var RewardsAndPenalties,
              averageInclusionDelay: var Option[float],
              epochsCount: uint, inclusionDelaysCount: uint64) =
   rp.source_outcome = rp.source_outcome div epochsCount.int64

@@ -153,7 +153,7 @@ proc average(rp: var RewardsAndPenalties,
     averageInclusionDelay = none(float)


-proc addValidatorData*(aggregator: var ValidatorDbAggregator,
+func addValidatorData*(aggregator: var ValidatorDbAggregator,
                        index: int, rp: RewardsAndPenalties) =
   if index >= aggregator.participationEpochsCount.len:
     aggregator.aggregatedRewardsAndPenalties.add rp

@@ -243,28 +243,28 @@ proc exitOnSigterm(signal: cint) {.noconv.} =
   notice "Shutting down after having received SIGTERM."
   shouldShutDown = true

-proc main =
-  setControlCHook(controlCHook)
-  when defined(posix):
-    c_signal(SIGTERM, exitOnSigterm)
-
-  let config = load AggregatorConf
-  let (startEpoch, endEpoch) = config.determineStartAndEndEpochs
-  if endEpoch == 0:
-    fatal "Not found epoch info files in the directory.",
-          inputDir = config.inputDir
-    quit QuitFailure
-
-  checkIntegrity(startEpoch, endEpoch, config.inputDir.string)
-
-  let outputDir =
-    if config.outputDir.string.len > 0:
-      config.outputDir
-    else:
-      config.inputDir
-
-  aggregateEpochs(startEpoch, endEpoch, config.resolution,
-                  config.inputDir.string, outputDir.string)
-
+when isMainModule:
+  proc main =
+    setControlCHook(controlCHook)
+    when defined(posix):
+      c_signal(SIGTERM, exitOnSigterm)
+
+    let config = load AggregatorConf
+    let (startEpoch, endEpoch) = config.determineStartAndEndEpochs
+    if endEpoch == 0:
+      fatal "Not found epoch info files in the directory.",
+            inputDir = config.inputDir
+      quit QuitFailure
+
+    checkIntegrity(startEpoch, endEpoch, config.inputDir.string)
+
+    let outputDir =
+      if config.outputDir.string.len > 0:
+        config.outputDir
+      else:
+        config.inputDir
+
+    aggregateEpochs(startEpoch, endEpoch, config.resolution,
+                    config.inputDir.string, outputDir.string)
+
+  main()

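Moving `main` under `when isMainModule:` means the proc is only declared when this file is compiled as the program's entry module; when the module is merely imported, no entry-point proc is declared, so none can sit unused. The idiom in miniature:

proc aggregate(xs: seq[int]): int =
  for x in xs: result += x

when isMainModule:
  # Declared and invoked only when this file is the compiled program,
  # never when it is imported as a library module.
  proc main =
    echo aggregate(@[1, 2, 3])

  main()
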
@@ -49,7 +49,6 @@ cli do(validatorsDir: string, secretsDir: string,
     state =
       newClone(readSszForkedHashedBeaconState(
         cfg, readAllBytes(startState).tryGet()))
-    finalizedEpoch = getStateField(state[], finalized_checkpoint).epoch

   var
     clock = BeaconClock.init(getStateField(state[], genesis_time))