fix most ConvFromXtoItselfNotNeeded hints and unhide remaining ones (#6307)
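For context: Nim emits the ConvFromXtoItselfNotNeeded hint when a value is converted to the type it already has. Most hunks below simply drop such wrappers (`Epoch(...)`, `Slot(...)`, `cstring(...)`, `string ...`, `UInt256(...)` and similar around values that already carry those types), and the nim.cfg hunks drop the `--hint[ConvFromXtoItselfNotNeeded]:off` suppression so any remaining occurrences are reported instead of hidden. A minimal, self-contained sketch of the pattern, using illustrative definitions rather than the project's own:

type Epoch = distinct uint64

func `+`(a: Epoch, b: uint64): Epoch = Epoch(uint64(a) + b)

let
  exitEpoch = Epoch(10'u64)
  delay = 256'u64

# Before: `exitEpoch + delay` is already an Epoch, so the outer conversion
# is a no-op and the compiler emits ConvFromXtoItselfNotNeeded.
let before = Epoch(exitEpoch + delay)

# After: drop the redundant conversion; the expression's type is unchanged.
let after = exitEpoch + delay

doAssert uint64(before) == uint64(after)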

tersec 2024-05-22 11:56:37 +00:00 committed by GitHub
parent e0f8ea752b
commit b56a671122
21 changed files with 64 additions and 75 deletions

View File

@ -9,12 +9,8 @@
import
chronicles,
../spec/datatypes/[phase0, altair, bellatrix],
../spec/forks
from ../spec/datatypes/capella import SomeBeaconBlock, TrustedBeaconBlock
from ../spec/datatypes/deneb import SomeBeaconBlock, TrustedBeaconBlock
export chronicles, forks
type
@ -76,7 +72,7 @@ func init*(
deneb.SomeBeaconBlock | deneb.TrustedBeaconBlock |
electra.SomeBeaconBlock | electra.TrustedBeaconBlock): BlockRef =
BlockRef.init(
root, Opt.some Eth2Digest(blck.body.execution_payload.block_hash),
root, Opt.some blck.body.execution_payload.block_hash,
executionValid =
executionValid or blck.body.execution_payload.block_hash == ZERO_HASH,
blck.slot)

View File

@ -137,7 +137,7 @@ proc sendEth(web3: Web3, to: Eth1Address, valueEth: int): Future[TxHash] =
# TODO: Force json-rpc to generate 'data' field
# should not be needed anymore, new execution-api schema
# is using `input` field
data: some(newSeq[byte]()),
data: some(newSeq[byte]()),
gas: Quantity(3000000).some,
gasPrice: Quantity(1).some,
value: some(valueEth.u256 * 1000000000000000000.u256),
@ -273,7 +273,7 @@ proc main() {.async.} =
error "Failed to read an Eth1 private key from standard input"
if privateKey.len > 0:
conf.privateKey = privateKey.string
conf.privateKey = privateKey
let web3 = await initWeb3(conf.web3Url, conf.privateKey)

View File

@ -26,10 +26,10 @@ type
func parseBootstrapAddress*(address: string):
Result[enr.Record, cstring] =
let lowerCaseAddress = toLowerAscii(string address)
let lowerCaseAddress = toLowerAscii(address)
if lowerCaseAddress.startsWith("enr:"):
var enrRec: enr.Record
if enrRec.fromURI(string address):
if enrRec.fromURI(address):
return ok enrRec
return err "Invalid ENR bootstrap record"
elif lowerCaseAddress.startsWith("enode:"):

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2021-2023 Status Research & Development GmbH
# Copyright (c) 2021-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -10,5 +10,4 @@
--styleCheck:usages
--styleCheck:hint
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

View File

@ -46,7 +46,7 @@ proc listRemoteValidators*(
if item.kind == ValidatorKind.Remote and item.data.remotes.len == 1:
validators.add RemoteKeystoreInfo(
pubkey: item.pubkey,
url: HttpHostUri(item.data.remotes[0].url)
url: item.data.remotes[0].url
)
validators

View File

@ -688,8 +688,8 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
message = (await PayloadType.makeBeaconBlockForHeadAndSlot(
node, qrandao, proposer, qgraffiti, qhead, qslot)).valueOr:
return RestApiResponse.jsonError(Http500, error)
executionValue = Opt.some(UInt256(message.executionPayloadValue))
consensusValue = Opt.some(UInt256(message.consensusBlockValue))
executionValue = Opt.some(message.executionPayloadValue)
consensusValue = Opt.some(message.consensusBlockValue)
headers = consensusFork.getMaybeBlindedHeaders(
isBlinded = false, executionValue, consensusValue)
@ -914,7 +914,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
request.validator_index).pubkey
node.validatorMonitor[].addAutoMonitor(
validator_pubkey, ValidatorIndex(request.validator_index))
validator_pubkey, request.validator_index)
RestApiResponse.jsonMsgResponse(BeaconCommitteeSubscriptionSuccess)
@ -955,7 +955,7 @@ proc installValidatorApiHandlers*(router: var RestRouter, node: BeaconNode) =
validator_pubkey, item.until_epoch)
node.validatorMonitor[].addAutoMonitor(
validator_pubkey, ValidatorIndex(item.validator_index))
validator_pubkey, item.validator_index)
RestApiResponse.jsonMsgResponse(SyncCommitteeSubscriptionSuccess)

View File

@ -289,7 +289,7 @@ func initiate_validator_exit*(
# Set validator exit epoch and withdrawable epoch
validator.exit_epoch = exit_queue_epoch
validator.withdrawable_epoch =
Epoch(validator.exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
validator.exit_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
if validator.withdrawable_epoch < validator.exit_epoch:
return err("Invalid large withdrawable epoch")
state.validators.mitem(index) = validator

View File

@ -212,7 +212,7 @@ proc getLightClientBootstrap*(
let consensusForkRes = ConsensusFork.decodeString(
resp.headers.getString("eth-consensus-version"))
if consensusForkRes.isErr:
raiseRestDecodingBytesError(cstring(consensusForkRes.error))
raiseRestDecodingBytesError(consensusForkRes.error)
ForkedLightClientBootstrap.decodeHttpLightClientObject(
data, resp.contentType, consensusForkRes.get, cfg)
of 404:
@ -294,7 +294,7 @@ proc getLightClientFinalityUpdate*(
let consensusForkRes = ConsensusFork.decodeString(
resp.headers.getString("eth-consensus-version"))
if consensusForkRes.isErr:
raiseRestDecodingBytesError(cstring(consensusForkRes.error))
raiseRestDecodingBytesError(consensusForkRes.error)
ForkedLightClientFinalityUpdate.decodeHttpLightClientObject(
data, resp.contentType, consensusForkRes.get, cfg)
of 404:
@ -336,7 +336,7 @@ proc getLightClientOptimisticUpdate*(
let consensusForkRes = ConsensusFork.decodeString(
resp.headers.getString("eth-consensus-version"))
if consensusForkRes.isErr:
raiseRestDecodingBytesError(cstring(consensusForkRes.error))
raiseRestDecodingBytesError(consensusForkRes.error)
ForkedLightClientOptimisticUpdate.decodeHttpLightClientObject(
data, resp.contentType, consensusForkRes.get, cfg)
of 404:

View File

@ -291,7 +291,7 @@ template `$`*(x: WalletName): string =
# TODO: `burnMem` in nimcrypto could use distinctBase
# to make its usage less error-prone.
template burnMem*(m: var (Mnemonic|string)) =
ncrutils.burnMem(string m)
ncrutils.burnMem(distinctBase m)
template burnMem*(m: var KeySeed) =
ncrutils.burnMem(distinctBase m)
@ -324,7 +324,7 @@ const
englishWordsDigest =
"AD90BF3BEB7B0EB7E5ACD74727DC0DA96E0A280A258354E7293FB7E211AC03DB".toDigest
proc checkEnglishWords(): bool =
func checkEnglishWords(): bool =
if len(englishWords) != wordListLen:
false
else:
@ -341,7 +341,7 @@ func validateKeyPath*(path: string): Result[KeyPath, cstring] =
var digitCount: int
var number: BiggestUInt
try:
for elem in path.string.split("/"):
for elem in path.split("/"):
# TODO: doesn't "m" have to be the first character and is it the only
# place where it is valid?
if elem == "m":
@ -382,7 +382,7 @@ func isControlRune(r: Rune): bool =
let r = int r
(r >= 0 and r < 0x20) or (r >= 0x7F and r < 0xA0)
proc init*(T: type KeystorePass, input: string): T =
func init*(T: type KeystorePass, input: string): T =
for rune in toNFKD(input):
if not isControlRune(rune):
result.str.add rune
@ -395,7 +395,7 @@ func getSeed*(mnemonic: Mnemonic, password: KeystorePass): KeySeed =
template add(m: var Mnemonic, s: cstring) =
m.string.add s
proc generateMnemonic*(
func generateMnemonic*(
rng: var HmacDrbgContext,
words: openArray[cstring] = englishWords,
entropyParam: openArray[byte] = @[]): Mnemonic =
@ -429,12 +429,12 @@ proc generateMnemonic*(
result.add " "
result.add words[entropy.getBitsBE(firstBit..lastBit)]
proc cmpIgnoreCase(lhs: cstring, rhs: string): int =
func cmpIgnoreCase(lhs: cstring, rhs: string): int =
# TODO: This is a bit silly.
# Nim should have a `cmp` function for C strings.
cmpIgnoreCase($lhs, rhs)
proc validateMnemonic*(inputWords: string,
func validateMnemonic*(inputWords: string,
outputMnemonic: var Mnemonic): bool =
## Accept a case-insensitive input string and returns `true`
## if it represents a valid mnenomic. The `outputMnemonic`
@ -465,7 +465,7 @@ proc validateMnemonic*(inputWords: string,
return true
proc deriveChildKey*(parentKey: ValidatorPrivKey,
func deriveChildKey*(parentKey: ValidatorPrivKey,
index: Natural): ValidatorPrivKey =
let success = derive_child_secretKey(SecretKey result,
SecretKey parentKey,
@ -475,7 +475,7 @@ proc deriveChildKey*(parentKey: ValidatorPrivKey,
# into asserts inside the function.
doAssert success
proc deriveMasterKey*(seed: KeySeed): ValidatorPrivKey =
func deriveMasterKey*(seed: KeySeed): ValidatorPrivKey =
let success = derive_master_secretKey(SecretKey result,
seq[byte] seed)
# TODO `derive_master_secretKey` is reporting pre-condition
@ -483,17 +483,17 @@ proc deriveMasterKey*(seed: KeySeed): ValidatorPrivKey =
# into asserts inside the function.
doAssert success
proc deriveMasterKey*(mnemonic: Mnemonic,
func deriveMasterKey*(mnemonic: Mnemonic,
password: KeystorePass): ValidatorPrivKey =
deriveMasterKey(getSeed(mnemonic, password))
proc deriveChildKey*(masterKey: ValidatorPrivKey,
func deriveChildKey*(masterKey: ValidatorPrivKey,
path: KeyPath): ValidatorPrivKey =
result = masterKey
for idx in pathNodes(path):
result = deriveChildKey(result, idx)
proc deriveChildKey*(masterKey: ValidatorPrivKey,
func deriveChildKey*(masterKey: ValidatorPrivKey,
path: openArray[Natural]): ValidatorPrivKey =
result = masterKey
for idx in path:
@ -503,12 +503,12 @@ proc deriveChildKey*(masterKey: ValidatorPrivKey,
# if we fail we want to scrub secrets from memory
result = deriveChildKey(result, idx)
proc keyFromPath*(mnemonic: Mnemonic,
func keyFromPath*(mnemonic: Mnemonic,
password: KeystorePass,
path: KeyPath): ValidatorPrivKey =
deriveChildKey(deriveMasterKey(mnemonic, password), path)
proc shaChecksum(key, cipher: openArray[byte]): Sha256Digest =
func shaChecksum(key, cipher: openArray[byte]): Sha256Digest =
var ctx: sha256
ctx.init()
ctx.update(key)
@ -681,7 +681,7 @@ proc readValue*(r: var JsonReader[DefaultFlavor], value: var Kdf)
readValueImpl(r, value)
{.pop.}
proc readValue*(r: var JsonReader, value: var (Checksum|Cipher|Kdf)) =
func readValue*(r: var JsonReader, value: var (Checksum|Cipher|Kdf)) =
static: raiseAssert "Unknown flavor `JsonReader[" & $typeof(r).Flavor &
"]` for `readValue` of `" & $typeof(value) & "`"
@ -951,7 +951,7 @@ func areValid(params: ScryptParams): bool =
params.p == scryptParams.p and
params.salt.bytes.len > 0
proc decryptCryptoField*(crypto: Crypto, decKey: openArray[byte],
func decryptCryptoField*(crypto: Crypto, decKey: openArray[byte],
outSecret: var seq[byte]): DecryptionStatus =
if crypto.cipher.message.bytes.len == 0:
return DecryptionStatus.InvalidKeystore
@ -977,7 +977,7 @@ proc decryptCryptoField*(crypto: Crypto, decKey: openArray[byte],
aesCipher.clear()
DecryptionStatus.Success
proc getDecryptionKey*(crypto: Crypto, password: KeystorePass,
func getDecryptionKey*(crypto: Crypto, password: KeystorePass,
decKey: var seq[byte]): DecryptionStatus =
let res =
case crypto.kdf.function
@ -996,7 +996,7 @@ proc getDecryptionKey*(crypto: Crypto, password: KeystorePass,
decKey = res
DecryptionStatus.Success
proc decryptCryptoField*(crypto: Crypto,
func decryptCryptoField*(crypto: Crypto,
password: KeystorePass,
outSecret: var seq[byte]): DecryptionStatus =
# https://github.com/ethereum/wiki/wiki/Web3-Secret-Storage-Definition
@ -1027,7 +1027,7 @@ template parseRemoteKeystore*(jsonContent: string): RemoteKeystore =
requireAllFields = false,
allowUnknownFields = true)
proc getSaltKey(keystore: Keystore, password: KeystorePass): KdfSaltKey =
func getSaltKey(keystore: Keystore, password: KeystorePass): KdfSaltKey =
let digest =
case keystore.crypto.kdf.function
of kdfPbkdf2:
@ -1050,8 +1050,8 @@ proc getSaltKey(keystore: Keystore, password: KeystorePass): KdfSaltKey =
h.update(toBytesLE(uint64(params.r)))
KdfSaltKey(digest.data)
proc `==`*(a, b: KdfSaltKey): bool {.borrow.}
proc hash*(salt: KdfSaltKey): Hash {.borrow.}
func `==`*(a, b: KdfSaltKey): bool {.borrow.}
func hash*(salt: KdfSaltKey): Hash {.borrow.}
{.push warning[ProveField]:off.}
func `==`*(a, b: Kdf): bool =
@ -1089,7 +1089,7 @@ func init*(t: typedesc[KeystoreCacheRef],
expireTime: expireTime
)
proc clear*(cache: KeystoreCacheRef) =
func clear*(cache: KeystoreCacheRef) =
cache.table.clear()
proc pruneExpiredKeys*(cache: KeystoreCacheRef) =
@ -1110,7 +1110,7 @@ proc init*(t: typedesc[KeystoreCacheItem], keystore: Keystore,
cipher: keystore.crypto.cipher, decryptionKey: @key,
timestamp: Moment.now())
proc getCachedKey*(cache: KeystoreCacheRef,
func getCachedKey*(cache: KeystoreCacheRef,
keystore: Keystore, password: KeystorePass): Opt[seq[byte]] =
if isNil(cache): return Opt.none(seq[byte])
let
@ -1132,7 +1132,7 @@ proc setCachedKey*(cache: KeystoreCacheRef, keystore: Keystore,
let saltKey = keystore.getSaltKey(password)
cache.table[saltKey] = KeystoreCacheItem.init(keystore, key)
proc destroyCacheKey*(cache: KeystoreCacheRef,
func destroyCacheKey*(cache: KeystoreCacheRef,
keystore: Keystore, password: KeystorePass) =
if isNil(cache): return
let saltKey = keystore.getSaltKey(password)
@ -1206,7 +1206,7 @@ proc readValue*(reader: var JsonReader, value: var lcrypto.PublicKey) {.
# TODO: Can we provide better diagnostic?
raiseUnexpectedValue(reader, "Valid hex-encoded public key expected")
proc decryptNetKeystore*(nkeystore: NetKeystore,
func decryptNetKeystore*(nkeystore: NetKeystore,
password: KeystorePass): KsResult[lcrypto.PrivateKey] =
var secret: seq[byte]
defer: burnMem(secret)
@ -1221,7 +1221,7 @@ proc decryptNetKeystore*(nkeystore: NetKeystore,
else:
err $status
proc decryptNetKeystore*(nkeystore: JsonString,
func decryptNetKeystore*(nkeystore: JsonString,
password: KeystorePass): KsResult[lcrypto.PrivateKey] =
try:
let keystore = parseNetKeystore(string nkeystore)
@ -1229,10 +1229,10 @@ proc decryptNetKeystore*(nkeystore: JsonString,
except SerializationError as exc:
return err(exc.formatMsg("<keystore>"))
proc generateKeystoreSalt*(rng: var HmacDrbgContext): seq[byte] =
func generateKeystoreSalt*(rng: var HmacDrbgContext): seq[byte] =
rng.generateBytes(keyLen)
proc createCryptoField(kdfKind: KdfKind,
func createCryptoField(kdfKind: KdfKind,
rng: var HmacDrbgContext,
secret: openArray[byte],
password = KeystorePass.init "",
@ -1339,7 +1339,7 @@ proc createKeystore*(kdfKind: KdfKind,
uuid: $uuid,
version: 4)
proc createRemoteKeystore*(pubKey: ValidatorPubKey, remoteUri: HttpHostUri,
func createRemoteKeystore*(pubKey: ValidatorPubKey, remoteUri: HttpHostUri,
version = 1'u64, description = "",
remoteType = RemoteSignerType.Web3Signer,
flags: set[RemoteKeystoreFlag] = {}): RemoteKeystore =
@ -1387,10 +1387,10 @@ func makeWithdrawalCredentials*(k: ValidatorPubKey): Eth2Digest =
bytes
# https://github.com/ethereum/consensus-specs/blob/v1.5.0-alpha.2/specs/phase0/deposit-contract.md#withdrawal-credentials
proc makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
func makeWithdrawalCredentials*(k: CookedPubKey): Eth2Digest =
makeWithdrawalCredentials(k.toPubKey())
proc prepareDeposit*(cfg: RuntimeConfig,
func prepareDeposit*(cfg: RuntimeConfig,
withdrawalPubKey: CookedPubKey,
signingKey: ValidatorPrivKey, signingPubKey: CookedPubKey,
amount = MAX_EFFECTIVE_BALANCE.Gwei): DepositData =
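Besides dropping redundant conversions, the keystore hunks above also switch side-effect-free routines from `proc` to `func` (i.e. `proc` with `{.noSideEffect.}` enforced by the compiler) and have `burnMem` use `distinctBase` rather than an explicit `string` conversion. A minimal sketch of both idioms; `wordCount` is a hypothetical helper and `Mnemonic` here is a simplified stand-in for the project's type:

import std/typetraits  # distinctBase

type Mnemonic = distinct string

func wordCount(m: Mnemonic): int =
  ## `func` compiles only because the body has no side effects: it just
  ## reads its argument and builds a result.
  var inWord = false
  for ch in distinctBase(m):  # the underlying string, without writing `string m`
    if ch == ' ':
      inWord = false
    elif not inWord:
      inWord = true
      inc result

when isMainModule:
  doAssert wordCount(Mnemonic("legal winner thank year wave")) == 5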

View File

@ -579,7 +579,7 @@ func process_execution_layer_withdrawal_request*(
exit_queue_epoch =
compute_exit_epoch_and_update_churn(cfg, state, to_withdraw, cache)
withdrawable_epoch =
Epoch(exit_queue_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
exit_queue_epoch + cfg.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
# In theory can fail, but failing/early returning here is indistinguishable
discard state.pending_partial_withdrawals.add(PendingPartialWithdrawal(

View File

@ -110,7 +110,7 @@ proc initQueue[A, B](man: SyncManager[A, B]) =
# there is present check `needsBackfill().
firstSlot
else:
Slot(firstSlot - 1'u64)
firstSlot - 1'u64
man.queue = SyncQueue.init(A, man.direction, startSlot, lastSlot,
man.chunkSize, man.getSafeSlot,
man.blockVerifier, 1, man.ident)

View File

@ -1511,7 +1511,7 @@ proc `+`*(slot: Slot, epochs: Epoch): Slot =
func finish_slot*(epoch: Epoch): Slot =
## Return the last slot of ``epoch``.
Slot((epoch + 1).start_slot() - 1)
(epoch + 1).start_slot() - 1
proc getGraffitiBytes*(vc: ValidatorClientRef,
validator: AttachedValidator): GraffitiBytes =

View File

@ -126,14 +126,13 @@ static:
"15227487_86601706.echop"]: # Wrong extension
doAssert not filename.matchFilenameAggregatedFiles
proc getUnaggregatedFilesEpochRange*(
dir: string
): tuple[firstEpoch, lastEpoch: Epoch] {.raises: [OSError, ValueError].} =
proc getUnaggregatedFilesEpochRange*(dir: string):
tuple[firstEpoch, lastEpoch: Epoch] {.raises: [OSError, ValueError].} =
var smallestEpochFileName =
'9'.repeat(epochInfoFileNameDigitsCount) & epochFileNameExtension
var largestEpochFileName =
'0'.repeat(epochInfoFileNameDigitsCount) & epochFileNameExtension
for (_, fn) in walkDir(dir.string, relative = true):
for (_, fn) in walkDir(dir, relative = true):
if fn.matchFilenameUnaggregatedFiles:
if fn < smallestEpochFileName:
smallestEpochFileName = fn
@ -151,7 +150,7 @@ proc getUnaggregatedFilesLastEpoch*(
proc getAggregatedFilesLastEpoch*(
dir: string): Epoch {.raises: [OSError, ValueError].}=
var largestEpochInFileName = 0'u
for (_, fn) in walkDir(dir.string, relative = true):
for (_, fn) in walkDir(dir, relative = true):
if fn.matchFilenameAggregatedFiles:
let fileLastEpoch = parseUInt(
fn[epochInfoFileNameDigitsCount + 1 .. 2 * epochInfoFileNameDigitsCount])

View File

@ -553,8 +553,8 @@ proc sendDeposits(deposits: seq[LaunchPadDeposit],
var web3 = await initWeb3(web3Url, privateKey)
let gasPrice = int(await web3.provider.eth_gasPrice()) * 2
let depositContract = web3.contractSender(DepositContract,
Eth1Address depositContractAddress)
let depositContract = web3.contractSender(
DepositContract, depositContractAddress)
for i in 4200 ..< deposits.len:
let dp = deposits[i] as DepositData
@ -656,7 +656,7 @@ when isMainModule:
error "Failed to read an Eth1 private key from standard input"
if privateKey.len > 0:
conf.privateKey = privateKey.string
conf.privateKey = privateKey
case conf.cmd
of StartUpCommand.createTestnet:

View File

@ -11,5 +11,4 @@
--styleCheck:usages
--styleCheck:error
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

View File

@ -326,8 +326,7 @@ cli do(slots = SLOTS_PER_EPOCH * 7,
contribution: contribution,
selection_proof: aggregator.selectionProof)
validatorPrivKey =
MockPrivKeys[aggregator.validatorIdx.ValidatorIndex]
validatorPrivKey = MockPrivKeys[aggregator.validatorIdx]
signedContributionAndProof = SignedContributionAndProof(
message: contributionAndProof,

View File

@ -11,5 +11,4 @@
--styleCheck:usages
--styleCheck:error
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

View File

@ -1,5 +1,5 @@
# beacon_chain
# Copyright (c) 2019-2023 Status Research & Development GmbH
# Copyright (c) 2019-2024 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
@ -11,5 +11,4 @@
--styleCheck:usages
--styleCheck:hint
--hint[ConvFromXtoItselfNotNeeded]:off
--hint[Processing]:off

View File

@ -1336,8 +1336,7 @@ suite "State history":
res = process_slots(cfg, dag.headState, 5.Slot, cache, info, flags = {})
check res.isOk
for i in 0.Slot .. 5.Slot:
check state.getBlockIdAtSlot(i) ==
Opt.some BlockSlotId.init(gen, i.Slot)
check state.getBlockIdAtSlot(i) == Opt.some BlockSlotId.init(gen, i)
check state.getBlockIdAtSlot(6.Slot).isNone
# Fill 5 slots

View File

@ -103,7 +103,7 @@ suite "Sync committee pool":
privkey2 = MockPrivKeys[1.ValidatorIndex]
bid1 = BlockId(
slot: Slot(cfg.BELLATRIX_FORK_EPOCH.start_slot - 1),
slot: cfg.BELLATRIX_FORK_EPOCH.start_slot - 1,
root: eth2digest(@[1.byte]))
sig1 = get_sync_committee_message_signature(

View File

@ -369,7 +369,7 @@ suite "SyncManager test suite":
aq = newAsyncQueue[BlockEntry]()
chunkSize = 3'u64
numberOfChunks = 3'u64
finishSlot = Slot(startSlot + numberOfChunks * chunkSize - 1'u64)
finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64
queueSize = 1
var counter =
@ -734,7 +734,7 @@ suite "SyncManager test suite":
startSlot = Slot(0)
chunkSize = SLOTS_PER_EPOCH
numberOfChunks = 4'u64
finishSlot = Slot(startSlot + numberOfChunks * chunkSize - 1'u64)
finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64
queueSize = 1
var counter = int(startSlot)
@ -855,7 +855,7 @@ suite "SyncManager test suite":
startSlot = Slot(0)
chunkSize = SLOTS_PER_EPOCH
numberOfChunks = 1'u64
finishSlot = Slot(startSlot + numberOfChunks * chunkSize - 1'u64)
finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64
queueSize = 1
var counter = int(startSlot)
@ -902,7 +902,7 @@ suite "SyncManager test suite":
startSlot = Slot(0)
chunkSize = SLOTS_PER_EPOCH
numberOfChunks = 4'u64
finishSlot = Slot(startSlot + numberOfChunks * chunkSize - 1'u64)
finishSlot = startSlot + numberOfChunks * chunkSize - 1'u64
queueSize = 1
var