Prefer converting `int` to `uint64` and switch `foo.len.uint64` to `.len64` (#1375)
* avoid converting from uint64 to int and, where feasible, avoid int type conversions entirely (a minimal sketch of the resulting pattern follows the commit metadata below)
* .len.uint64 -> .len64
* fix 32-bit compilation
* try keeping state_sim loop variable/bounds as int for 32-bit Azure
* len64 -> lenu64
parent e5a76b0047
commit 90708a8287
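The pattern applied throughout the diff is small: instead of narrowing a uint64 index down to int (which is only 32 bits on 32-bit targets) or spelling `.len.uint64` at every call site, a `lenu64` helper keeps length comparisons in uint64. A minimal sketch of the idea follows; the `validators` sequence and `idx` value are invented for illustration, while the template itself is the one added in the diff below.

# lenu64 as introduced by this commit: lift a collection's length to uint64
template lenu64(x: untyped): untyped =
  x.len.uint64

let validators = @["v0", "v1", "v2"]  # stand-in for a validator registry
let idx = 7'u64                       # spec-level indices are uint64

# Before: `.len.uint64` at each call site, or narrowing idx with `.int`
if idx >= validators.len.uint64:
  echo "invalid index (old spelling)"

# After: the same comparison, spelled through the helper, with no int narrowing
if idx >= validators.lenu64:
  echo "invalid index"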
@@ -286,7 +286,7 @@ proc isValidAggregatedAttestation*(
   # aggregate.data.slot by the validator with index
   # aggregate_and_proof.aggregator_index.
   # get_slot_signature(state, aggregate.data.slot, privkey)
-  if aggregate_and_proof.aggregator_index >= state.validators.len.uint64:
+  if aggregate_and_proof.aggregator_index >= state.validators.lenu64:
     debug "Invalid aggregator_index"
     return false

@@ -113,13 +113,13 @@ proc slotIndex(
     pool.startingSlot =
       state.finalized_checkpoint.epoch.compute_start_slot_at_epoch()

-  if pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
+  if pool.startingSlot + pool.mapSlotsToAttestations.lenu64 <= attestationSlot:
     trace "Growing attestation pool",
       attestationSlot = shortLog(attestationSlot),
       startingSlot = shortLog(pool.startingSlot)

     # Make sure there's a pool entry for every slot, even when there's a gap
-    while pool.startingSlot + pool.mapSlotsToAttestations.len.uint64 <= attestationSlot:
+    while pool.startingSlot + pool.mapSlotsToAttestations.lenu64 <= attestationSlot:
       pool.mapSlotsToAttestations.addLast(AttestationsSeen())

   if pool.startingSlot <

@@ -321,11 +321,11 @@ proc getAttestationsForSlot*(pool: AttestationPool, newBlockSlot: Slot):
     attestationSlot = newBlockSlot - MIN_ATTESTATION_INCLUSION_DELAY

   if attestationSlot < pool.startingSlot or
-      attestationSlot >= pool.startingSlot + pool.mapSlotsToAttestations.len.uint64:
+      attestationSlot >= pool.startingSlot + pool.mapSlotsToAttestations.lenu64:
     info "No attestations matching the slot range",
       attestationSlot = shortLog(attestationSlot),
       startingSlot = shortLog(pool.startingSlot),
-      endingSlot = shortLog(pool.startingSlot + pool.mapSlotsToAttestations.len.uint64)
+      endingSlot = shortLog(pool.startingSlot + pool.mapSlotsToAttestations.lenu64)
     return none(AttestationsSeen)

   let slotDequeIdx = int(attestationSlot - pool.startingSlot)

@@ -402,7 +402,7 @@ proc getAttestationsForBlock*(pool: AttestationPool,

       result.add(attestation)

-      if result.len >= MAX_ATTESTATIONS.int:
+      if result.lenu64 >= MAX_ATTESTATIONS:
         debug "getAttestationsForBlock: returning early after hitting MAX_ATTESTATIONS",
           attestationSlot = newBlockSlot - 1
         return

@@ -217,7 +217,7 @@ proc isValidBeaconBlock*(
     pool.dag, pool.quarantine, signed_beacon_block, current_slot, flags)

 func count_active_validators*(epochInfo: EpochRef): uint64 =
-  epochInfo.shuffled_active_validator_indices.len.uint64
+  epochInfo.shuffled_active_validator_indices.lenu64

 func get_committee_count_per_slot*(epochInfo: EpochRef): uint64 =
   get_committee_count_per_slot(count_active_validators(epochInfo))

@@ -387,7 +387,7 @@ proc writeChunk*(conn: Connection,
   if responseCode.isSome:
     output.write byte(responseCode.get)

-  output.write varintBytes(payload.len.uint64)
+  output.write varintBytes(payload.lenu64)

   if noSnappy:
     output.write(payload)

@@ -140,7 +140,7 @@ func getDepositsInRange(eth1Chain: Eth1Chain,
   # This function should be used with indices obtained with `eth1Chain.findBlock`.
   # This guarantess that both of these indices will be valid:
   doAssert sinceBlock >= firstBlockInCache and
-           int(latestBlock - firstBlockInCache) < eth1Chain.blocks.len
+           (latestBlock - firstBlockInCache) < eth1Chain.blocks.lenu64
   let
     sinceBlockIdx = sinceBlock - firstBlockInCache
     latestBlockIdx = latestBlock - firstBlockInCache

@@ -195,7 +195,7 @@ func isSuccessorBlock(eth1Chain: Eth1Chain, newBlock: Eth1Block): bool =
       if lastBlock.number >= newBlock.number: return false
       lastBlock.voteData.deposit_count

-  (currentDepositCount + newBlock.deposits.len.uint64) == newBlock.voteData.deposit_count
+  (currentDepositCount + newBlock.deposits.lenu64) == newBlock.voteData.deposit_count

 func addSuccessorBlock*(eth1Chain: var Eth1Chain, newBlock: Eth1Block): bool =
   result = isSuccessorBlock(eth1Chain, newBlock)

@@ -15,20 +15,20 @@ import
   ../../nbench/bench_lab

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_valid_merkle_branch
-func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool {.nbench.}=
+func is_valid_merkle_branch*(leaf: Eth2Digest, branch: openarray[Eth2Digest], depth: int, index: uint64, root: Eth2Digest): bool {.nbench.}=
   ## Check if ``leaf`` at ``index`` verifies against the Merkle ``root`` and
   ## ``branch``.
   var
     value = leaf
     buf: array[64, byte]

-  for i in 0 ..< depth.int:
+  for i in 0 ..< depth:
     if (index div (1'u64 shl i)) mod 2 != 0:
-      buf[0..31] = branch[i.int].data
+      buf[0..31] = branch[i].data
       buf[32..63] = value.data
     else:
       buf[0..31] = value.data
-      buf[32..63] = branch[i.int].data
+      buf[32..63] = branch[i].data
     value = eth2digest(buf)
   value == root

@@ -222,7 +222,7 @@ proc initialize_beacon_state_from_eth1*(
   # validators - there needs to be at least one member in each committee -
   # good to know for testing, though arguably the system is not that useful at
   # at that point :)
-  doAssert deposits.len >= SLOTS_PER_EPOCH.int
+  doAssert deposits.lenu64 >= SLOTS_PER_EPOCH

   var state = BeaconStateRef(
     fork: Fork(

@@ -290,7 +290,7 @@ func is_valid_genesis_state*(preset: RuntimePreset,
   if state.genesis_time < preset.MIN_GENESIS_TIME:
     return false
   # This is an okay get_active_validator_indices(...) for the time being.
-  if active_validator_indices.len.uint64 < preset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
+  if active_validator_indices.lenu64 < preset.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT:
     return false
   true

@@ -421,7 +421,7 @@ func is_valid_indexed_attestation*(

   # Not from spec, but this function gets used in front-line roles, not just
   # behind firewall.
-  let num_validators = state.validators.len.uint64
+  let num_validators = state.validators.lenu64
   if anyIt(indexed_attestation.attesting_indices, it >= num_validators):
     trace "indexed attestation: not all indices valid validators"
     return false

@@ -435,7 +435,7 @@ func is_valid_indexed_attestation*(
   # Verify aggregate signature
   if skipBLSValidation notin flags:
     # TODO: fuse loops with blsFastAggregateVerify
-    let pubkeys = mapIt(indices, state.validators[it.int].pubkey)
+    let pubkeys = mapIt(indices, state.validators[it].pubkey)
     if not verify_attestation_signature(
         state.fork, state.genesis_validators_root, indexed_attestation.data,
         pubkeys, indexed_attestation.signature):

@@ -68,7 +68,7 @@ const
   # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/p2p-interface.md#configuration
   ATTESTATION_PROPAGATION_SLOT_RANGE* = 32

-  SLOTS_PER_ETH1_VOTING_PERIOD* = Slot(EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH)
+  SLOTS_PER_ETH1_VOTING_PERIOD* = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH

   DEPOSIT_CONTRACT_TREE_DEPTH* = 32
   BASE_REWARDS_PER_EPOCH* = 4

@@ -533,6 +533,7 @@ proc readValue*(reader: var JsonReader, value: var ForkDigest)
     raiseUnexpectedValue(reader, "Hex string of 4 bytes expected")

 # `ValidatorIndex` seq handling.
+# TODO harden these against uint32/uint64 to int type conversion risks
 func max*(a: ValidatorIndex, b: int) : auto =
   max(a.int, b)

@@ -610,6 +611,9 @@ template assignClone*[T: not ref](x: T): ref T =
 template newClone*[T](x: ref T not nil): ref T =
   newClone(x[])

+template lenu64*(x: untyped): untyped =
+  x.len.uint64
+
 func `$`*(v: ForkDigest | Version): string =
   toHex(array[4, byte](v))

@@ -62,7 +62,7 @@ func count_active_validators*(state: BeaconState,
                               cache: StateCache): uint64 =
   if epoch in cache.shuffled_active_validator_indices:
     try:
-      cache.shuffled_active_validator_indices[epoch].len.uint64
+      cache.shuffled_active_validator_indices[epoch].lenu64
     except KeyError:
       raiseAssert "just checked"
   else:

@@ -67,7 +67,7 @@ proc verify_block_signature*(
     state: BeaconState, signed_block: SomeSignedBeaconBlock): bool {.nbench.} =
   let
     proposer_index = signed_block.message.proposer_index
-  if proposer_index >= state.validators.len.uint64:
+  if proposer_index >= state.validators.lenu64:
     notice "Invalid proposer index in block",
       blck = shortLog(signed_block.message)
     return false

@@ -125,7 +125,8 @@ proc process_randao(
 func process_eth1_data(state: var BeaconState, body: SomeBeaconBlockBody) {.nbench.}=
   state.eth1_data_votes.add body.eth1_data

-  if state.eth1_data_votes.asSeq.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD.int:
+  if state.eth1_data_votes.asSeq.count(body.eth1_data).uint64 * 2 >
+      SLOTS_PER_ETH1_VOTING_PERIOD:
     state.eth1_data = body.eth1_data

 # https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/specs/phase0/beacon-chain.md#is_slashable_validator

@@ -146,7 +147,7 @@ proc process_proposer_slashing*(
     header_2 = proposer_slashing.signed_header_2.message

   # Not from spec
-  if header_1.proposer_index >= state.validators.len.uint64:
+  if header_1.proposer_index >= state.validators.lenu64:
     return err("process_proposer_slashing: invalid proposer index")

   # Verify header slots match

@@ -218,7 +219,7 @@ proc process_attester_slashing*(
       toHashSet(attestation_1.attesting_indices.asSeq),
       toHashSet(attestation_2.attesting_indices.asSeq)).items), system.cmp):
     if is_slashable_validator(
-        state.validators[index.int], get_current_epoch(state)):
+        state.validators[index], get_current_epoch(state)):
       slash_validator(state, index.ValidatorIndex, stateCache)
       slashed_any = true
   if not slashed_any:

@@ -234,10 +235,10 @@ proc process_voluntary_exit*(
   let voluntary_exit = signed_voluntary_exit.message

   # Not in spec. Check that validator_index is in range
-  if voluntary_exit.validator_index >= state.validators.len.uint64:
+  if voluntary_exit.validator_index >= state.validators.lenu64:
     return err("Exit: invalid validator index")

-  let validator = state.validators[voluntary_exit.validator_index.int]
+  let validator = state.validators[voluntary_exit.validator_index]

   # Verify the validator is active
   if not is_active_validator(validator, get_current_epoch(state)):

@@ -134,7 +134,7 @@ proc process_justification_and_finalization*(state: var BeaconState,
   # This is a somewhat expensive approach
   let active_validator_indices {.used.} =
     toHashSet(mapIt(
-      get_active_validator_indices(state, get_current_epoch(state)), it.int))
+      get_active_validator_indices(state, get_current_epoch(state)), it.uint32))

   let matching_target_attestations_previous =
     get_matching_target_attestations(state, previous_epoch) # Previous epoch

@@ -154,11 +154,11 @@ proc process_justification_and_finalization*(state: var BeaconState,
     missing_all_validators=
       difference(active_validator_indices,
         toHashSet(mapIt(get_attesting_indices(state,
-          matching_target_attestations_previous, stateCache), it.int))),
+          matching_target_attestations_previous, stateCache), it.uint32))),
     missing_unslashed_validators=
       difference(active_validator_indices,
         toHashSet(mapIt(get_unslashed_attesting_indices(state,
-          matching_target_attestations_previous, stateCache), it.int))),
+          matching_target_attestations_previous, stateCache), it.uint32))),
     prev_attestations_len=len(state.previous_epoch_attestations),
     cur_attestations_len=len(state.current_epoch_attestations),
     num_active_validators=len(active_validator_indices),

@@ -87,7 +87,7 @@ func get_shuffled_active_validator_indices*(state: BeaconState, epoch: Epoch):
   mapIt(
     get_shuffled_seq(
       get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),
-      active_validator_indices.len.uint64),
+      active_validator_indices.lenu64),
     active_validator_indices[it])

 func get_shuffled_active_validator_indices*(

@@ -155,7 +155,7 @@ func get_beacon_committee*(

   try:
     let committees_per_slot = get_committee_count_per_slot(
-      cache.shuffled_active_validator_indices[epoch].len.uint64)
+      cache.shuffled_active_validator_indices[epoch].lenu64)
     compute_committee(
       cache.shuffled_active_validator_indices[epoch],
       get_seed(state, epoch, DOMAIN_BEACON_ATTESTER),

@@ -214,7 +214,7 @@ func compute_proposer_index(state: BeaconState, indices: seq[ValidatorIndex],
   if len(indices) == 0:
     return none(ValidatorIndex)

-  let seq_len = indices.len.uint64
+  let seq_len = indices.lenu64

   var
     i = 0'u64

@@ -74,7 +74,7 @@ proc getValidatorInfoFromValidatorId(
     var valIdx: BiggestUInt
     if parseBiggestUInt(validatorId, valIdx) != validatorId.len:
       raise newException(CatchableError, "Not a valid index")
-    if state.validators.len >= valIdx.int:
+    if state.validators.lenu64 >= valIdx:
       raise newException(CatchableError, "Index out of bounds")
     state.validators[valIdx]

@@ -361,7 +361,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
       if ca.isSome:
         result.add((public_key: pubkey,
                     committee_index: ca.get.b,
-                    committee_length: ca.get.a.len.uint64,
+                    committee_length: ca.get.a.lenu64,
                     validator_committee_index: ca.get.a.find(idx.ValidatorIndex).uint64,
                     slot: ca.get.c))

@@ -148,22 +148,22 @@ proc nfuzz_shuffle(input_seed: ptr byte, xoutput: var openArray[uint64]): bool
     {.exportc, raises: [Defect].} =
   var seed: Eth2Digest
   # Should be OK as max 2 bytes are passed by the framework.
-  let list_size = xoutput.len.uint64
+  let list_size = xoutput.len

   copyMem(addr(seed.data), input_seed, sizeof(seed.data))

   var shuffled_seq: seq[ValidatorIndex]
-  shuffled_seq = get_shuffled_seq(seed, list_size)
+  shuffled_seq = get_shuffled_seq(seed, list_size.uint64)

   doAssert(
-    list_size == shuffled_seq.len.uint64,
+    list_size == shuffled_seq.len,
     "Shuffled list should be of requested size."
   )

   for i in 0..<list_size:
     # ValidatorIndex is currently wrongly uint32 so we copy this 1 by 1,
     # assumes passed xoutput is zeroed.
-    copyMem(offset(addr xoutput, i.int), shuffled_seq[i.int].unsafeAddr,
+    copyMem(offset(addr xoutput, i), shuffled_seq[i].unsafeAddr,
             sizeof(ValidatorIndex))

-  result = true
+  true

@@ -85,8 +85,8 @@ cli do(slots = SLOTS_PER_EPOCH * 6,
         blockAttestations = attestations.getOrDefault(attestations_idx)

       attestations.del attestations_idx
-      doAssert len(attestations) <=
-        (SLOTS_PER_EPOCH.int + MIN_ATTESTATION_INCLUSION_DELAY.int)
+      doAssert attestations.lenu64 <=
+        SLOTS_PER_EPOCH + MIN_ATTESTATION_INCLUSION_DELAY

       let t =
         if (state[].data.slot > GENESIS_SLOT and

@@ -58,7 +58,7 @@ template mockGenesisDepositsImpl(
   # (it can still be skipped later).
   if skipBlsValidation in flags:
     # 1st loop - build deposit data
-    for valIdx in 0 ..< validatorCount.int:
+    for valIdx in 0 ..< validatorCount:
       # Directly build the Deposit in-place for speed
       result.setLen(valIdx + 1)

@@ -71,7 +71,7 @@ template mockGenesisDepositsImpl(
     var depositsData: seq[DepositData]

     # 1st loop - build deposit data
-    for valIdx in 0 ..< validatorCount.int:
+    for valIdx in 0 ..< validatorCount:
       # Directly build the Deposit in-place for speed
       result.setLen(valIdx + 1)

@@ -25,7 +25,7 @@ suiteReport "[Unit - Spec - Block processing] Attestations " & preset():

   const NumValidators = uint64(8) * SLOTS_PER_EPOCH
   let genesisState = newClone(initGenesisState(NumValidators))
-  doAssert genesisState.data.validators.len == int NumValidators
+  doAssert genesisState.data.validators.lenu64 == NumValidators

   template valid_attestation(name: string, body: untyped): untyped {.dirty.}=
     # Process a valid attestation

@@ -27,7 +27,7 @@ suiteReport "[Unit - Spec - Block processing] Deposits " & preset():

   const NumValidators = uint64 5 * SLOTS_PER_EPOCH
   let genesisState = newClone(initGenesisState(NumValidators))
-  doAssert genesisState.data.validators.len == int NumValidators
+  doAssert genesisState.data.validators.lenu64 == NumValidators

   template valid_deposit(deposit_amount: uint64, name: string): untyped =
     timedTest "Deposit " & name & " MAX_EFFECTIVE_BALANCE balance (" &

@@ -218,7 +218,7 @@ proc payload =

   const NumValidators = uint64(8) * SLOTS_PER_EPOCH
   let genesisState = newClone(initGenesisState(NumValidators))
-  doAssert genesisState.data.validators.len == int NumValidators
+  doAssert genesisState.data.validators.lenu64 == NumValidators

   setup:
     var state = assignClone(genesisState[])

@@ -16,4 +16,4 @@ suiteReport "Beacon state" & preset():
   timedTest "Smoke test initialize_beacon_state_from_eth1" & preset():
     let state = initialize_beacon_state_from_eth1(
       defaultRuntimePreset, Eth2Digest(), 0, makeInitialDeposits(SLOTS_PER_EPOCH, {}), {})
-    check: state.validators.len == SLOTS_PER_EPOCH.int
+    check: state.validators.lenu64 == SLOTS_PER_EPOCH

@@ -137,7 +137,7 @@ suiteReport "SSZ navigator":
   timedTest "basictype":
     var leaves = HashList[uint64, 1'i64 shl 3]()
     while leaves.len < leaves.maxLen:
-      leaves.add leaves.len.uint64
+      leaves.add leaves.lenu64
     check hash_tree_root(leaves) == hash_tree_root(leaves.data)

 suiteReport "SSZ dynamic navigator":

@@ -57,7 +57,7 @@ suiteReport "Block processing" & preset():
       previous_block_root = genesisRoot
       cache = StateCache()

-    for i in 1..SLOTS_PER_EPOCH.int:
+    for i in 1..SLOTS_PER_EPOCH:
       let new_block = makeTestBlock(state[], previous_block_root, cache)

       let block_ok = state_transition(defaultRuntimePreset, state[], new_block, {}, noRollback)

@@ -117,7 +117,7 @@ proc addTestBlock*(
     # Keep deposit counts internally consistent.
     Eth1Data(
       deposit_root: eth1_data.deposit_root,
-      deposit_count: state.data.eth1_deposit_index + deposits.len.uint64,
+      deposit_count: state.data.eth1_deposit_index + deposits.lenu64,
       block_hash: eth1_data.block_hash),
     graffiti,
     attestations,