cache balances, more type fixes
parent be92842944
commit fec01829c4
@@ -274,7 +274,7 @@ type

     # Registry
     validators*: HashList[Validator, VALIDATOR_REGISTRY_LIMIT]
-    balances*: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+    balances*: HashList[uint64, VALIDATOR_REGISTRY_LIMIT]

     # Randomness
     randao_mixes*: HashArray[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
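The hunk above switches balances from List to HashList, so balance writes go through the same per-chunk hash caching that validators already uses. As a rough illustration of that idea only, here is a minimal hypothetical sketch (a toy digest stands in for SSZ hashing; none of these names are the repository's): one digest is kept per 32-byte chunk of packed uint64 values, and a write invalidates just the chunk it touches.

# Hypothetical sketch, not the project's HashList: one cached digest per
# 32-byte chunk (4 packed uint64 values), recomputed lazily after a write.
type
  ToyHashList = object
    data: seq[uint64]
    chunkDigests: seq[uint64]   # 0 means "stale, recompute on next read"

func toyDigest(vals: openArray[uint64]): uint64 =
  # Stand-in for hashing one 32-byte chunk.
  result = 1
  for v in vals:
    result = result * 31 + v

proc put(x: var ToyHashList, i: int, v: uint64) =
  x.data[i] = v
  x.chunkDigests[i div 4] = 0   # only this chunk's cached digest is dropped

proc chunkDigest(x: var ToyHashList, chunk: int): uint64 =
  if x.chunkDigests[chunk] == 0:
    let hi = min((chunk + 1) * 4, x.data.len)
    x.chunkDigests[chunk] = toyDigest(x.data.toOpenArray(chunk * 4, hi - 1))
  x.chunkDigests[chunk]

when isMainModule:
  var balances = ToyHashList(
    data: newSeq[uint64](8), chunkDigests: newSeq[uint64](2))
  balances.put(5, 32_000_000_000'u64)     # touches chunk 1 only
  doAssert balances.chunkDigest(0) == toyDigest([0'u64, 0, 0, 0])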
@@ -585,35 +585,49 @@ func hashTreeRootAux[T](x: T): Eth2Digest =
   else:
     unsupported T

-func mergedDataHash(x: HashList|HashArray, dataIdx: int64): Eth2Digest =
+func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
+  # The hash of the two cached
+  trs "DATA HASH ", chunkIdx, " ", x.data.len
+
   when x.T is uint64:
     when cpuEndian == bigEndian:
-      unsupported type x
+      unsupported type x # No bigendian support here!

     let
-      pos = offset(cast[ptr byte](unsafeAddr x.data[0]), dataIdx.int * 32)
-      pos2 = offset(pos, 32)
+      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
+      byteIdx = chunkIdx * bytesPerChunk
+      byteLen = x.data.len * sizeof(x.T)

-    hash(makeOpenArray(pos, 32), makeOpenArray(pos2, 32))
-  else:
-    trs "DATA HASH ", dataIdx, " ", x.data.len
-    if dataIdx + 1 > x.data.len():
+    const zero64 = default(array[64, byte])
+
+    if byteIdx >= byteLen:
+      zeroHashes[1]
+    else:
+      let
+        nbytes = min(byteLen - byteIdx, 64)
+        padding = 64 - nbytes
+
+      hash(
+        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
+        toOpenArray(zero64, 0, int(padding - 1)))
+  else:
+    if chunkIdx + 1 > x.data.len():
       zeroHashes[x.maxDepth]
-    elif dataIdx + 1 == x.data.len():
+    elif chunkIdx + 1 == x.data.len():
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
+        hash_tree_root(x.data[chunkIdx]),
         Eth2Digest())
     else:
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
-        hash_tree_root(x.data[dataIdx + 1]))
+        hash_tree_root(x.data[chunkIdx]),
+        hash_tree_root(x.data[chunkIdx + 1]))

 func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
   doAssert vIdx >= 1

   let
     layer = layer(vIdx)
-    idxInLayer = vIdx - (1 shl layer)
+    idxInLayer = vIdx - (1'i64 shl layer)
     layerIdx = idxInlayer + x.indices[layer]

   doAssert layer < x.maxDepth
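The idxInLayer change above is one of the type fixes: 1'i64 shl layer keeps the shift in 64-bit arithmetic alongside the int64 node index. A small illustrative sketch of the layer/offset arithmetic, assuming a 1-based heap-style node numbering with the root at index 1 (hypothetical helper, not the repository's layer or cachedHash):

# Hypothetical illustration of the index math only, not the repository's code:
# node 1 is the root (layer 0), nodes 2..3 are layer 1, nodes 4..7 layer 2, ...
func layerOf(vIdx: int64): int =
  var v = vIdx
  while v > 1:
    v = v shr 1
    inc result

when isMainModule:
  let vIdx = 5'i64                     # binary 101, so layer 2
  let layer = layerOf(vIdx)
  # The int64 literal keeps shift and subtraction in 64-bit arithmetic,
  # matching the 1'i64 fix above.
  let idxInLayer = vIdx - (1'i64 shl layer)
  doAssert layer == 2 and idxInLayer == 1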
@@ -630,7 +644,7 @@ func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =

   px[].hashes[layerIdx] =
     if layer == x.maxDepth - 1:
-      let dataIdx = vIdx * 2 - 1 shl (x.maxDepth)
+      let dataIdx = vIdx * 2 - 1'i64 shl (x.maxDepth)
       mergedDataHash(x, dataIdx)
     else:
       mergeBranches(
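At the bottom cache layer handled above, each node covers a pair of data chunks, and dataIdx = vIdx * 2 - 1'i64 shl maxDepth selects the first chunk of that pair. A tiny hypothetical check of that mapping with a made-up maxDepth of 3:

# Hypothetical check: with a made-up maxDepth of 3, bottom-layer nodes 4..7
# cover data chunk pairs (0,1), (2,3), (4,5), (6,7).
when isMainModule:
  const maxDepth = 3
  for vIdx in 4'i64 .. 7'i64:
    let dataIdx = vIdx * 2 - 1'i64 shl maxDepth   # shl binds tighter than -
    doAssert dataIdx == (vIdx - 4) * 2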
@@ -142,12 +142,12 @@ template maxDepth*(a: HashList|HashArray): int =
   ## Layer where data is
   layer(a.maxChunks)

-proc clearCaches*(a: var HashList, dataIdx: auto) =
+proc clearCaches*(a: var HashList, dataIdx: int64) =
   if a.hashes.len == 0:
     return

   var
-    idx = 1 shl (a.maxDepth - 1) + int(dataIdx div 2)
+    idx = 1'i64 shl (a.maxDepth - 1) + int64(dataIdx div 2)
     layer = a.maxDepth - 1
   while idx > 0:
     let
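Judging from the lines above, clearCaches appears to start at the cache node just above the touched data chunk (the maxDepth - 1 shift plus dataIdx div 2) and walk toward the root. A small sketch of such an upward walk, illustrative only and not the project's cache layout:

# Hypothetical sketch of invalidating cached hashes from a leaf's parent up to
# the root in a heap-indexed tree (node i's parent is i div 2).
proc dirtyPath(maxDepth: int, dataIdx: int64): seq[int64] =
  var idx = 1'i64 shl (maxDepth - 1) + dataIdx div 2  # node above the chunk
  while idx > 0:
    result.add idx
    idx = idx shr 1              # step to the parent; the loop ends past node 1

when isMainModule:
  # With a made-up maxDepth of 3, touching data chunk 5 dirties node 6,
  # then node 3, then the root, node 1.
  doAssert dirtyPath(3, 5) == @[6'i64, 3, 1]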
@@ -210,7 +210,7 @@ proc `[]`*(x: var HashList, idx: auto): var x.T =
   clearCaches(x, idx.int64)
   x.data[idx]

-proc `[]=`*(x: var HashList, idx: int64, val: auto) =
+proc `[]=`*(x: var HashList, idx: auto, val: auto) =
   clearCaches(x, idx.int64)
   x.data[idx] = val