cache balances, more type fixes

parent be92842944
commit fec01829c4

@@ -274,7 +274,7 @@ type
 
     # Registry
     validators*: HashList[Validator, VALIDATOR_REGISTRY_LIMIT]
-    balances*: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+    balances*: HashList[uint64, VALIDATOR_REGISTRY_LIMIT]
 
     # Randomness
     randao_mixes*: HashArray[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]

@@ -585,35 +585,49 @@ func hashTreeRootAux[T](x: T): Eth2Digest =
   else:
     unsupported T
 
-func mergedDataHash(x: HashList|HashArray, dataIdx: int64): Eth2Digest =
+func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
+  # The hash of the two cached
+  trs "DATA HASH ", chunkIdx, " ", x.data.len
+
   when x.T is uint64:
     when cpuEndian == bigEndian:
-      unsupported type x
+      unsupported type x # No bigendian support here!
 
     let
-      pos = offset(cast[ptr byte](unsafeAddr x.data[0]), dataIdx.int * 32)
-      pos2 = offset(pos, 32)
+      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
+      byteIdx = chunkIdx * bytesPerChunk
+      byteLen = x.data.len * sizeof(x.T)
 
-    hash(makeOpenArray(pos, 32), makeOpenArray(pos2, 32))
-  else:
-    trs "DATA HASH ", dataIdx, " ", x.data.len
-    if dataIdx + 1 > x.data.len():
+    const zero64 = default(array[64, byte])
+
+    if byteIdx >= byteLen:
+      zeroHashes[1]
+    else:
+      let
+        nbytes = min(byteLen - byteIdx, 64)
+        padding = 64 - nbytes
+
+      hash(
+        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
+        toOpenArray(zero64, 0, int(padding - 1)))
+  else:
+    if chunkIdx + 1 > x.data.len():
       zeroHashes[x.maxDepth]
-    elif dataIdx + 1 == x.data.len():
+    elif chunkIdx + 1 == x.data.len():
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
+        hash_tree_root(x.data[chunkIdx]),
         Eth2Digest())
     else:
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
-        hash_tree_root(x.data[dataIdx + 1]))
+        hash_tree_root(x.data[chunkIdx]),
+        hash_tree_root(x.data[chunkIdx + 1]))
 
 func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
   doAssert vIdx >= 1
 
   let
     layer = layer(vIdx)
-    idxInLayer = vIdx - (1 shl layer)
+    idxInLayer = vIdx - (1'i64 shl layer)
     layerIdx = idxInlayer + x.indices[layer]
 
   doAssert layer < x.maxDepth

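A note on the byte-level branch above: with plain uint64 elements such as the balances list, mergedDataHash now hashes the backing store 64 bytes at a time (one pair of 32-byte SSZ chunks) and zero-pads whatever is missing at the tail. Below is a minimal, self-contained sketch of that indexing arithmetic, assuming bytesPerChunk = 32 as in SSZ; the blockBounds helper is illustrative only, not part of the library.

const bytesPerChunk = 32'i64

proc blockBounds(dataLen, chunkIdx: int64): tuple[nbytes, padding: int64] =
  ## Data bytes and zero-padding bytes in the 64-byte block that starts
  ## at chunk `chunkIdx`, for `dataLen` uint64 elements.
  let
    byteIdx = chunkIdx * bytesPerChunk
    byteLen = dataLen * int64(sizeof(uint64))
  if byteIdx >= byteLen:
    (0'i64, 64'i64)            # the block lies entirely past the data
  else:
    let nbytes = min(byteLen - byteIdx, 64)
    (nbytes, 64 - nbytes)

when isMainModule:
  # 5 uint64 balances = 40 bytes of data
  echo blockBounds(5, 0)   # (nbytes: 40, padding: 24)
  echo blockBounds(5, 2)   # (nbytes: 0, padding: 64)

For five balances the first block hashes 40 data bytes plus 24 zero bytes, and a block that starts past the data is all zeroes, which the code above short-circuits with the precomputed zeroHashes[1] instead of hashing 64 zero bytes.
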
@@ -630,7 +644,7 @@ func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
 
   px[].hashes[layerIdx] =
     if layer == x.maxDepth - 1:
-      let dataIdx = vIdx * 2 - 1 shl (x.maxDepth)
+      let dataIdx = vIdx * 2 - 1'i64 shl (x.maxDepth)
       mergedDataHash(x, dataIdx)
     else:
       mergeBranches(

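A small worked example of the node-index arithmetic cachedHash uses above, assuming the usual heap layout in which node 1 is the root and node v has children 2*v and 2*v + 1. The layer helper below is a stand-in reimplementation for illustration, not the library's.

import std/bitops

func layer(vIdx: int64): int =
  ## Depth of node vIdx when node 1 is the root (layer 0).
  fastLog2(vIdx.uint64)

when isMainModule:
  const maxDepth = 3                # a tree over 2^3 = 8 data chunks
  for vIdx in 1'i64 .. 7'i64:
    let
      l = layer(vIdx)
      idxInLayer = vIdx - (1'i64 shl l)
    echo "node ", vIdx, ": layer ", l, ", offset ", idxInLayer
  # A node in the deepest cached layer covers two data chunks,
  # starting at vIdx * 2 - 2^maxDepth:
  let vIdx = 5'i64                  # layer 2 == maxDepth - 1
  echo "chunk pair starts at ", vIdx * 2 - (1'i64 shl maxDepth)

Nodes 4..7 land in layer 2 with offsets 0..3, and node 5's children (nodes 10 and 11) are data chunks 2 and 3, which is what the vIdx * 2 - 1'i64 shl (x.maxDepth) expression in the hunk above computes.
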
@@ -142,12 +142,12 @@ template maxDepth*(a: HashList|HashArray): int =
   ## Layer where data is
   layer(a.maxChunks)
 
-proc clearCaches*(a: var HashList, dataIdx: auto) =
+proc clearCaches*(a: var HashList, dataIdx: int64) =
   if a.hashes.len == 0:
     return
 
   var
-    idx = 1 shl (a.maxDepth - 1) + int(dataIdx div 2)
+    idx = 1'i64 shl (a.maxDepth - 1) + int64(dataIdx div 2)
     layer = a.maxDepth - 1
   while idx > 0:
     let

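On the clearCaches signature pinned to int64 above: when an element is written through `[]=`, invalidation starts at the node in the deepest cached layer that covers the touched chunk pair and then walks towards the root. The following standalone sketch shows that kind of walk; the constants and the shr-based parent step are illustrative assumptions, not code copied from the library.

when isMainModule:
  const maxDepth = 4                # deepest cached layer is maxDepth - 1
  let dataIdx = 9'i64               # chunk that was just modified
  # Node in the deepest cached layer whose hash covers chunks 8 and 9:
  var idx = 1'i64 shl (maxDepth - 1) + dataIdx div 2
  while idx > 0:
    echo "invalidate cached node ", idx   # prints 12, 6, 3, 1
    idx = idx shr 1                       # step to the parent node

With maxDepth = 4 and a write to chunk 9, only nodes 12, 6, 3 and 1 need their cached hashes recomputed on the next hash_tree_root, i.e. O(log n) work rather than rehashing the whole list.
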
@@ -210,7 +210,7 @@ proc `[]`*(x: var HashList, idx: auto): var x.T =
   clearCaches(x, idx.int64)
   x.data[idx]
 
-proc `[]=`*(x: var HashList, idx: int64, val: auto) =
+proc `[]=`*(x: var HashList, idx: auto, val: auto) =
   clearCaches(x, idx.int64)
   x.data[idx] = val
 