From fec01829c49aa6dd1f7429a0ace54517160b84bf Mon Sep 17 00:00:00 2001
From: Jacek Sieka
Date: Sun, 31 May 2020 17:21:47 +0200
Subject: [PATCH] cache balances, more type fixes

---
 beacon_chain/spec/datatypes.nim |  2 +-
 beacon_chain/ssz.nim            | 42 ++++++++++++++++++++++-----------
 beacon_chain/ssz/types.nim      |  6 ++---
 3 files changed, 32 insertions(+), 18 deletions(-)

diff --git a/beacon_chain/spec/datatypes.nim b/beacon_chain/spec/datatypes.nim
index f296921a1..0e43d8ee0 100644
--- a/beacon_chain/spec/datatypes.nim
+++ b/beacon_chain/spec/datatypes.nim
@@ -274,7 +274,7 @@ type
 
     # Registry
     validators*: HashList[Validator, VALIDATOR_REGISTRY_LIMIT]
-    balances*: List[uint64, VALIDATOR_REGISTRY_LIMIT]
+    balances*: HashList[uint64, VALIDATOR_REGISTRY_LIMIT]
 
     # Randomness
    randao_mixes*: HashArray[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
diff --git a/beacon_chain/ssz.nim b/beacon_chain/ssz.nim
index b72bd5c65..202e7edc9 100644
--- a/beacon_chain/ssz.nim
+++ b/beacon_chain/ssz.nim
@@ -585,35 +585,49 @@ func hashTreeRootAux[T](x: T): Eth2Digest =
   else:
     unsupported T
 
-func mergedDataHash(x: HashList|HashArray, dataIdx: int64): Eth2Digest =
+func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
+  # The merged hash of the two data chunks at `chunkIdx` and `chunkIdx + 1`
+  trs "DATA HASH ", chunkIdx, " ", x.data.len
+
   when x.T is uint64:
     when cpuEndian == bigEndian:
-      unsupported type x
+      unsupported type x # No bigendian support here!
+
     let
-      pos = offset(cast[ptr byte](unsafeAddr x.data[0]), dataIdx.int * 32)
-      pos2 = offset(pos, 32)
+      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
+      byteIdx = chunkIdx * bytesPerChunk
+      byteLen = x.data.len * sizeof(x.T)
 
-    hash(makeOpenArray(pos, 32), makeOpenArray(pos2, 32))
+    const zero64 = default(array[64, byte])
+
+    if byteIdx >= byteLen:
+      zeroHashes[1]
+    else:
+      let
+        nbytes = min(byteLen - byteIdx, 64)
+        padding = 64 - nbytes
+
+      hash(
+        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
+        toOpenArray(zero64, 0, int(padding - 1)))
   else:
-    trs "DATA HASH ", dataIdx, " ", x.data.len
-
-    if dataIdx + 1 > x.data.len():
+    if chunkIdx + 1 > x.data.len():
       zeroHashes[x.maxDepth]
-    elif dataIdx + 1 == x.data.len():
+    elif chunkIdx + 1 == x.data.len():
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
+        hash_tree_root(x.data[chunkIdx]),
         Eth2Digest())
     else:
       mergeBranches(
-        hash_tree_root(x.data[dataIdx]),
-        hash_tree_root(x.data[dataIdx + 1]))
+        hash_tree_root(x.data[chunkIdx]),
+        hash_tree_root(x.data[chunkIdx + 1]))
 
 func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
   doAssert vIdx >= 1
   let
     layer = layer(vIdx)
-    idxInLayer = vIdx - (1 shl layer)
+    idxInLayer = vIdx - (1'i64 shl layer)
     layerIdx = idxInlayer + x.indices[layer]
 
   doAssert layer < x.maxDepth
@@ -630,7 +644,7 @@ func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
 
     px[].hashes[layerIdx] =
       if layer == x.maxDepth - 1:
-        let dataIdx = vIdx * 2 - 1 shl (x.maxDepth)
+        let dataIdx = vIdx * 2 - 1'i64 shl (x.maxDepth)
         mergedDataHash(x, dataIdx)
       else:
         mergeBranches(
diff --git a/beacon_chain/ssz/types.nim b/beacon_chain/ssz/types.nim
index a67fb6821..884629697 100644
--- a/beacon_chain/ssz/types.nim
+++ b/beacon_chain/ssz/types.nim
@@ -142,12 +142,12 @@ template maxDepth*(a: HashList|HashArray): int =
   ## Layer where data is
   layer(a.maxChunks)
 
-proc clearCaches*(a: var HashList, dataIdx: auto) =
+proc clearCaches*(a: var HashList, dataIdx: int64) =
   if a.hashes.len == 0:
     return
 
   var
-    idx = 1 shl (a.maxDepth - 1) + int(dataIdx div 2)
+    idx = 1'i64 shl (a.maxDepth - 1) + int64(dataIdx div 2)
     layer = a.maxDepth - 1
   while idx > 0:
     let
@@ -210,7 +210,7 @@ proc `[]`*(x: var HashList, idx: auto): var x.T =
   clearCaches(x, idx.int64)
   x.data[idx]
 
-proc `[]=`*(x: var HashList, idx: int64, val: auto) =
+proc `[]=`*(x: var HashList, idx: auto, val: auto) =
   clearCaches(x, idx.int64)
   x.data[idx] = val
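
For background on the arithmetic the `1'i64` literals touch: the hash cache
numbers its Merkle nodes breadth-first starting at 1, so a node's layer is
floor(log2(vIdx)), its offset within that layer is vIdx - 2^layer, and the
packed data chunks sit directly below the deepest cached layer. The sketch
below (hypothetical helper names, not code from this patch) mirrors that
mapping; forcing the shifts to int64 presumably keeps the arithmetic in
64-bit width even where a bare `1 shl layer` would be evaluated as a
platform-sized `int`.

import std/bitops

# Hypothetical helpers mirroring `layer`, `cachedHash` and the `dataIdx`
# computation in the patch; not part of the library.
func layerOf(vIdx: int64): int =
  ## floor(log2(vIdx)) for a breadth-first node numbering that starts at 1.
  fastLog2(vIdx.uint64)

func idxInLayer(vIdx: int64): int64 =
  ## Offset of the node within its own layer.
  vIdx - (1'i64 shl layerOf(vIdx))

func dataChunkIdx(vIdx: int64, maxDepth: int): int64 =
  ## Index of the left data chunk under a node on the deepest cached layer,
  ## mirroring `let dataIdx = vIdx * 2 - 1'i64 shl (x.maxDepth)`.
  vIdx * 2 - (1'i64 shl maxDepth)

when isMainModule:
  doAssert layerOf(1) == 0                        # the root node
  doAssert layerOf(3) == 1 and idxInLayer(3) == 1 # right child of the root
  # With maxDepth = 38 (2^40 uint64 balances pack 4 per 32-byte chunk),
  # the leftmost node on layer 37 sits above data chunks 0 and 1.
  doAssert dataChunkIdx(1'i64 shl 37, 38) == 0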
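
The uint64 branch of mergedDataHash now hashes two adjacent 32-byte chunks of
the packed balances straight out of the backing store, padding with zero bytes
once it runs past the end of the data (two fully out-of-range chunks fall back
to zeroHashes[1], the hash of 64 zero bytes). A rough standalone sketch of that
slicing, with a made-up helper name and building the padded 64-byte block
instead of passing two openArray slices to the hasher:

const bytesPerChunk = 32

func paddedChunkPair(data: openArray[byte], chunkIdx: int): array[64, byte] =
  ## Bytes of chunks `chunkIdx` and `chunkIdx + 1`, zero-padded past the end
  ## of `data`. Hypothetical helper, not part of the patch.
  let byteIdx = chunkIdx * bytesPerChunk
  if byteIdx < data.len:
    let nbytes = min(data.len - byteIdx, 64)
    for i in 0 ..< nbytes:
      result[i] = data[byteIdx + i]

when isMainModule:
  # Five 8-byte balances = 40 bytes: chunk 0 is full, chunk 1 holds one
  # balance followed by 24 bytes of zero padding.
  var packed = newSeq[byte](5 * 8)
  packed[0] = 1
  let pair = paddedChunkPair(packed, 0)
  doAssert pair[0] == 1 and pair[40] == 0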