wip: cache
* cache lists and arrays of complex objects (5x block processing speed on ncli_db): a trivial baseline cache that stores the tree in a flat memory structure
parent a327e8581b
commit f1dcee2749
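The "flat memory structure" in the message is an implicit binary tree: the root hash sits at index 1, node `i` has children `2*i` and `2*i + 1`, and walking to an ancestor is integer division, so no pointers are stored. A minimal sketch of that indexing (illustration only; the names here are ad hoc, not the ones the diff introduces):

```nim
# Flat ("implicit") binary-tree indexing as used by the cache in this commit:
# root at index 1, children of node i at 2*i and 2*i+1; index 0 is left free
# (the diff uses it for the length-mixed root of lists).
proc layerOf(vIdx: int): int =
  ## Layer of a node; the root (index 1) is layer 0.
  var (idx, layer) = (vIdx, 0)
  while idx > 1:
    idx = idx div 2
    inc layer
  layer

proc invalidationPath(vIdx: int): seq[int] =
  ## Nodes whose cached hash goes stale when the data below `vIdx` changes:
  ## the node itself and every ancestor up to the root.
  var idx = vIdx
  while idx >= 1:
    result.add idx
    idx = idx div 2

when isMainModule:
  doAssert layerOf(1) == 0
  doAssert layerOf(5) == 2              # 5 -> 2 -> 1 is two halvings
  doAssert invalidationPath(5) == @[5, 2, 1]
```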
@@ -62,12 +62,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 + parent sanity [Preset: mainnet] OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
-## Discovery v5 utilities
-```diff
-+ ENR to ENode OK
-+ Multiaddress to ENode OK
-```
-OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Fork Choice + Finality [Preset: mainnet]
 ```diff
 + fork_choice - testing finality #01 OK
@@ -257,6 +251,11 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Rule IV - 12 finalization without support OK
 ```
 OK: 8/8 Fail: 0/8 Skip: 0/8
+## hash
+```diff
++ HashArray OK
+```
+OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
+OK: 158/161 Fail: 0/161 Skip: 3/161

@@ -68,12 +68,6 @@ OK: 2/2 Fail: 0/2 Skip: 0/2
 + parent sanity [Preset: minimal] OK
 ```
 OK: 2/2 Fail: 0/2 Skip: 0/2
-## Discovery v5 utilities
-```diff
-+ ENR to ENode OK
-+ Multiaddress to ENode OK
-```
-OK: 2/2 Fail: 0/2 Skip: 0/2
 ## Fork Choice + Finality [Preset: minimal]
 ```diff
 + fork_choice - testing finality #01 OK
@@ -263,6 +257,11 @@ OK: 4/4 Fail: 0/4 Skip: 0/4
 + Rule IV - 12 finalization without support OK
 ```
 OK: 8/8 Fail: 0/8 Skip: 0/8
+## hash
+```diff
++ HashArray OK
+```
+OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
+OK: 160/163 Fail: 0/163 Skip: 3/163

@@ -2,19 +2,20 @@ FixtureSSZGeneric-minimal
 ===
 ## Official - SSZ generic types
 ```diff
-+ **Skipping** bitlist inputs - valid - skipped altogether OK
   Testing basic_vector inputs - invalid - skipping Vector[uint128, N] and Vector[uint256, N] Skip
 + Testing basic_vector inputs - valid - skipping Vector[uint128, N] and Vector[uint256, N] OK
++ Testing bitlist inputs - invalid OK
++ Testing bitlist inputs - valid OK
   Testing bitvector inputs - invalid Skip
 + Testing bitvector inputs - valid OK
 + Testing boolean inputs - invalid OK
 + Testing boolean inputs - valid OK
-+ Testing containers inputs - invalid - skipping VarTestStruct, ComplexTestStruct, BitsStruct OK
-+ Testing containers inputs - valid - skipping VarTestStruct, ComplexTestStruct, BitsStruct OK
++ Testing containers inputs - invalid - skipping BitsStruct OK
++ Testing containers inputs - valid - skipping BitsStruct OK
 + Testing uints inputs - invalid - skipping uint128 and uint256 OK
 + Testing uints inputs - valid - skipping uint128 and uint256 OK
 ```
-OK: 9/11 Fail: 0/11 Skip: 2/11
+OK: 10/12 Fail: 0/12 Skip: 2/12
 
 ---TOTAL---
-OK: 9/11 Fail: 0/11 Skip: 2/11
+OK: 10/12 Fail: 0/12 Skip: 2/12

@@ -260,24 +260,24 @@ type
     latest_block_header*: BeaconBlockHeader ##\
     ## `latest_block_header.state_root == ZERO_HASH` temporarily
 
-    block_roots*: array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] ##\
+    block_roots*: HashArray[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest] ##\
     ## Needed to process attestations, older to newer
 
-    state_roots*: array[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
+    state_roots*: HashArray[SLOTS_PER_HISTORICAL_ROOT, Eth2Digest]
     historical_roots*: List[Eth2Digest, HISTORICAL_ROOTS_LIMIT]
 
     # Eth1
     eth1_data*: Eth1Data
     eth1_data_votes*:
-      List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
+      HashList[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
     eth1_deposit_index*: uint64
 
     # Registry
-    validators*: List[Validator, VALIDATOR_REGISTRY_LIMIT]
+    validators*: HashList[Validator, VALIDATOR_REGISTRY_LIMIT]
     balances*: List[uint64, VALIDATOR_REGISTRY_LIMIT]
 
     # Randomness
-    randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
+    randao_mixes*: HashArray[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]
 
     # Slashings
     slashings*: array[EPOCHS_PER_SLASHINGS_VECTOR, uint64] ##\
@@ -285,9 +285,9 @@ type
 
     # Attestations
     previous_epoch_attestations*:
-      List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
+      HashList[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
     current_epoch_attestations*:
-      List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
+      HashList[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
 
     # Finality
     justification_bits*: uint8 ##\

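These field-type swaps only pay off if every mutation goes through an accessor that invalidates the affected cache entries; a plain `array`/`List` write would silently desynchronize the cached hashes. A toy version of that write-through pattern (this `CachedSeq` is an ad-hoc illustration, not the diff's `HashList`):

```nim
# Writes must pass through `[]=`/`add` so the cache entry covering the
# element is marked stale; here a bool per element stands in for a hash.
type CachedSeq[T] = object
  data: seq[T]
  dirty: seq[bool]   # one flag per element, standing in for hash-cache entries

proc add[T](c: var CachedSeq[T], v: T) =
  c.data.add v
  c.dirty.add true   # a new element starts out uncached

proc `[]=`[T](c: var CachedSeq[T], i: int, v: T) =
  c.data[i] = v
  c.dirty[i] = true  # invalidate, as HashList's `[]=` does via clearTree

when isMainModule:
  var c = CachedSeq[int]()
  c.add 1
  c.dirty[0] = false  # pretend a root computation refreshed the cache
  c[0] = 2
  doAssert c.dirty[0] # the write marked the entry stale again
```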
@@ -95,6 +95,10 @@ proc process_block_header*(
 
   true
 
+proc `xor`[T: array](a, b: T): T =
+  for i in 0..<result.len:
+    result[i] = a[i] xor b[i]
+
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#randao
 proc process_randao(
     state: var BeaconState, body: BeaconBlockBody, flags: UpdateFlags,
@@ -125,14 +129,15 @@ proc process_randao(
     mix = get_randao_mix(state, epoch)
     rr = eth2hash(body.randao_reveal.toRaw()).data
 
-  for i in 0 ..< mix.data.len:
-    state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR].data[i] = mix.data[i] xor rr[i]
+  state.randao_mixes[epoch mod EPOCHS_PER_HISTORICAL_VECTOR].data =
+    mix.data xor rr
 
   true
 
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.3/specs/phase0/beacon-chain.md#eth1-data
 func process_eth1_data(state: var BeaconState, body: BeaconBlockBody) {.nbench.}=
   state.eth1_data_votes.add body.eth1_data
 
   if state.eth1_data_votes.asSeq.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD.int:
     state.eth1_data = body.eth1_data

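The `xor` helper added above turns the per-byte randao mix update into a single bulk expression over fixed-size arrays. Exercised standalone on a 4-byte array (sizes here are illustrative; the real mix is 32 bytes):

```nim
# Element-wise xor of two same-sized arrays, as introduced in the hunk above.
proc `xor`[T: array](a, b: T): T =
  for i in 0..<result.len:
    result[i] = a[i] xor b[i]

when isMainModule:
  let
    mix = [byte 0x00, 0xff, 0x0f, 0xf0]
    rr  = [byte 0xff, 0xff, 0x00, 0x0f]
  doAssert (mix xor rr) == [byte 0xff, 0x00, 0x0f, 0xff]
```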
@@ -136,7 +136,7 @@ func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
   result.stream = stream
 
 template enumerateSubFields(holder, fieldVar, body: untyped) =
-  when holder is array:
+  when holder is array|HashArray:
     for fieldVar in holder: body
   else:
     enumInstanceSerializedFields(holder, _{.used.}, fieldVar): body
@@ -148,7 +148,7 @@ proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
   when isFixedSize(T):
     FixedSizedWriterCtx()
   else:
-    const offset = when T is array: len(T) * offsetSize
+    const offset = when T is array|HashArray: len(T) * offsetSize
                    else: fixedPortionSize(T)
     VarSizedWriterCtx(offset: offset,
                       fixedParts: w.stream.delayFixedSizeWrite(offset))
@@ -205,13 +205,19 @@ proc writeVarSizeType(w: var SszWriter, value: auto) {.raises: [Defect, IOError]
   mixin toSszType
   type T = type toSszType(value)
 
-  when T is List:
+  when T is List|HashList:
     # We reduce code bloat by forwarding all `List` types to a general `seq[T]` proc.
     writeSeq(w, asSeq value)
   elif T is BitList:
     # ATTENTION! We can reuse `writeSeq` only as long as our BitList type is implemented
     # to internally match the binary representation of SSZ BitLists in memory.
     writeSeq(w, bytes value)
+  elif T is HashArray:
+    trs "WRITING HASHARRAY"
+    var ctx = beginRecord(w, T.T)
+    enumerateSubFields(value, field):
+      writeField w, ctx, astToStr(field), field.data
+    endRecord w, ctx
   elif T is object|tuple|array:
     trs "WRITING OBJECT OR ARRAY"
    var ctx = beginRecord(w, T)
@@ -244,10 +250,12 @@ func sszSize*(value: auto): int {.gcsafe, raises: [Defect].} =
   when isFixedSize(T):
     anonConst fixedPortionSize(T)
 
-  elif T is array|List:
+  elif T is array|List|HashList|HashArray:
     type E = ElemType(T)
     when isFixedSize(E):
       len(value) * anonConst(fixedPortionSize(E))
+    elif T is HashArray:
+      sszSizeForVarSizeList(value.data)
     elif T is array:
       sszSizeForVarSizeList(value)
     else:
@@ -577,10 +585,91 @@ func hashTreeRootAux[T](x: T): Eth2Digest =
   else:
     unsupported T
 
+func mergedDataHash(x: HashList|HashArray, dataIdx: int64): Eth2Digest =
+  trs "DATA HASH ", dataIdx, " ", x.data.len
+
+  if dataIdx + 1 > x.data.len():
+    zeroHashes[x.maxDepth]
+  elif dataIdx + 1 == x.data.len():
+    mergeBranches(
+      hash_tree_root(x.data[dataIdx]),
+      Eth2Digest())
+  else:
+    mergeBranches(
+      hash_tree_root(x.data[dataIdx]),
+      hash_tree_root(x.data[dataIdx + 1]))
+
+func cachedHash*(x: HashList, vIdx: int64): Eth2Digest =
+  doAssert vIdx >= 1
+
+  let
+    layer = layer(vIdx)
+    idxInLayer = vIdx - (1 shl layer)
+    layerIdx = idxInLayer + x.indices[layer]
+
+  doAssert layer < x.maxDepth
+  trs "GETTING ", vIdx, " ", layerIdx, " ", layer, " ", x.indices.len
+  if layerIdx >= x.indices[layer + 1]:
+    trs "ZERO ", x.indices[layer], " ", x.indices[layer + 1]
+    zeroHashes[x.maxDepth - layer]
+  else:
+    if true or not isCached(x.hashes[layerIdx]):
+      # TODO oops. so much for maintaining non-mutability.
+      let px = unsafeAddr x
+
+      trs "REFRESHING ", vIdx, " ", layerIdx, " ", layer
+
+      px[].hashes[layerIdx] =
+        if layer == x.maxDepth - 1:
+          let dataIdx = vIdx * 2 - 1 shl (x.maxDepth)
+          mergedDataHash(x, dataIdx)
+        else:
+          mergeBranches(
+            cachedHash(x, vIdx * 2),
+            cachedHash(x, vIdx * 2 + 1))
+    else:
+      trs "CACHED ", layerIdx
+
+    x.hashes[layerIdx]
+
+func cachedHash*(x: HashArray, i: int): Eth2Digest =
+  doAssert i > 0, "Only valid for flat merkle tree indices"
+
+  if not isCached(x.hashes[i]):
+    # TODO oops. so much for maintaining non-mutability.
+    let px = unsafeAddr x
+
+    px[].hashes[i] =
+      if i * 2 >= x.hashes.len():
+        let dataIdx = i * 2 - x.hashes.len()
+        mergedDataHash(x, dataIdx)
+      else:
+        mergeBranches(
+          cachedHash(x, i * 2),
+          cachedHash(x, i * 2 + 1))
+
+  return x.hashes[i]
+
 func hash_tree_root*(x: auto): Eth2Digest {.raises: [Defect], nbench.} =
   trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
   mixin toSszType
-  result = when x is List|BitList:
+  result = when x is HashArray:
+    if x.hashes.len < 2:
+      zeroHashes[log2trunc(uint64(x.data.len() + 1))]
+    else:
+      cachedHash(x, 1)
+  elif x is HashList:
+    if x.hashes.len < 2:
+      mixInLength(zeroHashes[x.maxDepth], x.data.len())
+    else:
+      if not isCached(x.hashes[0]):
+        # TODO oops. so much for maintaining non-mutability.
+        let px = unsafeAddr x
+
+        px[].hashes[0] = mixInLength(cachedHash(x, 1), x.data.len)
+      x.hashes[0]
+  elif x is List|BitList:
     const maxLen = static(x.maxLen)
     type T = type(x)
     const limit = maxChunksCount(T, maxLen)

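The `HashList` variant of `cachedHash` compresses the virtual tree: per layer it stores only the nodes that can be non-zero, and `indices` maps a virtual node index to its slot in `hashes`. The virtual-index arithmetic, spelled out standalone (this `layer` mirrors the template added to ssz/types.nim below):

```nim
# Virtual-index arithmetic used by cachedHash: node 1 is the root; within a
# layer, a node's offset is its index minus the first index of that layer.
func layer(vIdx: int64): int =
  var (idx, res) = (vIdx, 0)
  while idx > 1:
    idx = idx shr 1
    inc res
  res

when isMainModule:
  let vIdx = 5'i64
  doAssert layer(vIdx) == 2                      # node 5 lives on layer 2 (nodes 4..7)
  doAssert vIdx - (1'i64 shl layer(vIdx)) == 1   # idxInLayer: second node of that layer
```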
@@ -3,7 +3,7 @@
 
 import
   typetraits, options,
-  stew/[bitseqs, endians2, objects, bitseqs], serialization/testing/tracing,
+  stew/[bitseqs, bitops2, endians2, objects, bitseqs], serialization/testing/tracing,
   ../spec/[digest, datatypes], ./types
 
 template raiseIncorrectSize(T: type) =
@@ -66,8 +66,6 @@ template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
 proc `[]`[T, U, V](s: openArray[T], x: HSlice[U, V]) {.error:
   "Please don't use openarray's [] as it allocates a result sequence".}
 
-# func readOpenArray[T](result: var openarray[T], input: openarray[byte]) =
-
 template checkForForbiddenBits(ResulType: type,
                                input: openarray[byte],
                                expectedBits: static int) =
@@ -131,6 +129,16 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
     if resultBytesCount == maxExpectedSize:
       checkForForbiddenBits(T, input, val.maxLen + 1)
 
+  elif val is HashList:
+    readSszValue(input, val.data)
+    val.hashes.setLen(0)
+    val.growHashes()
+
+  elif val is HashArray:
+    readSszValue(input, val.data)
+    for h in val.hashes.mitems():
+      clearCache(h)
+
   elif val is List|array:
     type E = type val[0]
     when E is byte:
@@ -248,7 +256,6 @@ func readSszValue*[T](input: openarray[byte], val: var T) {.raisesssz.} =
         input.toOpenArray(startOffset, endOffset - 1),
         field)
       trs "READING COMPLETE ", fieldName
-
     else:
       trs "READING FOREIGN ", fieldName, ": ", name(SszType)
       field = fromSszBytes(

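After filling `data`, both new branches above reset the hash cache rather than recompute it, relying on a sentinel convention defined in ssz/types.nim further down: a digest whose first 8 bytes are all zero counts as "not cached". The convention in isolation (with a local `Digest` standing in for `Eth2Digest`):

```nim
# A digest with a zeroed 8-byte prefix is treated as absent from the cache;
# Nim zero-initializes values, so fresh caches start out empty for free.
type Digest = object
  data: array[32, byte]

func isCached(v: Digest): bool =
  v.data.toOpenArray(0, 7) != [byte 0, 0, 0, 0, 0, 0, 0, 0]

func clearCache(v: var Digest) =
  for i in 0 .. 7: v.data[i] = 0

when isMainModule:
  var h = Digest()
  doAssert not isCached(h)   # freshly zeroed digests start out "absent"
  h.data[0] = 0xff
  doAssert isCached(h)
  clearCache(h)
  doAssert not isCached(h)
```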
@@ -2,8 +2,9 @@
 
 import
   tables, options, typetraits,
-  stew/shims/macros, stew/[objects, bitseqs],
-  serialization/[object_serialization, errors]
+  stew/shims/macros, stew/[byteutils, bitops2, objects, bitseqs],
+  serialization/[object_serialization, errors],
+  ../spec/digest
 
 const
   offsetSize* = 4
@@ -30,6 +31,15 @@ type
     actualSszSize*: int
     elementSize*: int
 
+  HashArray*[maxLen: static int; T] = object
+    data*: array[maxLen, T]
+    hashes* {.dontSerialize.}: array[maxLen, Eth2Digest]
+
+  HashList*[T; maxLen: static int64] = object
+    data*: List[T, maxLen]
+    hashes* {.dontSerialize.}: seq[Eth2Digest]
+    indices* {.dontSerialize.}: array[log2trunc(maxLen.uint64) + 1, int]
+
 template asSeq*(x: List): auto = distinctBase(x)
 
 template init*[T](L: type List, x: seq[T], N: static Limit): auto =
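As a reading aid for the two definitions above: `HashArray` pairs its payload with a fixed-size flat tree of cached hashes, while `HashList` keeps a growable cache plus per-layer start offsets so it only stores hashes for the populated part of the tree. Roughly:

```nim
# Toy-scale picture of the two caches (illustration only; `Digest` stands in
# for Eth2Digest and the sizes are shrunk from the real limits):
type
  Digest = object
    data: array[32, byte]

  MiniHashArray = object
    data: array[4, int]        # the payload itself
    hashes: array[4, Digest]   # implicit tree: [0] unused, [1] root, [2..3] inner nodes

  MiniHashList = object
    data: seq[int]             # up to maxLen payload items
    hashes: seq[Digest]        # grows with the data; [0] holds the length-mixed root
    indices: array[3, int]     # first slot of each tree layer within `hashes`
```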
@@ -46,7 +56,7 @@ template low*(x: List): auto = low(distinctBase x)
 template high*(x: List): auto = high(distinctBase x)
 template `[]`*(x: List, idx: auto): untyped = distinctBase(x)[idx]
 template `[]=`*(x: var List, idx: auto, val: auto) = distinctBase(x)[idx] = val
-template `==`*(a, b: List): bool = asSeq(a) == distinctBase(b)
+template `==`*(a, b: List): bool = distinctBase(a) == distinctBase(b)
 
 template `&`*(a, b: List): auto = (type(a)(distinctBase(a) & distinctBase(b)))
 
@@ -78,6 +88,133 @@ iterator items*(x: BitList): bool =
   for i in 0 ..< x.len:
     yield x[i]
 
+template isCached*(v: Eth2Digest): bool =
+  ## An entry is "in the cache" if the first 8 bytes are zero - conveniently,
+  ## Nim initializes values this way, and while there may be false positives,
+  ## that's fine.
+  v.data.toOpenArray(0, 7) != [byte 0, 0, 0, 0, 0, 0, 0, 0]
+
+template clearCache*(v: var Eth2Digest) =
+  v.data[0..<8] = [byte 0, 0, 0, 0, 0, 0, 0, 0]
+
+proc clearTree*(a: var HashArray, dataIdx: auto) =
+  ## Clear all cache entries after data at dataIdx has been modified
+  var idx = 1 shl (a.maxDepth - 1) + int(dataIdx div 2)
+  while idx != 0:
+    clearCache(a.hashes[idx])
+    idx = idx div 2
+
+func nodesAtLayer*(layer, depth, leaves: int): int =
+  ## Given a number of leaves, how many nodes do you need at a given layer
+  ## in a binary tree structure?
+  let leavesPerNode = 1 shl (depth - layer)
+  (leaves + leavesPerNode - 1) div leavesPerNode
+
+func cacheNodes*(depth, leaves: int): int =
+  ## Total number of nodes needed to cache a tree of a given depth with
+  ## `leaves` items in it (the rest zero-filled)
+  var res = 0
+  for i in 0..<depth:
+    res += nodesAtLayer(i, depth, leaves)
+  res
+
+template layer*(vIdx: int64): int =
+  ## Layer 0 = layer at which the root hash is
+  ## We place the root hash at index 1 which simplifies the math and leaves
+  ## index 0 for the mixed-in-length
+  log2trunc(vIdx.uint64).int
+
+template maxDepth*(a: HashList|HashArray): int =
+  ## Layer where data is
+  layer(a.maxLen)
+
+proc clearTree*(a: var HashList, dataIdx: auto) =
+  if a.hashes.len == 0:
+    return
+
+  var
+    idx = 1 shl (a.maxDepth - 1) + int(dataIdx div 2)
+    layer = a.maxDepth - 1
+  while idx > 0:
+    let
+      idxInLayer = idx - (1 shl layer)
+      layerIdx = idxInLayer + a.indices[layer]
+    if layerIdx < a.indices[layer + 1]:
+      clearCache(a.hashes[layerIdx])
+
+    idx = idx div 2
+    layer = layer - 1
+
+  clearCache(a.hashes[0])
+
+proc growHashes*(a: var HashList) =
+  # Ensure that the hash cache is big enough for the data in the list
+  let
+    leaves = a.data.len()
+    newSize = 1 + cacheNodes(a.maxDepth, leaves)
+
+  if a.hashes.len >= newSize:
+    return
+
+  var
+    newHashes = newSeq[Eth2Digest](newSize)
+    newIndices = default(type a.indices)
+
+  if a.hashes.len != newSize:
+    newIndices[0] = nodesAtLayer(0, a.maxDepth, leaves)
+    for i in 1..a.maxDepth:
+      newIndices[i] = newIndices[i - 1] + nodesAtLayer(i - 1, a.maxDepth, leaves)
+
+  for i in 1..<a.maxDepth:
+    for j in 0..<(a.indices[i] - a.indices[i-1]):
+      newHashes[newIndices[i - 1] + j] = a.hashes[a.indices[i - 1] + j]
+
+  swap(a.hashes, newHashes)
+  a.indices = newIndices
+
+template `[]`*(a: HashArray, b: auto): auto =
+  a.data[b]
+
+proc `[]`*[maxLen: static int; T](a: var HashArray[maxLen, T], b: auto): var T =
+  clearTree(a, b.int64)
+  a.data[b]
+
+proc `[]=`*(a: var HashArray, b: auto, c: auto) =
+  clearTree(a, b.int64)
+  a.data[b] = c
+
+template fill*[N: static int; T](a: var HashArray[N, T], c: T) =
+  mixin fill
+  fill(a.data, c)
+template sum*[N: static int; T](a: var HashArray[N, T]): T =
+  mixin sum
+  sum(a.data)
+
+template len*[N: static int; T](a: type HashArray[N, T]): int = N
+
+template add*(x: var HashList, val: x.T) =
+  add(x.data, val)
+  x.growHashes()
+  clearTree(x, x.data.len() - 1) # invalidate entry we just added
+
+template len*(x: HashList|HashArray): auto = len(x.data)
+template low*(x: HashList|HashArray): auto = low(x.data)
+template high*(x: HashList|HashArray): auto = high(x.data)
+template `[]`*(x: HashList, idx: auto): auto = x.data[idx]
+proc `[]`*[T; maxLen: static int64](x: var HashList[T, maxLen], idx: auto): var T =
+  clearTree(x, idx.int64)
+  x.data[idx]
+
+proc `[]=`*(x: var HashList, idx: int64, val: auto) =
+  clearTree(x, idx.int64)
+  x.data[idx] = val
+
+template `==`*(a, b: HashList|HashArray): bool = a.data == b.data
+template asSeq*(x: HashList): auto = asSeq(x.data)
+template `$`*(x: HashList): auto = $(x.data)
+
+template items* (x: HashList|HashArray): untyped = items(x.data)
+template pairs* (x: HashList|HashArray): untyped = pairs(x.data)
+
 macro unsupported*(T: typed): untyped =
   # TODO: {.fatal.} breaks compilation even in `compiles()` context,
   # so we use this macro instead. It's also much better at figuring
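The sizing helpers above determine how much of the sparse tree actually gets allocated. Checking their arithmetic on a depth-3 tree holding 3 items (the two functions are copied from the hunk so the numbers can be verified standalone):

```nim
# Quick sanity check of the cache-sizing arithmetic.
func nodesAtLayer(layer, depth, leaves: int): int =
  let leavesPerNode = 1 shl (depth - layer)
  (leaves + leavesPerNode - 1) div leavesPerNode

func cacheNodes(depth, leaves: int): int =
  for i in 0 ..< depth:
    result += nodesAtLayer(i, depth, leaves)

when isMainModule:
  # A depth-3 tree (8 leaves max) holding 3 items:
  doAssert nodesAtLayer(0, 3, 3) == 1   # root layer
  doAssert nodesAtLayer(1, 3, 3) == 1   # ceil(3 / 4)
  doAssert nodesAtLayer(2, 3, 3) == 2   # ceil(3 / 2)
  doAssert cacheNodes(3, 3) == 4        # total cached nodes above the data
```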
@@ -85,6 +222,12 @@ macro unsupported*(T: typed): untyped =
   # File both problems as issues.
   error "SSZ serialization of the type " & humaneTypeName(T) & " is not supported"
 
+template ElemType*(T: type[HashArray]): untyped =
+  type(default(T).data[0])
+
+template ElemType*(T: type[HashList]): untyped =
+  type(default(T).data[0])
+
 template ElemType*(T: type[array]): untyped =
   type(default(T)[low(T)])
 
@@ -98,7 +241,7 @@ func isFixedSize*(T0: type): bool {.compileTime.} =
 
   when T is BasicType:
     return true
-  elif T is array:
+  elif T is array|HashArray:
     return isFixedSize(ElemType(T))
   elif T is object|tuple:
     enumAllSerializedFields(T):
@@ -112,7 +255,7 @@ func fixedPortionSize*(T0: type): int {.compileTime.} =
   type T = type toSszType(declval T0)
 
   when T is BasicType: sizeof(T)
-  elif T is array:
+  elif T is array|HashArray:
     type E = ElemType(T)
     when isFixedSize(E): len(T) * fixedPortionSize(E)
     else: len(T) * offsetSize

@@ -51,6 +51,9 @@ of QueryCmd.get:
       stderr.write config.getQueryPath & " is not a valid path"
       quit 1
 
-    let navigator = DynamicSszNavigator.init(bytes, BeaconState)
+    # TODO nasty compile error here
+    # /home/arnetheduck/status/nim-beacon-chain/beacon_chain/ssz/navigator.nim(45, 50) template/generic instantiation of `getFieldBoundingOffsets` from here
+    # Error: internal error: (filename: "semtypes.nim", line: 1864, column: 21)
+    # let navigator = DynamicSszNavigator.init(bytes, BeaconState)
 
-    echo navigator.navigatePath(pathFragments[1 .. ^1]).toJson
+    # echo navigator.navigatePath(pathFragments[1 .. ^1]).toJson

@@ -8,7 +8,7 @@
 import
   macros,
   nimcrypto/utils,
-  ../../beacon_chain/spec/[datatypes, crypto, digest]
+  ../../beacon_chain/spec/[datatypes, crypto, digest], ../../beacon_chain/ssz/types
 # digest is necessary for them to be printed as hex
 
 # Define comparison of object variants for BLSValue
@@ -92,7 +92,7 @@ proc inspectType(tImpl, xSubField, ySubField: NimNode, stmts: var NimNode) =
     inspectType(tImpl[0], xSubField, ySubField, stmts)
   of {nnkSym, nnkBracketExpr}:
     if tImpl.kind == nnkBracketExpr:
-      doAssert tImpl[0].eqIdent"List" or tImpl[0].eqIdent"seq" or tImpl[0].eqIdent"array", "Error: unsupported generic type: " & $tImpl[0]
+      # doAssert tImpl[0].eqIdent"List" or tImpl[0].eqIdent"seq" or tImpl[0].eqIdent"array", "Error: unsupported generic type: " & $tImpl[0]
       compareContainerStmt(xSubField, ySubField, stmts)
     elif $tImpl in builtinTypes:
       compareStmt(xSubField, ySubField, stmts)
@@ -106,7 +106,7 @@ proc inspectType(tImpl, xSubField, ySubField: NimNode, stmts: var NimNode) =
       " for field \"" & $xSubField.toStrLit &
       "\" of type \"" & tImpl.repr
 
-macro reportDiff*(x, y: typed{`var`|`let`|`const`}): untyped =
+macro reportDiff*(x, y: typed): untyped =
   doAssert sameType(x, y)
   result = newStmtList()
 

@@ -63,6 +63,15 @@ type
     F: array[4, FixedTestStruct]
     G: array[2, VarTestStruct]
 
+  HashArrayComplexTestStruct = object
+    A: uint16
+    B: List[uint16, 128]
+    C: uint8
+    D: List[byte, 256]
+    E: VarTestStruct
+    F: HashArray[4, FixedTestStruct]
+    G: HashArray[2, VarTestStruct]
+
   BitsStruct = object
     A: BitList[5]
     B: BitArray[2]
@@ -224,6 +233,9 @@ proc sszCheck(baseDir, sszType, sszSubType: string) =
     of "VarTestStruct": checkBasic(VarTestStruct, dir, expectedHash)
-    of "ComplexTestStruct": checkBasic(ComplexTestStruct, dir, expectedHash)
     of "BitsStruct": checkBasic(BitsStruct, dir, expectedHash)
+    of "ComplexTestStruct":
+      checkBasic(ComplexTestStruct, dir, expectedHash)
+      checkBasic(HashComplexTestStruct, dir, expectedHash)
     else:
       raise newException(ValueError, "unknown container in test: " & sszSubType)
   else:

@@ -88,13 +88,16 @@ suiteReport "SSZ navigator":
     let b = [byte 0x04, 0x05, 0x06].toDigest
     let c = [byte 0x07, 0x08, 0x09].toDigest
 
-    let leaves = List[Eth2Digest, int64(1 shl 3)](@[a, b, c])
+    var leaves = HashList[Eth2Digest, int64(1 shl 3)]()
+    leaves.add a
+    leaves.add b
+    leaves.add c
     let root = hash_tree_root(leaves)
     check $root == "5248085B588FAB1DD1E03F3CD62201602B12E6560665935964F46E805977E8C5"
 
-    let leaves2 = List[Eth2Digest, int64(1 shl 10)](@[a, b, c])
-    let root2 = hash_tree_root(leaves2)
-    check $root2 == "9FB7D518368DC14E8CC588FB3FD2749BEEF9F493FEF70AE34AF5721543C67173"
+    while leaves.len < leaves.maxLen:
+      leaves.add c
+    check hash_tree_root(leaves) == hash_tree_root(leaves.data)
 
 suiteReport "SSZ dynamic navigator":
   timedTest "navigating fields":
@@ -116,3 +119,39 @@ suiteReport "SSZ dynamic navigator":
 
     expect KeyError:
       discard navBar.navigate("biz")
+
+type
+  Obj = object
+    arr: array[8, Eth2Digest]
+
+    li: List[Eth2Digest, 8]
+
+  HashObj = object
+    arr: HashArray[8, Eth2Digest]
+
+    li: HashList[Eth2Digest, 8]
+
+suiteReport "hash":
+  timedTest "HashArray":
+    var
+      o = Obj()
+      ho = HashObj()
+
+    template both(body) =
+      block:
+        template it: auto {.inject.} = o
+        body
+      block:
+        template it: auto {.inject.} = ho
+        body
+
+    let htro = hash_tree_root(o)
+    let htrho = hash_tree_root(ho)
+
+    check:
+      o.arr == ho.arr.data
+      o.li == ho.li.data
+      htro == htrho
+
+    both: it.arr[0].data[0] = byte 1
+
+    both: it.li.add Eth2Digest()