Use nim-ssz-serialization module and rm local ssz code

parent 782318d3fc
commit 03a70fbf36
.gitmodules
@@ -209,3 +209,8 @@
 [submodule "vendor/nim-taskpools"]
 	path = vendor/nim-taskpools
 	url = https://github.com/status-im/nim-taskpools
+[submodule "vendor/nim-ssz-serialization"]
+	path = vendor/nim-ssz-serialization
+	url = https://github.com/status-im/nim-ssz-serialization.git
+	ignore = untracked
+	branch = master
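Note: after checking out this commit, the new submodule still needs to be fetched locally, e.g. with `git submodule update --init vendor/nim-ssz-serialization` (standard git workflow; not part of the diff itself).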
@@ -14,7 +14,7 @@ import
   web3/[ethtypes, conversions],
   chronicles,
   eth/common/eth_types_json_serialization,
-  ../ssz/[navigator],
+  ssz_serialization/navigator,
   ../spec/eth2_ssz_serialization,
   ../spec/datatypes/phase0
 
@@ -33,7 +33,7 @@ import
 import ./base, ./phase0
 export base
 
-from ../../ssz/merkleization import GeneralizedIndex
+from ssz_serialization/merkleization import GeneralizedIndex
 export merkleization.GeneralizedIndex
 
 const
@@ -35,7 +35,8 @@ import
   stew/[assign2, byteutils],
   chronicles,
   chronos/timer,
-  ../../version, ../../ssz/types as sszTypes,
+  ssz_serialization/types as sszTypes,
+  ../../version,
   ".."/[crypto, digest, presets]
 
 export
@@ -17,7 +17,8 @@ import
   stew/assign2,
   json_serialization,
   json_serialization/types as jsonTypes,
-  ../../ssz/types as sszTypes, ../digest,
+  ssz_serialization/types as sszTypes,
+  ../digest,
   ./phase0, ./altair,
   #web3/ethtypes,
   nimcrypto/utils
@@ -27,9 +27,9 @@ import
   # Nimble packages
   stew/byteutils,
   json_rpc/jsonmarshal,
+  ssz_serialization/types,
 
   # Local modules
-  ../../ssz/types,
   ../datatypes/base
 
 export jsonmarshal, base
@@ -10,8 +10,8 @@
 # Import this module to get access to `hash_tree_root` for spec types
 
 import
+  ssz_serialization/merkleization,
   ./ssz_codec,
-  ../ssz/merkleization,
   ./datatypes/[phase0, altair]
 
 export ssz_codec, merkleization
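Since `merkleization` is re-exported here, spec code keeps calling `hash_tree_root` unchanged; only its home module moves into the vendored package. A minimal sketch of that unchanged behaviour (illustrative, not part of this diff):

    import ssz_serialization/merkleization

    # A basic value is its own (padded) chunk: the uint64 is serialized
    # little-endian into bytes 0..7 of the 32-byte root.
    let root = hash_tree_root(42'u64)
    doAssert root.data[0] == 42'u8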
@@ -11,8 +11,8 @@
 # ssz_serialization directly! To bypass root updates, use `readSszBytes`
 # without involving SSZ!
 import
+  ssz_serialization,
   ./ssz_codec,
-  ../ssz/ssz_serialization,
   ./datatypes/[phase0, altair, merge],
   ./eth2_merkleization
 
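For reference, the vendored module defines the same `SSZ` serialization format, so the standard nim-serialization entry points keep working (a sketch, assuming `SSZ.encode`/`SSZ.decode` as provided by the serialization framework):

    import ssz_serialization

    let bytes = SSZ.encode(1234'u64)               # fixed-size -> 8 LE bytes
    doAssert bytes.len == 8
    doAssert SSZ.decode(bytes, uint64) == 1234'u64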
@@ -10,7 +10,7 @@
 
 import
   std/[typetraits],
-  ../ssz/codec,
+  ssz_serialization/codec,
   ../spec/datatypes/[phase0, altair],
   ./eth2_merkleization
 
ssz/bitseqs.nim (deleted)
@@ -1,333 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2020 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}

import
  stew/[bitops2, endians2, byteutils, ptrops],
  json_serialization

export json_serialization

type
  Bytes = seq[byte]

  BitSeq* = distinct Bytes
    ## The current design of BitSeq tries to follow precisely
    ## the bitwise representation of the SSZ bitlists.
    ## This is a relatively compact representation, but as
    ## evident from the code below, many of the operations
    ## are not trivial.

  BitArray*[bits: static int] = object
    bytes*: array[(bits + 7) div 8, byte]

func bitsLen*(bytes: openArray[byte]): int =
  let
    bytesCount = bytes.len
    lastByte = bytes[bytesCount - 1]
    markerPos = log2trunc(lastByte)

  bytesCount * 8 - (8 - markerPos)

template len*(s: BitSeq): int =
  bitsLen(Bytes s)

template len*(a: BitArray): int =
  a.bits

func add*(s: var BitSeq, value: bool) =
  let
    lastBytePos = s.Bytes.len - 1
    lastByte = s.Bytes[lastBytePos]

  if (lastByte and byte(128)) == 0:
    # There is at least one leading zero, so we have enough
    # room to store the new bit
    let markerPos = log2trunc(lastByte)
    s.Bytes[lastBytePos].changeBit markerPos, value
    s.Bytes[lastBytePos].setBit markerPos + 1
  else:
    s.Bytes[lastBytePos].changeBit 7, value
    s.Bytes.add byte(1)

func toBytesLE(x: uint): array[sizeof(x), byte] =
  # stew/endians2 supports explicitly sized uints only
  when sizeof(uint) == 4:
    static: doAssert sizeof(uint) == sizeof(uint32)
    toBytesLE(x.uint32)
  elif sizeof(uint) == 8:
    static: doAssert sizeof(uint) == sizeof(uint64)
    toBytesLE(x.uint64)
  else:
    static: doAssert false, "requires a 32-bit or 64-bit platform"

func loadLEBytes(WordType: type, bytes: openArray[byte]): WordType =
  # TODO: this is a temporary proc until the endians API is improved
  var shift = 0
  for b in bytes:
    result = result or (WordType(b) shl shift)
    shift += 8

func storeLEBytes(value: SomeUnsignedInt, dst: var openArray[byte]) =
  doAssert dst.len <= sizeof(value)
  let bytesLE = toBytesLE(value)
  copyMem(addr dst[0], unsafeAddr bytesLE[0], dst.len)

template loopOverWords(lhs, rhs: BitSeq,
                       lhsIsVar, rhsIsVar: static bool,
                       WordType: type,
                       lhsBits, rhsBits, body: untyped) =
  const hasRhs = astToStr(lhs) != astToStr(rhs)

  let bytesCount = len Bytes(lhs)
  when hasRhs: doAssert len(Bytes(rhs)) == bytesCount

  var fullWordsCount = bytesCount div sizeof(WordType)
  let lastWordSize = bytesCount mod sizeof(WordType)

  block:
    var lhsWord: WordType
    when hasRhs:
      var rhsWord: WordType
    var firstByteOfLastWord, lastByteOfLastWord: int

    # TODO: Returning a `var` value from an iterator is always safe due to
    # the way inlining works, but currently the compiler reports an error
    # when a local variable escapes. We have to cheat it with this location
    # obfuscation through pointers:
    template lhsBits: auto = (addr(lhsWord))[]

    when hasRhs:
      template rhsBits: auto = (addr(rhsWord))[]

    template lastWordBytes(bitseq): auto =
      Bytes(bitseq).toOpenArray(firstByteOfLastWord, lastByteOfLastWord)

    template initLastWords =
      lhsWord = loadLEBytes(WordType, lastWordBytes(lhs))
      when hasRhs: rhsWord = loadLEBytes(WordType, lastWordBytes(rhs))

    if lastWordSize == 0:
      firstByteOfLastWord = bytesCount - sizeof(WordType)
      lastByteOfLastWord = bytesCount - 1
      dec fullWordsCount
    else:
      firstByteOfLastWord = bytesCount - lastWordSize
      lastByteOfLastWord = bytesCount - 1

    initLastWords()
    let markerPos = log2trunc(lhsWord)
    when hasRhs: doAssert log2trunc(rhsWord) == markerPos

    lhsWord.clearBit markerPos
    when hasRhs: rhsWord.clearBit markerPos

    body

    when lhsIsVar or rhsIsVar:
      let
        markerBit = uint(1 shl markerPos)
        mask = markerBit - 1'u

      when lhsIsVar:
        let lhsEndResult = (lhsWord and mask) or markerBit
        storeLEBytes(lhsEndResult, lastWordBytes(lhs))

      when rhsIsVar:
        let rhsEndResult = (rhsWord and mask) or markerBit
        storeLEBytes(rhsEndResult, lastWordBytes(rhs))

  var lhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(lhs)[0])
  let lhsEndAddr = offset(lhsCurrAddr, fullWordsCount)
  when hasRhs:
    var rhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(rhs)[0])

  while lhsCurrAddr < lhsEndAddr:
    template lhsBits: auto = lhsCurrAddr[]
    when hasRhs:
      template rhsBits: auto = rhsCurrAddr[]

    body

    lhsCurrAddr = offset(lhsCurrAddr, 1)
    when hasRhs: rhsCurrAddr = offset(rhsCurrAddr, 1)

iterator words*(x: var BitSeq): var uint =
  loopOverWords(x, x, true, false, uint, word, wordB):
    yield word

iterator words*(x: BitSeq): uint =
  loopOverWords(x, x, false, false, uint, word, word):
    yield word

iterator words*(a, b: BitSeq): (uint, uint) =
  loopOverWords(a, b, false, false, uint, wordA, wordB):
    yield (wordA, wordB)

iterator words*(a: var BitSeq, b: BitSeq): (var uint, uint) =
  loopOverWords(a, b, true, false, uint, wordA, wordB):
    yield (wordA, wordB)

iterator words*(a, b: var BitSeq): (var uint, var uint) =
  loopOverWords(a, b, true, true, uint, wordA, wordB):
    yield (wordA, wordB)

func `[]`*(s: BitSeq, pos: Natural): bool {.inline.} =
  doAssert pos < s.len
  s.Bytes.getBit pos

func `[]=`*(s: var BitSeq, pos: Natural, value: bool) {.inline.} =
  doAssert pos < s.len
  s.Bytes.changeBit pos, value

func setBit*(s: var BitSeq, pos: Natural) {.inline.} =
  doAssert pos < s.len
  setBit s.Bytes, pos

func clearBit*(s: var BitSeq, pos: Natural) {.inline.} =
  doAssert pos < s.len
  clearBit s.Bytes, pos

func init*(T: type BitSeq, len: int): T =
  result = BitSeq newSeq[byte](1 + len div 8)
  Bytes(result).setBit len

func init*(T: type BitArray): T =
  # The default zero-initialization is fine
  discard

template `[]`*(a: BitArray, pos: Natural): bool =
  getBit a.bytes, pos

template `[]=`*(a: var BitArray, pos: Natural, value: bool) =
  changeBit a.bytes, pos, value

template setBit*(a: var BitArray, pos: Natural) =
  setBit a.bytes, pos

template clearBit*(a: var BitArray, pos: Natural) =
  clearBit a.bytes, pos

# TODO: Submit this to the standard library as `cmp`
# At the moment, it doesn't work quite well because Nim selects
# the generic cmp[T] from the system module instead of choosing
# the openArray overload
func compareArrays[T](a, b: openArray[T]): int =
  result = cmp(a.len, b.len)
  if result != 0: return

  for i in 0 ..< a.len:
    result = cmp(a[i], b[i])
    if result != 0: return

template cmp*(a, b: BitSeq): int =
  compareArrays(Bytes a, Bytes b)

template `==`*(a, b: BitSeq): bool =
  cmp(a, b) == 0

func `$`*(a: BitSeq | BitArray): string =
  let length = a.len
  result = newStringOfCap(2 + length)
  result.add "0b"
  for i in countdown(length - 1, 0):
    result.add if a[i]: '1' else: '0'

func incl*(tgt: var BitSeq, src: BitSeq) =
  # Update `tgt` to include the bits of `src`, as if applying `or` to each bit
  doAssert tgt.len == src.len
  for tgtWord, srcWord in words(tgt, src):
    tgtWord = tgtWord or srcWord

func overlaps*(a, b: BitSeq): bool =
  for wa, wb in words(a, b):
    if (wa and wb) != 0:
      return true

func countOverlap*(a, b: BitSeq): int =
  var res = 0
  for wa, wb in words(a, b):
    res += countOnes(wa and wb)
  res

func isSubsetOf*(a, b: BitSeq): bool =
  let alen = a.len
  doAssert b.len == alen
  for i in 0 ..< alen:
    if a[i] and not b[i]:
      return false
  true

func isZeros*(x: BitSeq): bool =
  for w in words(x):
    if w != 0: return false
  return true

func isZeros*(x: BitArray): bool =
  x == default(type(x))

func countOnes*(x: BitSeq): int =
  # Count the number of set bits
  var res = 0
  for w in words(x):
    res += w.countOnes()
  res

func clear*(x: var BitSeq) =
  for w in words(x):
    w = 0

func countZeros*(x: BitSeq): int =
  x.len() - x.countOnes()

template bytes*(x: BitSeq): untyped =
  seq[byte](x)

iterator items*(x: BitArray): bool =
  for i in 0..<x.bits:
    yield x[i]

iterator pairs*(x: BitArray): (int, bool) =
  for i in 0..<x.bits:
    yield (i, x[i])

func incl*(a: var BitArray, b: BitArray) =
  # Update `a` to include the bits of `b`, as if applying `or` to each bit
  for i in 0..<a.bytes.len:
    a.bytes[i] = a.bytes[i] or b.bytes[i]

func clear*(a: var BitArray) =
  for b in a.bytes.mitems(): b = 0

# Set operations
func `+`*(a, b: BitArray): BitArray =
  for i in 0..<a.bytes.len:
    result.bytes[i] = a.bytes[i] or b.bytes[i]

func `-`*(a, b: BitArray): BitArray =
  for i in 0..<a.bytes.len:
    result.bytes[i] = a.bytes[i] and (not b.bytes[i])

iterator oneIndices*(a: BitArray): int =
  for i in 0..<a.len:
    if a[i]: yield i

func countOnes*(a: BitArray): int =
  # TODO: This can be optimised to work on words
  for bit in a:
    if bit: inc result

Json.useCustomSerialization(BitSeq):
  read:
    try:
      BitSeq reader.readValue(string).hexToSeqByte
    except ValueError:
      raiseUnexpectedValue(reader, "A BitSeq value should be a valid hex string")

  write:
    writer.writeValue "0x" & seq[byte](value).toHex
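For context: the removed BitSeq follows the SSZ bitlist wire format exactly, storing the data bits plus one marker bit that encodes the length (hence the `1 + len div 8` allocation in `init`). A short sketch of the API, which now lives in nim-ssz-serialization:

    var bits = BitSeq.init(8)       # 8 data bits -> bytes [0x00, 0x01]; 0x01 is the length marker
    doAssert bits.len == 8
    bits.setBit 3
    doAssert bits[3]
    doAssert bits.countOnes() == 1  # the marker bit is not counted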
ssz/dynamic_navigator.nim (deleted)
@@ -1,163 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.pragma: raisesssz, raises: [Defect, IOError, MalformedSszError, SszSizeMismatchError].}

import
  std/[strutils, parseutils],
  stew/objects, faststreams/outputs, json_serialization/writer,
  ./codec, ./types, ./navigator

export
  codec, navigator, types

type
  ObjKind = enum
    Record
    Indexable
    LeafValue

  FieldInfo = ref object
    name: string
    fieldType: TypeInfo
    navigator: proc (m: MemRange): MemRange {.gcsafe, noSideEffect, raisesssz.}

  TypeInfo = ref object
    case kind: ObjKind
    of Record:
      fields: seq[FieldInfo]
    of Indexable:
      elemType: TypeInfo
      navigator: proc (m: MemRange, idx: int): MemRange {.gcsafe, noSideEffect, raisesssz.}
    else:
      discard

    jsonPrinter: proc (m: MemRange,
                       outStream: OutputStream,
                       pretty: bool) {.gcsafe, raisesssz.}

  DynamicSszNavigator* = object
    m: MemRange
    typ: TypeInfo

proc jsonPrinterImpl[T](m: MemRange, outStream: OutputStream, pretty: bool) {.raisesssz.} =
  var typedNavigator = sszMount(m, T)
  var jsonWriter = Json.Writer.init(outStream, pretty)
  # TODO: it should be possible to serialize the navigator object
  #       without dereferencing it (to avoid the intermediate value).
  writeValue(jsonWriter, typedNavigator[])

func findField(fields: seq[FieldInfo], name: string): FieldInfo =
  # TODO: Replace this with a binary search?
  #       Will it buy us anything when there are only few fields?
  for field in fields:
    if field.name == name:
      return field

func indexableNavigatorImpl[T](m: MemRange, idx: int): MemRange {.raisesssz.} =
  var typedNavigator = sszMount(m, T)
  getMemRange(typedNavigator[idx])

func fieldNavigatorImpl[RecordType; FieldType;
                        fieldName: static string](m: MemRange): MemRange {.raisesssz.} =
  # TODO: Make sure this doesn't fail with a Defect when
  #       navigating to an inactive field in a case object.
  var typedNavigator = sszMount(m, RecordType)
  getMemRange navigateToField(typedNavigator, fieldName, FieldType)

func genTypeInfo(T: type): TypeInfo {.gcsafe.}

proc typeInfo*(T: type): TypeInfo =
  let res {.global.} = genTypeInfo(T)

  # TODO This will be safer if the RTTI object use only manually
  #      managed memory, but the `fields` sequence right now make
  #      things harder. We'll need to switch to a different seq type.
  {.gcsafe, noSideEffect.}: res

func genTypeInfo(T: type): TypeInfo =
  mixin toSszType, enumAllSerializedFields
  type SszType = type toSszType(declval T)
  result = when type(SszType) isnot T:
    TypeInfo(kind: LeafValue)
  elif T is object:
    var fields: seq[FieldInfo]
    enumAllSerializedFields(T):
      fields.add FieldInfo(name: fieldName,
                           fieldType: typeInfo(FieldType),
                           navigator: fieldNavigatorImpl[T, FieldType, fieldName])
    TypeInfo(kind: Record, fields: fields)
  elif T is seq|array:
    TypeInfo(kind: Indexable,
             elemType: typeInfo(ElemType(T)),
             navigator: indexableNavigatorImpl[T])
  else:
    TypeInfo(kind: LeafValue)

  result.jsonPrinter = jsonPrinterImpl[T]

func `[]`*(n: DynamicSszNavigator, idx: int): DynamicSszNavigator {.raisesssz.} =
  doAssert n.typ.kind == Indexable
  DynamicSszNavigator(m: n.typ.navigator(n.m, idx), typ: n.typ.elemType)

func navigate*(n: DynamicSszNavigator, path: string): DynamicSszNavigator {.
    raises: [Defect, KeyError, IOError, MalformedSszError, SszSizeMismatchError, ValueError].} =
  case n.typ.kind
  of Record:
    let fieldInfo = n.typ.fields.findField(path)
    if fieldInfo == nil:
      raise newException(KeyError, "Unrecognized field name: " & path)
    return DynamicSszNavigator(m: fieldInfo.navigator(n.m),
                               typ: fieldInfo.fieldType)
  of Indexable:
    var idx: int
    let consumed = parseInt(path, idx)
    if consumed == 0 or idx < 0:
      raise newException(KeyError, "Indexing should be done with natural numbers")
    return n[idx]
  else:
    doAssert false, "Navigation should be terminated once you reach a leaf value"

template navigatePathImpl(nav, iterablePathFragments: untyped) =
  result = nav
  for pathFragment in iterablePathFragments:
    if pathFragment.len == 0:
      continue
    result = result.navigate(pathFragment)
    if result.typ.kind == LeafValue:
      return

func navigatePath*(n: DynamicSszNavigator, path: string): DynamicSszNavigator {.
    raises: [Defect, IOError, ValueError, MalformedSszError, SszSizeMismatchError].} =
  navigatePathImpl n, split(path, '/')

func navigatePath*(n: DynamicSszNavigator, path: openArray[string]): DynamicSszNavigator {.
    raises: [Defect, IOError, ValueError, MalformedSszError, SszSizeMismatchError].} =
  navigatePathImpl n, path

func init*(T: type DynamicSszNavigator,
           bytes: openArray[byte], Navigated: type): T =
  T(m: MemRange(startAddr: unsafeAddr bytes[0], length: bytes.len),
    typ: typeInfo(Navigated))

proc writeJson*(n: DynamicSszNavigator, outStream: OutputStream, pretty = true) {.raisesssz.} =
  n.typ.jsonPrinter(n.m, outStream, pretty)

func toJson*(n: DynamicSszNavigator, pretty = true): string {.raisesssz.} =
  var outStream = memoryOutput()
  {.noSideEffect.}:
    # We are assuming that there are no side-effects here
    # because we are using a `memoryOutput`. The computed
    # side-effects are coming from the fact that the dynamic
    # dispatch mechanisms used in faststreams may be reading
    # from a file or a network device.
    writeJson(n, outStream, pretty)
  outStream.getOutput(string)
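The removed dynamic navigator resolves string paths against a raw SSZ blob at runtime and can render any reachable field as JSON (see `toJson` above). A hypothetical usage sketch, with `sszBytes` standing in for real serialized state bytes:

    let nav = DynamicSszNavigator.init(sszBytes, phase0.BeaconState)
    echo nav.navigatePath("slot").toJson(pretty = false)   # decode a single field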
ssz/merkleization.nim (deleted)
@@ -1,691 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# This module contains the parts necessary to create a merkle hash from the core
# SSZ types outlined in the spec:
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/ssz/simple-serialize.md#merkleization

{.push raises: [Defect].}

import
  stew/[bitops2, endians2, ptrops],
  stew/ranges/ptr_arith,
  serialization/testing/tracing,
  ../spec/digest,
  "."/[bitseqs, codec, types]

export
  codec, bitseqs, digest, types

when hasSerializationTracing:
  import stew/byteutils, typetraits

const
  zero64 = default array[64, byte]
  bitsPerChunk = bytesPerChunk * 8

func binaryTreeHeight*(totalElements: Limit): int =
  bitWidth nextPow2(uint64 totalElements)

type
  # TODO Figure out what would be the right type for this.
  #      It probably fits in uint16 for all practical purposes.
  GeneralizedIndex* = uint32

  SszMerkleizerImpl = object
    combinedChunks: ptr UncheckedArray[Eth2Digest]
    totalChunks: uint64
    topIndex: int

  SszMerkleizer*[limit: static[Limit]] = object
    combinedChunks: ref array[binaryTreeHeight limit, Eth2Digest]
    impl: SszMerkleizerImpl

template chunks*(m: SszMerkleizerImpl): openArray[Eth2Digest] =
  m.combinedChunks.toOpenArray(0, m.topIndex)

template getChunkCount*(m: SszMerkleizer): uint64 =
  m.impl.totalChunks

template getCombinedChunks*(m: SszMerkleizer): openArray[Eth2Digest] =
  toOpenArray(m.impl.combinedChunks, 0, m.impl.topIndex)

func digest(a, b: openArray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "DIGESTING ARRAYS ", toHex(a), " ", toHex(b)
    trs toHex(a)
    trs toHex(b)

    h.update a
    h.update b
  trs "HASH RESULT ", result

func digest(a, b, c: openArray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "DIGESTING ARRAYS ", toHex(a), " ", toHex(b), " ", toHex(c)

    h.update a
    h.update b
    h.update c
  trs "HASH RESULT ", result

func mergeBranches(existing: Eth2Digest, newData: openArray[byte]): Eth2Digest =
  trs "MERGING BRANCHES OPEN ARRAY"

  let paddingBytes = bytesPerChunk - newData.len
  digest(existing.data, newData, zero64.toOpenArray(0, paddingBytes - 1))

template mergeBranches(existing: Eth2Digest, newData: array[32, byte]): Eth2Digest =
  trs "MERGING BRANCHES ARRAY"
  digest(existing.data, newData)

template mergeBranches(a, b: Eth2Digest): Eth2Digest =
  trs "MERGING BRANCHES DIGEST"
  digest(a.data, b.data)

func computeZeroHashes: array[sizeof(Limit) * 8, Eth2Digest] =
  result[0] = Eth2Digest()
  for i in 1 .. result.high:
    result[i] = mergeBranches(result[i - 1], result[i - 1])

const zeroHashes* = computeZeroHashes()

func addChunk*(merkleizer: var SszMerkleizerImpl, data: openArray[byte]) =
  doAssert data.len > 0 and data.len <= bytesPerChunk

  if getBitLE(merkleizer.totalChunks, 0):
    var hash = mergeBranches(merkleizer.combinedChunks[0], data)

    for i in 1 .. merkleizer.topIndex:
      trs "ITERATING"
      if getBitLE(merkleizer.totalChunks, i):
        trs "CALLING MERGE BRANCHES"
        hash = mergeBranches(merkleizer.combinedChunks[i], hash)
      else:
        trs "WRITING FRESH CHUNK AT ", i, " = ", hash
        merkleizer.combinedChunks[i] = hash
        break
  else:
    let paddingBytes = bytesPerChunk - data.len

    merkleizer.combinedChunks[0].data[0..<data.len] = data
    merkleizer.combinedChunks[0].data[data.len..<bytesPerChunk] =
      zero64.toOpenArray(0, paddingBytes - 1)

    trs "WROTE BASE CHUNK ",
      toHex(merkleizer.combinedChunks[0].data), " ", data.len

  inc merkleizer.totalChunks

template isOdd(x: SomeNumber): bool =
  (x and 1) != 0

func addChunkAndGenMerkleProof*(merkleizer: var SszMerkleizerImpl,
                                hash: Eth2Digest,
                                outProof: var openArray[Eth2Digest]) =
  var
    hashWrittenToMerkleizer = false
    hash = hash

  doAssert merkleizer.topIndex < outProof.len

  for level in 0 .. merkleizer.topIndex:
    if getBitLE(merkleizer.totalChunks, level):
      outProof[level] = merkleizer.combinedChunks[level]
      hash = mergeBranches(merkleizer.combinedChunks[level], hash)
    else:
      if not hashWrittenToMerkleizer:
        merkleizer.combinedChunks[level] = hash
        hashWrittenToMerkleizer = true
      outProof[level] = zeroHashes[level]
      hash = mergeBranches(hash, zeroHashes[level])

  merkleizer.totalChunks += 1

func completeStartedChunk(merkleizer: var SszMerkleizerImpl,
                          hash: Eth2Digest, atLevel: int) =
  when false:
    let
      insertedChunksCount = 1'u64 shl (atLevel - 1)
      chunksStateMask = (insertedChunksCount shl 1) - 1
    doAssert (merkleizer.totalChunks and chunksStateMask) == insertedChunksCount

  var hash = hash
  for i in atLevel .. merkleizer.topIndex:
    if getBitLE(merkleizer.totalChunks, i):
      hash = mergeBranches(merkleizer.combinedChunks[i], hash)
    else:
      merkleizer.combinedChunks[i] = hash
      break

func addChunksAndGenMerkleProofs*(merkleizer: var SszMerkleizerImpl,
                                  chunks: openArray[Eth2Digest]): seq[Eth2Digest] =
  doAssert chunks.len > 0 and merkleizer.topIndex > 0

  let proofHeight = merkleizer.topIndex + 1
  result = newSeq[Eth2Digest](chunks.len * proofHeight)

  if chunks.len == 1:
    merkleizer.addChunkAndGenMerkleProof(chunks[0], result)
    return

  let newTotalChunks = merkleizer.totalChunks + chunks.len.uint64

  var
    # A perfect binary tree will take either `chunks.len * 2` values if the
    # number of elements in the base layer is odd and `chunks.len * 2 - 1`
    # otherwise. Each row may also need a single extra element at most if
    # it must be combined with the existing values in the Merkleizer:
    merkleTree = newSeqOfCap[Eth2Digest](chunks.len + merkleizer.topIndex)
    inRowIdx = merkleizer.totalChunks
    postUpdateInRowIdx = newTotalChunks
    zeroMixed = false

  template writeResult(chunkIdx, level: int, chunk: Eth2Digest) =
    result[chunkIdx * proofHeight + level] = chunk

  # We'll start by generating the first row of the merkle tree.
  var currPairEnd = if inRowIdx.isOdd:
    # an odd chunk number means that we must combine the
    # hash with the existing pending sibling hash in the
    # merkleizer.
    writeResult(0, 0, merkleizer.combinedChunks[0])
    merkleTree.add mergeBranches(merkleizer.combinedChunks[0], chunks[0])

    # TODO: can we immediately write this out?
    merkleizer.completeStartedChunk(merkleTree[^1], 1)
    2
  else:
    1

  if postUpdateInRowIdx.isOdd:
    merkleizer.combinedChunks[0] = chunks[^1]

  while currPairEnd < chunks.len:
    writeResult(currPairEnd - 1, 0, chunks[currPairEnd])
    writeResult(currPairEnd, 0, chunks[currPairEnd - 1])
    merkleTree.add mergeBranches(chunks[currPairEnd - 1],
                                 chunks[currPairEnd])
    currPairEnd += 2

  if currPairEnd - 1 < chunks.len:
    zeroMixed = true
    writeResult(currPairEnd - 1, 0, zeroHashes[0])
    merkleTree.add mergeBranches(chunks[currPairEnd - 1],
                                 zeroHashes[0])
  var
    level = 0
    baseChunksPerElement = 1
    treeRowStart = 0
    rowLen = merkleTree.len

  template writeProofs(rowChunkIdx: int, hash: Eth2Digest) =
    let
      startAbsIdx = (inRowIdx.int + rowChunkIdx) * baseChunksPerElement
      endAbsIdx = startAbsIdx + baseChunksPerElement
      startResIdx = max(startAbsIdx - merkleizer.totalChunks.int, 0)
      endResIdx = min(endAbsIdx - merkleizer.totalChunks.int, chunks.len)

    for resultPos in startResIdx ..< endResIdx:
      writeResult(resultPos, level, hash)

  if rowLen > 1:
    while level < merkleizer.topIndex:
      inc level
      baseChunksPerElement *= 2
      inRowIdx = inRowIdx div 2
      postUpdateInRowIdx = postUpdateInRowIdx div 2

      var currPairEnd = if inRowIdx.isOdd:
        # an odd chunk number means that we must combine the
        # hash with the existing pending sibling hash in the
        # merkleizer.
        writeProofs(0, merkleizer.combinedChunks[level])
        merkleTree.add mergeBranches(merkleizer.combinedChunks[level],
                                     merkleTree[treeRowStart])

        # TODO: can we immediately write this out?
        merkleizer.completeStartedChunk(merkleTree[^1], level + 1)
        2
      else:
        1

      if postUpdateInRowIdx.isOdd:
        merkleizer.combinedChunks[level] = merkleTree[treeRowStart + rowLen -
                                                      ord(zeroMixed) - 1]
      while currPairEnd < rowLen:
        writeProofs(currPairEnd - 1, merkleTree[treeRowStart + currPairEnd])
        writeProofs(currPairEnd, merkleTree[treeRowStart + currPairEnd - 1])
        merkleTree.add mergeBranches(merkleTree[treeRowStart + currPairEnd - 1],
                                     merkleTree[treeRowStart + currPairEnd])
        currPairEnd += 2

      if currPairEnd - 1 < rowLen:
        zeroMixed = true
        writeProofs(currPairEnd - 1, zeroHashes[level])
        merkleTree.add mergeBranches(merkleTree[treeRowStart + currPairEnd - 1],
                                     zeroHashes[level])

      treeRowStart += rowLen
      rowLen = merkleTree.len - treeRowStart

      if rowLen == 1:
        break

    doAssert rowLen == 1

    if (inRowIdx and 2) != 0:
      merkleizer.completeStartedChunk(
        mergeBranches(merkleizer.combinedChunks[level + 1], merkleTree[^1]),
        level + 2)

    if (not zeroMixed) and (postUpdateInRowIdx and 2) != 0:
      merkleizer.combinedChunks[level + 1] = merkleTree[^1]

    while level < merkleizer.topIndex:
      inc level
      baseChunksPerElement *= 2
      inRowIdx = inRowIdx div 2

      let hash = if getBitLE(merkleizer.totalChunks, level):
        merkleizer.combinedChunks[level]
      else:
        zeroHashes[level]

      writeProofs(0, hash)

  merkleizer.totalChunks = newTotalChunks

proc init*(S: type SszMerkleizer): S =
  new result.combinedChunks
  result.impl = SszMerkleizerImpl(
    combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](
      addr result.combinedChunks[][0]),
    topIndex: binaryTreeHeight(result.limit) - 1,
    totalChunks: 0)

proc init*(S: type SszMerkleizer,
           combinedChunks: openArray[Eth2Digest],
           totalChunks: uint64): S =
  new result.combinedChunks
  result.combinedChunks[][0 ..< combinedChunks.len] = combinedChunks
  result.impl = SszMerkleizerImpl(
    combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](
      addr result.combinedChunks[][0]),
    topIndex: binaryTreeHeight(result.limit) - 1,
    totalChunks: totalChunks)

proc copy*[L: static[Limit]](cloned: SszMerkleizer[L]): SszMerkleizer[L] =
  new result.combinedChunks
  result.combinedChunks[] = cloned.combinedChunks[]
  result.impl = SszMerkleizerImpl(
    combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](
      addr result.combinedChunks[][0]),
    topIndex: binaryTreeHeight(L) - 1,
    totalChunks: cloned.totalChunks)

template addChunksAndGenMerkleProofs*(
    merkleizer: var SszMerkleizer,
    chunks: openArray[Eth2Digest]): seq[Eth2Digest] =
  addChunksAndGenMerkleProofs(merkleizer.impl, chunks)

template addChunk*(merkleizer: var SszMerkleizer, data: openArray[byte]) =
  addChunk(merkleizer.impl, data)

template totalChunks*(merkleizer: SszMerkleizer): uint64 =
  merkleizer.impl.totalChunks

template getFinalHash*(merkleizer: SszMerkleizer): Eth2Digest =
  merkleizer.impl.getFinalHash

template createMerkleizer*(totalElements: static Limit): SszMerkleizerImpl =
  trs "CREATING A MERKLEIZER FOR ", totalElements

  const treeHeight = binaryTreeHeight totalElements
  var combinedChunks {.noInit.}: array[treeHeight, Eth2Digest]

  SszMerkleizerImpl(
    combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](addr combinedChunks),
    topIndex: treeHeight - 1,
    totalChunks: 0)

func getFinalHash*(merkleizer: SszMerkleizerImpl): Eth2Digest =
  if merkleizer.totalChunks == 0:
    return zeroHashes[merkleizer.topIndex]

  let
    bottomHashIdx = firstOne(merkleizer.totalChunks) - 1
    submittedChunksHeight = bitWidth(merkleizer.totalChunks - 1)
    topHashIdx = merkleizer.topIndex

  trs "BOTTOM HASH ", bottomHashIdx
  trs "SUBMITTED HEIGHT ", submittedChunksHeight
  trs "TOP HASH IDX ", topHashIdx

  if bottomHashIdx != submittedChunksHeight:
    # Our tree is not finished. We must complete the work in progress
    # branches and then extend the tree to the right height.
    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                           zeroHashes[bottomHashIdx])

    for i in bottomHashIdx + 1 ..< topHashIdx:
      if getBitLE(merkleizer.totalChunks, i):
        result = mergeBranches(merkleizer.combinedChunks[i], result)
        trs "COMBINED"
      else:
        result = mergeBranches(result, zeroHashes[i])
        trs "COMBINED WITH ZERO"

  elif bottomHashIdx == topHashIdx:
    # We have a perfect tree (chunks == 2**n) at just the right height!
    result = merkleizer.combinedChunks[bottomHashIdx]
  else:
    # We have a perfect tree of user chunks, but we have more work to
    # do - we must extend it to reach the desired height
    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                           zeroHashes[bottomHashIdx])

    for i in bottomHashIdx + 1 ..< topHashIdx:
      result = mergeBranches(result, zeroHashes[i])

func mixInLength*(root: Eth2Digest, length: int): Eth2Digest =
  var dataLen: array[32, byte]
  dataLen[0..<8] = uint64(length).toBytesLE()
  mergeBranches(root, dataLen)

func hash_tree_root*(x: auto): Eth2Digest {.gcsafe, raises: [Defect].}

template merkleizeFields(totalElements: static Limit, body: untyped): Eth2Digest =
  var merkleizer {.inject.} = createMerkleizer(totalElements)

  template addField(field) =
    let hash = hash_tree_root(field)
    trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
    addChunk(merkleizer, hash.data)
    trs "CHUNK ADDED"

  body

  getFinalHash(merkleizer)

template writeBytesLE(chunk: var array[bytesPerChunk, byte], atParam: int,
                      val: UintN) =
  let at = atParam
  chunk[at ..< at + sizeof(val)] = toBytesLE(val)

func chunkedHashTreeRootForBasicTypes[T](merkleizer: var SszMerkleizerImpl,
                                         arr: openArray[T]): Eth2Digest =
  static:
    doAssert T is BasicType
    doAssert bytesPerChunk mod sizeof(T) == 0

  if arr.len == 0:
    return getFinalHash(merkleizer)

  when sizeof(T) == 1 or cpuEndian == littleEndian:
    var
      remainingBytes = when sizeof(T) == 1: arr.len
                       else: arr.len * sizeof(T)
      pos = cast[ptr byte](unsafeAddr arr[0])

    while remainingBytes >= bytesPerChunk:
      merkleizer.addChunk(makeOpenArray(pos, bytesPerChunk))
      pos = offset(pos, bytesPerChunk)
      remainingBytes -= bytesPerChunk

    if remainingBytes > 0:
      merkleizer.addChunk(makeOpenArray(pos, remainingBytes))

  else:
    const valuesPerChunk = bytesPerChunk div sizeof(T)

    var writtenValues = 0

    var chunk: array[bytesPerChunk, byte]
    while writtenValues < arr.len - valuesPerChunk:
      for i in 0 ..< valuesPerChunk:
        chunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
      merkleizer.addChunk chunk
      inc writtenValues, valuesPerChunk

    let remainingValues = arr.len - writtenValues
    if remainingValues > 0:
      var lastChunk: array[bytesPerChunk, byte]
      for i in 0 ..< remainingValues:
        lastChunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
      merkleizer.addChunk lastChunk

  getFinalHash(merkleizer)

func bitListHashTreeRoot(merkleizer: var SszMerkleizerImpl, x: BitSeq): Eth2Digest =
  # TODO: Switch to a simpler BitList representation and
  #       replace this with `chunkedHashTreeRoot`
  var
    totalBytes = bytes(x).len
    lastCorrectedByte = bytes(x)[^1]

  if lastCorrectedByte == byte(1):
    if totalBytes == 1:
      # This is an empty bit list.
      # It should be hashed as a tree containing all zeros:
      return mergeBranches(zeroHashes[merkleizer.topIndex],
                           zeroHashes[0]) # this is the mixed length

    totalBytes -= 1
    lastCorrectedByte = bytes(x)[^2]
  else:
    let markerPos = log2trunc(lastCorrectedByte)
    lastCorrectedByte.clearBit(markerPos)

  var
    bytesInLastChunk = totalBytes mod bytesPerChunk
    fullChunks = totalBytes div bytesPerChunk

  if bytesInLastChunk == 0:
    fullChunks -= 1
    bytesInLastChunk = 32

  for i in 0 ..< fullChunks:
    let
      chunkStartPos = i * bytesPerChunk
      chunkEndPos = chunkStartPos + bytesPerChunk - 1

    merkleizer.addChunk bytes(x).toOpenArray(chunkStartPos, chunkEndPos)

  var
    lastChunk: array[bytesPerChunk, byte]
    chunkStartPos = fullChunks * bytesPerChunk

  for i in 0 .. bytesInLastChunk - 2:
    lastChunk[i] = bytes(x)[chunkStartPos + i]

  lastChunk[bytesInLastChunk - 1] = lastCorrectedByte

  merkleizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
  let contentsHash = merkleizer.getFinalHash
  mixInLength contentsHash, x.len

func maxChunksCount(T: type, maxLen: Limit): Limit =
  when T is BitList|BitArray:
    (maxLen + bitsPerChunk - 1) div bitsPerChunk
  elif T is array|List:
    maxChunkIdx(ElemType(T), maxLen)
  else:
    unsupported T # This should never happen

func hashTreeRootAux[T](x: T): Eth2Digest =
  when T is bool|char:
    result.data[0] = byte(x)
  elif T is UintN:
    when cpuEndian == bigEndian:
      result.data[0..<sizeof(x)] = toBytesLE(x)
    else:
      copyMem(addr result.data[0], unsafeAddr x, sizeof x)
  elif (when T is array: ElemType(T) is BasicType else: false):
    type E = ElemType(T)
    when sizeof(T) <= sizeof(result.data):
      when E is byte|bool or cpuEndian == littleEndian:
        copyMem(addr result.data[0], unsafeAddr x, sizeof x)
      else:
        var pos = 0
        for e in x:
          writeBytesLE(result.data, pos, e)
          pos += sizeof(E)
    else:
      trs "FIXED TYPE; USE CHUNK STREAM"
      var merkleizer = createMerkleizer(maxChunksCount(T, Limit x.len))
      chunkedHashTreeRootForBasicTypes(merkleizer, x)
  elif T is BitArray:
    hashTreeRootAux(x.bytes)
  elif T is SingleMemberUnion:
    doAssert x.selector == 0'u8
    merkleizeFields(Limit 2):
      addField hashTreeRoot(toSszType(x.value))
  elif T is array|object|tuple:
    trs "MERKLEIZING FIELDS"
    const totalFields = when T is array: len(x)
                        else: totalSerializedFields(T)
    merkleizeFields(Limit totalFields):
      x.enumerateSubFields(f):
        addField f
  #elif isCaseObject(T):
  #  # TODO implement this
  else:
    unsupported T

func hashTreeRootList(x: List|BitList): Eth2Digest =
  const maxLen = static(x.maxLen)
  type T = type(x)
  const limit = maxChunksCount(T, maxLen)
  var merkleizer = createMerkleizer(limit)

  when x is BitList:
    merkleizer.bitListHashTreeRoot(BitSeq x)
  else:
    type E = ElemType(T)
    let contentsHash = when E is BasicType:
      chunkedHashTreeRootForBasicTypes(merkleizer, asSeq x)
    else:
      for elem in x:
        let elemHash = hash_tree_root(elem)
        merkleizer.addChunk(elemHash.data)
      merkleizer.getFinalHash()
    mixInLength(contentsHash, x.len)

func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
  # The merged hash of the data at `chunkIdx` and `chunkIdx + 1`
  trs "DATA HASH ", chunkIdx, " ", x.data.len

  when x.T is BasicType:
    when cpuEndian == bigEndian:
      unsupported type x # No bigendian support here!

    let
      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
      byteIdx = chunkIdx * bytesPerChunk
      byteLen = x.data.len * sizeof(x.T)

    if byteIdx >= byteLen:
      zeroHashes[1]
    else:
      let
        nbytes = min(byteLen - byteIdx, 64)
        padding = 64 - nbytes

      digest(
        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
        toOpenArray(zero64, 0, int(padding - 1)))
  else:
    if chunkIdx + 1 > x.data.len():
      zeroHashes[x.maxDepth]
    elif chunkIdx + 1 == x.data.len():
      mergeBranches(
        hash_tree_root(x.data[chunkIdx]),
        Eth2Digest())
    else:
      mergeBranches(
        hash_tree_root(x.data[chunkIdx]),
        hash_tree_root(x.data[chunkIdx + 1]))

template mergedHash(x: HashList|HashArray, vIdxParam: int64): Eth2Digest =
  # The merged hash of the data at `vIdx` and `vIdx + 1`
  let vIdx = vIdxParam
  if vIdx >= x.maxChunks:
    let dataIdx = vIdx - x.maxChunks
    mergedDataHash(x, dataIdx)
  else:
    mergeBranches(
      hashTreeRootCached(x, vIdx),
      hashTreeRootCached(x, vIdx + 1))

func hashTreeRootCached*(x: HashList, vIdx: int64): Eth2Digest =
  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"

  let
    layer = layer(vIdx)
    idxInLayer = vIdx - (1'i64 shl layer)
    layerIdx = idxInLayer + x.indices[layer]

  trs "GETTING ", vIdx, " ", layerIdx, " ", layer, " ", x.indices.len

  doAssert layer < x.maxDepth
  if layerIdx >= x.indices[layer + 1]:
    trs "ZERO ", x.indices[layer], " ", x.indices[layer + 1]
    zeroHashes[x.maxDepth - layer]
  else:
    if not isCached(x.hashes[layerIdx]):
      # TODO oops. so much for maintaining non-mutability.
      let px = unsafeAddr x

      trs "REFRESHING ", vIdx, " ", layerIdx, " ", layer

      px[].hashes[layerIdx] = mergedHash(x, vIdx * 2)
    else:
      trs "CACHED ", layerIdx

    x.hashes[layerIdx]

func hashTreeRootCached*(x: HashArray, vIdx: int): Eth2Digest =
  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"

  if not isCached(x.hashes[vIdx]):
    # TODO oops. so much for maintaining non-mutability.
    let px = unsafeAddr x

    px[].hashes[vIdx] = mergedHash(x, vIdx * 2)

  return x.hashes[vIdx]

func hashTreeRootCached*(x: HashArray): Eth2Digest =
  hashTreeRootCached(x, 1) # Array does not use idx 0

func hashTreeRootCached*(x: HashList): Eth2Digest =
  if x.data.len == 0:
    mergeBranches(
      zeroHashes[x.maxDepth],
      zeroHashes[0]) # mixInLength with 0!
  else:
    if not isCached(x.hashes[0]):
      # TODO oops. so much for maintaining non-mutability.
      let px = unsafeAddr x
      px[].hashes[0] = mixInLength(hashTreeRootCached(x, 1), x.data.len)

    x.hashes[0]

func hash_tree_root*(x: auto): Eth2Digest =
  trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
  mixin toSszType

  result =
    when x is HashArray|HashList:
      hashTreeRootCached(x)
    elif x is List|BitList:
      hashTreeRootList(x)
    else:
      hashTreeRootAux toSszType(x)

  trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result
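The incremental merkleizer above folds chunks in as they arrive and keeps one pending hash per tree level. A sketch of the core flow (the leaf chunks are hypothetical placeholders):

    var m = createMerkleizer(4)            # room for 4 leaf chunks (tree height 3)
    let
      chunkA = default(array[32, byte])    # hypothetical 32-byte leaf chunks
      chunkB = default(array[32, byte])
    m.addChunk(chunkA)
    m.addChunk(chunkB)
    # With 2 of 4 leaves filled: root = H(H(A, B), H(zero, zero))
    let root = m.getFinalHash()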
@ -1,143 +0,0 @@
|
||||||
# beacon_chain
|
|
||||||
# Copyright (c) 2018-2020 Status Research & Development GmbH
|
|
||||||
# Licensed and distributed under either of
|
|
||||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
|
||||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
|
||||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
||||||
|
|
||||||
{.push raises: [Defect].}
|
|
||||||
{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}
|
|
||||||
|
|
||||||
import
|
|
||||||
stew/[ptrops, objects], stew/ranges/ptr_arith,
|
|
||||||
./codec, ./types
|
|
||||||
|
|
||||||
export codec, types
|
|
||||||
|
|
||||||
type
|
|
||||||
MemRange* = object
|
|
||||||
startAddr*: ptr byte
|
|
||||||
length*: int
|
|
||||||
|
|
||||||
SszNavigator*[T] = object
|
|
||||||
m: MemRange
|
|
||||||
|
|
||||||
func sszMount*(data: openArray[byte], T: type): SszNavigator[T] =
|
|
||||||
let startAddr = unsafeAddr data[0]
|
|
||||||
SszNavigator[T](m: MemRange(startAddr: startAddr, length: data.len))
|
|
||||||
|
|
||||||
func sszMount*(data: openArray[char], T: type): SszNavigator[T] =
|
|
||||||
let startAddr = cast[ptr byte](unsafeAddr data[0])
|
|
||||||
SszNavigator[T](m: MemRange(startAddr: startAddr, length: data.len))
|
|
||||||
|
|
||||||
template sszMount*(data: MemRange, T: type): SszNavigator[T] =
|
|
||||||
SszNavigator[T](m: data)
|
|
||||||
|
|
||||||
template getMemRange*(n: SszNavigator): MemRange =
|
|
||||||
# Please note that this accessor was created intentionally.
|
|
||||||
# We don't want to expose the `m` field, because the navigated
|
|
||||||
# type may have a field by that name. We wan't any dot field
|
|
||||||
# access to be redirected to the navigated type.
|
|
||||||
# For this reason, this template should always be used with
|
|
||||||
# the function call syntax `getMemRange(n)`.
|
|
||||||
n.m
|
|
||||||
|
|
||||||
template checkBounds(m: MemRange, offset: int) =
|
|
||||||
if offset > m.length:
|
|
||||||
raise newException(MalformedSszError, "Malformed SSZ")
|
|
||||||
|
|
||||||
template toOpenArray(m: MemRange): auto =
|
|
||||||
makeOpenArray(m.startAddr, m.length)
|
|
||||||
|
|
||||||
func navigateToField*[T](n: SszNavigator[T],
|
|
||||||
fieldName: static string,
|
|
||||||
FieldType: type): SszNavigator[FieldType] {.raisesssz.} =
|
|
||||||
mixin toSszType
|
|
||||||
type SszFieldType = type toSszType(declval FieldType)
|
|
||||||
|
|
||||||
const boundingOffsets = getFieldBoundingOffsets(T, fieldName)
|
|
||||||
checkBounds(n.m, boundingOffsets[1])
|
|
||||||
|
|
||||||
when isFixedSize(SszFieldType):
|
|
||||||
SszNavigator[FieldType](m: MemRange(
|
|
||||||
startAddr: offset(n.m.startAddr, boundingOffsets[0]),
|
|
||||||
length: boundingOffsets[1] - boundingOffsets[0]))
|
|
||||||
else:
|
|
||||||
template readOffset(off): int =
|
|
||||||
int fromSszBytes(uint32, makeOpenArray(offset(n.m.startAddr, off),
|
|
||||||
sizeof(uint32)))
|
|
||||||
let
|
|
||||||
startOffset = readOffset boundingOffsets[0]
|
|
||||||
endOffset = when boundingOffsets[1] == -1: n.m.length
|
|
||||||
else: readOffset boundingOffsets[1]
|
|
||||||
|
|
||||||
if endOffset < startOffset or endOffset > n.m.length:
|
|
||||||
raise newException(MalformedSszError, "Incorrect offset values")
|
|
||||||
|
|
||||||
SszNavigator[FieldType](m: MemRange(
|
|
||||||
startAddr: offset(n.m.startAddr, startOffset),
|
|
||||||
length: endOffset - startOffset))
|
|
||||||
|
|
||||||
template `.`*[T](n: SszNavigator[T], field: untyped): auto =
|
|
||||||
type RecType = T
|
|
||||||
type FieldType = type(default(RecType).field)
|
|
||||||
navigateToField(n, astToStr(field), FieldType)
|
|
||||||
|
|
||||||
func indexVarSizeList(m: MemRange, idx: int): MemRange {.raisesssz.} =
|
|
||||||
template readOffset(pos): int =
|
|
||||||
int fromSszBytes(uint32, makeOpenArray(offset(m.startAddr, pos), offsetSize))
|
|
||||||
|
|
||||||
let offsetPos = offsetSize * idx
|
|
||||||
checkBounds(m, offsetPos + offsetSize)
|
|
||||||
|
|
||||||
let firstOffset = readOffset 0
|
|
||||||
let listLen = firstOffset div offsetSize
|
|
||||||
|
|
||||||
if idx >= listLen:
|
|
||||||
# TODO: Use a RangeError here?
|
|
||||||
# This would require the user to check the `len` upfront
|
|
||||||
raise newException(MalformedSszError, "Indexing past the end")
|
|
||||||
|
|
||||||
let elemPos = readOffset offsetPos
|
|
||||||
checkBounds(m, elemPos)
|
|
||||||
|
|
||||||
let endPos = if idx < listLen - 1:
|
|
||||||
let nextOffsetPos = offsetPos + offsetSize
|
|
||||||
# TODO. Is there a way to remove this bounds check?
|
|
||||||
checkBounds(m, nextOffsetPos + offsetSize)
|
|
||||||
readOffset(offsetPos + nextOffsetPos)
|
|
||||||
else:
|
|
||||||
m.length
|
|
||||||
|
|
||||||
MemRange(startAddr: m.startAddr.offset(elemPos), length: endPos - elemPos)
|
|
||||||
|
|
||||||

template indexList(n, idx, T: untyped): untyped =
  type R = T
  mixin toSszType
  type ElemType = type toSszType(declval R)
  when isFixedSize(ElemType):
    const elemSize = fixedPortionSize(ElemType)
    let elemPos = idx * elemSize
    checkBounds(n.m, elemPos + elemSize)
    SszNavigator[R](m: MemRange(startAddr: offset(n.m.startAddr, elemPos),
                                length: elemSize))
  else:
    SszNavigator[R](m: indexVarSizeList(n.m, idx))

template `[]`*[T](n: SszNavigator[seq[T]], idx: int): SszNavigator[T] =
  indexList n, idx, T

template `[]`*[R, T](n: SszNavigator[array[R, T]], idx: int): SszNavigator[T] =
  indexList(n, idx, T)

func `[]`*[T](n: SszNavigator[T]): T {.raisesssz.} =
  mixin toSszType, fromSszBytes
  type SszRepr = type toSszType(declval T)
  when type(SszRepr) is type(T) or T is List:
    readSszValue(toOpenArray(n.m), result)
  else:
    fromSszBytes(T, toOpenArray(n.m))

converter derefNavigator*[T](n: SszNavigator[T]): T {.raisesssz.} =
  n[]
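
# Hedged usage sketch; the types are illustrative and mirror the navigator
# tests elsewhere in this commit. `sszMount` wraps encoded bytes without
# copying, dot access walks offsets lazily, and the converter above
# deserializes only the leaf that is finally read.
when isMainModule:
  type
    Baz = object
      i: uint64
    Bar = object
      baz: Baz
    Foo = object
      bar: Bar

  let encoded = SSZ.encode(Foo(bar: Bar(baz: Baz(i: 10))))
  let mounted = sszMount(encoded, Foo)
  doAssert mounted.bar.baz.i == 10'u64 # dereferenced via `derefNavigator`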

@ -1,250 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.push raises: [Defect].}
{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}

## SSZ serialization for core SSZ types, as specified in:
# https://github.com/ethereum/consensus-specs/blob/v1.0.1/ssz/simple-serialize.md#serialization

import
  std/typetraits,
  stew/[endians2, leb128, objects],
  serialization, serialization/testing/tracing,
  ./codec, ./bitseqs, ./types

export
  serialization, codec, types, bitseqs

type
  SszReader* = object
    stream: InputStream

  SszWriter* = object
    stream: OutputStream

  SizePrefixed*[T] = distinct T
  SszMaxSizeExceeded* = object of SerializationError

  VarSizedWriterCtx = object
    fixedParts: WriteCursor
    offset: int

  FixedSizedWriterCtx = object

serializationFormat SSZ

SSZ.setReader SszReader
SSZ.setWriter SszWriter, PreferredOutput = seq[byte]

template sizePrefixed*[TT](x: TT): untyped =
  type T = TT
  SizePrefixed[T](x)

proc init*(T: type SszReader,
           stream: InputStream): T =
  T(stream: stream)

proc writeFixedSized(s: var (OutputStream|WriteCursor), x: auto) {.raises: [Defect, IOError].} =
  mixin toSszType

  when x is byte:
    s.write x
  elif x is bool:
    s.write byte(ord(x))
  elif x is UintN:
    when cpuEndian == bigEndian:
      s.write toBytesLE(x)
    else:
      s.writeMemCopy x
  elif x is array:
    when x[0] is byte:
      trs "APPENDING FIXED SIZE BYTES", x
      s.write x
    else:
      for elem in x:
        trs "WRITING FIXED SIZE ARRAY ELEMENT"
        s.writeFixedSized toSszType(elem)
  elif x is tuple|object:
    enumInstanceSerializedFields(x, fieldName, field):
      trs "WRITING FIXED SIZE FIELD", fieldName
      s.writeFixedSized toSszType(field)
  else:
    unsupported x.type

template writeOffset(cursor: var WriteCursor, offset: int) =
  write cursor, toBytesLE(uint32 offset)

template supports*(_: type SSZ, T: type): bool =
  mixin toSszType
  anonConst compiles(fixedPortionSize toSszType(declval T))

func init*(T: type SszWriter, stream: OutputStream): T =
  result.stream = stream
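
# Quick sanity sketch: SSZ basic unsigned integers are little-endian on the
# wire, which is the byte order `writeFixedSized` produces above regardless
# of host endianness.
when isMainModule:
  doAssert SSZ.encode(0x01020304'u32) == @[byte 0x04, 0x03, 0x02, 0x01]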

proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe, raises: [Defect, IOError].}

proc beginRecord*(w: var SszWriter, TT: type): auto =
  type T = TT
  when isFixedSize(T):
    FixedSizedWriterCtx()
  else:
    const offset = when T is array|HashArray: len(T) * offsetSize
                   else: fixedPortionSize(T)
    VarSizedWriterCtx(offset: offset,
                      fixedParts: w.stream.delayFixedSizeWrite(offset))

template writeField*(w: var SszWriter,
                     ctx: var auto,
                     fieldName: string,
                     field: auto) =
  mixin toSszType
  when ctx is FixedSizedWriterCtx:
    writeFixedSized(w.stream, toSszType(field))
  else:
    type FieldType = type toSszType(field)

    when isFixedSize(FieldType):
      writeFixedSized(ctx.fixedParts, toSszType(field))
    else:
      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
      writeOffset(ctx.fixedParts, ctx.offset)
      let initPos = w.stream.pos
      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
      when FieldType is BitList:
        trs "BIT SEQ ", bytes(field)
      writeVarSizeType(w, toSszType(field))
      ctx.offset += w.stream.pos - initPos

template endRecord*(w: var SszWriter, ctx: var auto) =
  when ctx is VarSizedWriterCtx:
    finalize ctx.fixedParts

proc writeSeq[T](w: var SszWriter, value: seq[T])
                {.raises: [Defect, IOError].} =
  # Please note that `writeSeq` exists in order to reduce the code bloat
  # produced from generic instantiations of the unique `List[N, T]` types.
  when isFixedSize(T):
    trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
    for elem in value:
      w.stream.writeFixedSized toSszType(elem)
    trs "DONE"
  else:
    trs "WRITING LIST WITH VAR SIZE ELEMENTS"
    var offset = value.len * offsetSize
    var cursor = w.stream.delayFixedSizeWrite offset
    for elem in value:
      cursor.writeFixedSized uint32(offset)
      let initPos = w.stream.pos
      w.writeVarSizeType toSszType(elem)
      offset += w.stream.pos - initPos
    finalize cursor
    trs "DONE"
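
# Hedged sketch of the wire layout `writeSeq` emits for variable-size
# elements: one uint32 offset per element, then the payloads. With two
# byte-list elements [1] and [2, 3], the offsets are 8 and 9:
when isMainModule:
  type Inner = List[byte, 16]
  let l = List[Inner, 4].init(@[Inner.init(@[byte 1]), Inner.init(@[byte 2, 3])])
  doAssert SSZ.encode(l) == @[byte 8, 0, 0, 0, 9, 0, 0, 0, 1, 2, 3]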

proc writeVarSizeType(w: var SszWriter, value: auto) {.raises: [Defect, IOError].} =
  trs "STARTING VAR SIZE TYPE"

  when value is HashArray|HashList:
    writeVarSizeType(w, value.data)
  elif value is SingleMemberUnion:
    doAssert value.selector == 0'u8
    w.writeValue 0'u8
    w.writeValue value.value
  elif value is List:
    # We reduce code bloat by forwarding all `List` types to a general `seq[T]` proc.
    writeSeq(w, asSeq value)
  elif value is BitList:
    # ATTENTION! We can reuse `writeSeq` only as long as our BitList type is implemented
    # to internally match the binary representation of SSZ BitLists in memory.
    writeSeq(w, bytes value)
  elif value is object|tuple|array:
    trs "WRITING OBJECT OR ARRAY"
    var ctx = beginRecord(w, type value)
    enumerateSubFields(value, field):
      writeField w, ctx, astToStr(field), field
    endRecord w, ctx
  else:
    unsupported type(value)
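
# Hedged sketch of the `SingleMemberUnion` branch above: the encoding is a
# zero selector byte followed by the member's own SSZ bytes. This assumes
# the `SingleMemberUnion[T]` helper from ./types with `selector`/`value`
# fields, as referenced in the writer above.
when isMainModule:
  var u: SingleMemberUnion[uint32]
  u.value = 5
  doAssert SSZ.encode(u) == @[byte 0, 5, 0, 0, 0]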

proc writeValue*(w: var SszWriter, x: auto) {.gcsafe, raises: [Defect, IOError].} =
  mixin toSszType
  type T = type toSszType(x)

  when isFixedSize(T):
    w.stream.writeFixedSized toSszType(x)
  else:
    w.writeVarSizeType toSszType(x)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].}

func sszSizeForVarSizeList[T](value: openArray[T]): int =
  result = len(value) * offsetSize
  for elem in value:
    result += sszSize(toSszType elem)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].} =
  mixin toSszType
  type T = type toSszType(value)

  when isFixedSize(T):
    anonConst fixedPortionSize(T)

  elif T is array|List|HashList|HashArray:
    type E = ElemType(T)
    when isFixedSize(E):
      len(value) * anonConst(fixedPortionSize(E))
    elif T is HashArray:
      sszSizeForVarSizeList(value.data)
    elif T is array:
      sszSizeForVarSizeList(value)
    else:
      sszSizeForVarSizeList(asSeq value)

  elif T is BitList:
    return len(bytes(value))

  elif T is SingleMemberUnion:
    sszSize(toSszType value.value) + 1

  elif T is object|tuple:
    result = anonConst fixedPortionSize(T)
    enumInstanceSerializedFields(value, _{.used.}, field):
      type FieldType = type toSszType(field)
      when not isFixedSize(FieldType):
        result += sszSize(toSszType field)

  else:
    unsupported T
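
# Sanity sketch: `sszSize` must predict the writer exactly, i.e. equal the
# length of the produced encoding for any supported value.
when isMainModule:
  let v = List[uint64, 32].init(@[1'u64, 2, 3])
  doAssert sszSize(v) == SSZ.encode(v).len # 3 * 8 = 24 bytes here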

proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) {.raises: [Defect, IOError].} =
  var cursor = w.stream.delayVarSizeWrite(Leb128.maxLen(uint64))
  let initPos = w.stream.pos
  w.writeValue T(x)
  let length = toBytes(uint64(w.stream.pos - initPos), Leb128)
  cursor.finalWrite length.toOpenArray()

proc readValue*(r: var SszReader, val: var auto) {.
    raises: [Defect, MalformedSszError, SszSizeMismatchError, IOError].} =
  mixin readSszBytes
  type T = type val
  when isFixedSize(T):
    const minimalSize = fixedPortionSize(T)
    if r.stream.readable(minimalSize):
      readSszBytes(r.stream.read(minimalSize), val)
    else:
      raise newException(MalformedSszError, "SSZ input of insufficient size")
  else:
    # TODO(zah) Read the fixed portion first and precisely measure the
    #           size of the dynamic portion to consume the right number of bytes.
    readSszBytes(r.stream.read(r.stream.len.get), val)

proc readSszBytes*[T](data: openArray[byte], val: var T) {.
    raises: [Defect, MalformedSszError, SszSizeMismatchError].} =
  # Overload `readSszBytes` to perform custom operations on T after
  # deserialization
  mixin readSszValue
  readSszValue(data, val)
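
# Hedged usage sketch for the byte-level entry point above: decoding a
# fixed-size value straight from a buffer.
when isMainModule:
  var x: uint64
  readSszBytes(@[byte 0x2a, 0, 0, 0, 0, 0, 0, 0], x)
  doAssert x == 42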

@ -12,12 +12,10 @@ import
   ./testutil

 import # Unit test
-  ./ssz/all_tests as ssz_all_tests,
   ./test_action_tracker,
   ./test_attestation_pool,
   ./test_beacon_chain_db,
   ./test_beaconstate,
-  ./test_bitseqs,
   ./test_block_pool,
   ./test_datatypes,
   ./test_discovery,

@ -7,8 +7,8 @@

 import
   macros,
-  ../../beacon_chain/spec/datatypes/base,
-  ../../beacon_chain/ssz/types
+  ssz_serialization/types,
+  ../../beacon_chain/spec/datatypes/base
 # digest is necessary for them to be printed as hex

 export base.`==`

@ -1,5 +0,0 @@
{.used.}

import
  ./test_ssz_roundtrip,
  ./test_ssz_serialization

@ -1,16 +0,0 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

# this is not part of test_ssz because the roundtrip tests are incompatible
# with unittest2 as of writing
import
  serialization/testing/generic_suite,
  ../../beacon_chain/ssz/ssz_serialization

executeRoundTripTests SSZ

@ -1,355 +0,0 @@
# beacon_chain
# Copyright (c) 2018-2021 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

{.used.}

import
  std/typetraits,
  unittest2,
  ../../beacon_chain/ssz/[ssz_serialization, merkleization],
  ../../beacon_chain/ssz/[navigator, dynamic_navigator]

type
  SomeEnum = enum
    A, B, C

  Simple = object
    flag: bool
    # ignored {.dontSerialize.}: string
    data: array[256, bool]
    data2: HashArray[256, bool]

  NonFixed = object
    data: HashList[uint64, 1024]

template reject(stmt) =
  doAssert(not compiles(stmt))

static:
  doAssert isFixedSize(bool) == true

  doAssert fixedPortionSize(array[10, bool]) == 10
  doAssert fixedPortionSize(array[SomeEnum, uint64]) == 24
  doAssert fixedPortionSize(array[3..5, List[byte, 256]]) == 12

  doAssert isFixedSize(array[20, bool]) == true
  doAssert isFixedSize(Simple) == true
  doAssert isFixedSize(List[bool, 128]) == false

  doAssert isFixedSize(NonFixed) == false

  reject fixedPortionSize(int)

type
  ObjWithFields = object
    f0: uint8
    f1: uint32
    f2: array[20, byte]
    f3: Eth2Digest

static:
  doAssert fixedPortionSize(ObjWithFields) ==
    1 + 4 + sizeof(array[20, byte]) + (256 div 8)

type
  Foo = object
    bar: Bar

  BarList = List[uint64, 128]

  Bar = object
    b: BarList
    baz: Baz

  Baz = object
    i: uint64

func toDigest[N: static int](x: array[N, byte]): Eth2Digest =
  result.data[0 .. N-1] = x
suite "SSZ navigator":
|
|
||||||
test "simple object fields":
|
|
||||||
var foo = Foo(bar: Bar(b: BarList @[1'u64, 2, 3], baz: Baz(i: 10'u64)))
|
|
||||||
let encoded = SSZ.encode(foo)
|
|
||||||
|
|
||||||
check SSZ.decode(encoded, Foo) == foo
|
|
||||||
|
|
||||||
let mountedFoo = sszMount(encoded, Foo)
|
|
||||||
check mountedFoo.bar.b[] == BarList @[1'u64, 2, 3]
|
|
||||||
|
|
||||||
let mountedBar = mountedFoo.bar
|
|
||||||
check mountedBar.baz.i == 10'u64
|
|
||||||
|
|
||||||
test "lists with max size":
|
|
||||||
let a = [byte 0x01, 0x02, 0x03].toDigest
|
|
||||||
let b = [byte 0x04, 0x05, 0x06].toDigest
|
|
||||||
let c = [byte 0x07, 0x08, 0x09].toDigest
|
|
||||||
|
|
||||||
var xx: List[uint64, 16]
|
|
||||||
check:
|
|
||||||
not xx.setLen(17)
|
|
||||||
xx.setLen(16)
|
|
||||||
|
|
||||||
var leaves = HashList[Eth2Digest, 1'i64 shl 3]()
|
|
||||||
check:
|
|
||||||
leaves.add a
|
|
||||||
leaves.add b
|
|
||||||
leaves.add c
|
|
||||||
let root = hash_tree_root(leaves)
|
|
||||||
check $root == "5248085b588fab1dd1e03f3cd62201602b12e6560665935964f46e805977e8c5"
|
|
||||||
|
|
||||||
while leaves.len < 1 shl 3:
|
|
||||||
check:
|
|
||||||
leaves.add c
|
|
||||||
hash_tree_root(leaves) == hash_tree_root(leaves.data)
|
|
||||||
|
|
||||||
leaves = default(type leaves)
|
|
||||||
|
|
||||||
while leaves.len < (1 shl 3) - 1:
|
|
||||||
check:
|
|
||||||
leaves.add c
|
|
||||||
leaves.add c
|
|
||||||
hash_tree_root(leaves) == hash_tree_root(leaves.data)
|
|
||||||
|
|
||||||
leaves = default(type leaves)
|
|
||||||
|
|
||||||
while leaves.len < (1 shl 3) - 2:
|
|
||||||
check:
|
|
||||||
leaves.add c
|
|
||||||
leaves.add c
|
|
||||||
leaves.add c
|
|
||||||
hash_tree_root(leaves) == hash_tree_root(leaves.data)
|
|
||||||
|
|
||||||
for i in 0 ..< leaves.data.len - 2:
|
|
||||||
leaves[i] = a
|
|
||||||
leaves[i + 1] = b
|
|
||||||
leaves[i + 2] = c
|
|
||||||
check hash_tree_root(leaves) == hash_tree_root(leaves.data)
|
|
||||||
|
|
||||||
var leaves2 = HashList[Eth2Digest, 1'i64 shl 48]() # Large number!
|
|
||||||
check:
|
|
||||||
leaves2.add a
|
|
||||||
leaves2.add b
|
|
||||||
leaves2.add c
|
|
||||||
hash_tree_root(leaves2) == hash_tree_root(leaves2.data)
|
|
||||||
|
|
||||||
var leaves3 = HashList[Eth2Digest, 7]() # Non-power-of-2
|
|
||||||
check:
|
|
||||||
hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
|
|
||||||
leaves3.add a
|
|
||||||
leaves3.add b
|
|
||||||
leaves3.add c
|
|
||||||
hash_tree_root(leaves3) == hash_tree_root(leaves3.data)
|
|
||||||
|
|
||||||
test "basictype":
|
|
||||||
var leaves = HashList[uint64, 1'i64 shl 3]()
|
|
||||||
while leaves.len < leaves.maxLen:
|
|
||||||
check:
|
|
||||||
leaves.add leaves.len.uint64
|
|
||||||
hash_tree_root(leaves) == hash_tree_root(leaves.data)
|
|
||||||
|
|
||||||
suite "SSZ dynamic navigator":
|
|
||||||
test "navigating fields":
|
|
||||||
var fooOrig = Foo(bar: Bar(b: BarList @[1'u64, 2, 3], baz: Baz(i: 10'u64)))
|
|
||||||
let fooEncoded = SSZ.encode(fooOrig)
|
|
||||||
|
|
||||||
var navFoo = DynamicSszNavigator.init(fooEncoded, Foo)
|
|
||||||
|
|
||||||
var navBar = navFoo.navigate("bar")
|
|
||||||
check navBar.toJson(pretty = false) == """{"b":[1,2,3],"baz":{"i":10}}"""
|
|
||||||
|
|
||||||
var navB = navBar.navigate("b")
|
|
||||||
check navB.toJson(pretty = false) == "[1,2,3]"
|
|
||||||
|
|
||||||
var navBaz = navBar.navigate("baz")
|
|
||||||
var navI = navBaz.navigate("i")
|
|
||||||
check navI.toJson == "10"
|
|
||||||
|
|
||||||
expect KeyError:
|
|
||||||
discard navBar.navigate("biz")
|
|
||||||
|
|
||||||

type
  Obj = object
    arr: array[8, Eth2Digest]

    li: List[Eth2Digest, 8]

  HashObj = object
    arr: HashArray[8, Eth2Digest]

    li: HashList[Eth2Digest, 8]
suite "hash":
|
|
||||||
test "HashArray":
|
|
||||||
var
|
|
||||||
o = Obj()
|
|
||||||
ho = HashObj()
|
|
||||||
|
|
||||||
template both(body) =
|
|
||||||
block:
|
|
||||||
template it: auto {.inject.} = o
|
|
||||||
body
|
|
||||||
block:
|
|
||||||
template it: auto {.inject.} = ho
|
|
||||||
body
|
|
||||||
|
|
||||||
let htro = hash_tree_root(o)
|
|
||||||
let htrho = hash_tree_root(ho)
|
|
||||||
|
|
||||||
check:
|
|
||||||
o.arr == ho.arr.data
|
|
||||||
o.li == ho.li.data
|
|
||||||
htro == htrho
|
|
||||||
|
|
||||||
both: it.arr[0].data[0] = byte 1
|
|
||||||
|
|
||||||
both: check: it.li.add Eth2Digest()
|
|
||||||
|
|
||||||
var y: HashArray[32, uint64]
|
|
||||||
check: hash_tree_root(y) == hash_tree_root(y.data)
|
|
||||||
for i in 0..<y.len:
|
|
||||||
y[i] = 42'u64
|
|
||||||
check: hash_tree_root(y) == hash_tree_root(y.data)
|
|
||||||
|
|
||||||
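
  # Invariant the surrounding tests keep re-checking (informal note):
  # HashArray/HashList cache Merkle subtree hashes, so after any mutation
  # the cached root must equal hashing the plain `data` from scratch:
  #   hash_tree_root(cached) == hash_tree_root(cached.data)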
test "HashList fixed":
|
|
||||||
type MyList = HashList[uint64, 1024]
|
|
||||||
var
|
|
||||||
small, large: MyList
|
|
||||||
|
|
||||||
let
|
|
||||||
emptyBytes = SSZ.encode(small)
|
|
||||||
emptyRoot = hash_tree_root(small)
|
|
||||||
|
|
||||||
check: small.add(10'u64)
|
|
||||||
|
|
||||||
for i in 0..<100:
|
|
||||||
check: large.add(uint64(i))
|
|
||||||
|
|
||||||
let
|
|
||||||
sroot = hash_tree_root(small)
|
|
||||||
lroot = hash_tree_root(large)
|
|
||||||
|
|
||||||
check:
|
|
||||||
sroot == hash_tree_root(small.data)
|
|
||||||
lroot == hash_tree_root(large.data)
|
|
||||||
|
|
||||||
var
|
|
||||||
sbytes = SSZ.encode(small)
|
|
||||||
lbytes = SSZ.encode(large)
|
|
||||||
sloaded = SSZ.decode(sbytes, MyList)
|
|
||||||
lloaded = SSZ.decode(lbytes, MyList)
|
|
||||||
|
|
||||||
check:
|
|
||||||
sroot == hash_tree_root(sloaded)
|
|
||||||
lroot == hash_tree_root(lloaded)
|
|
||||||
|
|
||||||
# Here we smoke test that the cache is reset correctly even when reading
|
|
||||||
# into an existing instance - the instances are size-swapped so the reader
|
|
||||||
# will have some more work to do
|
|
||||||
readSszValue(sbytes, lloaded)
|
|
||||||
readSszValue(lbytes, sloaded)
|
|
||||||
|
|
||||||
check:
|
|
||||||
lroot == hash_tree_root(sloaded)
|
|
||||||
sroot == hash_tree_root(lloaded)
|
|
||||||
|
|
||||||
readSszValue(emptyBytes, sloaded)
|
|
||||||
check:
|
|
||||||
emptyRoot == hash_tree_root(sloaded)
|
|
||||||
|
|
||||||
test "HashList variable":
|
|
||||||
type MyList = HashList[NonFixed, 1024]
|
|
||||||
var
|
|
||||||
small, large: MyList
|
|
||||||
|
|
||||||
let
|
|
||||||
emptyBytes = SSZ.encode(small)
|
|
||||||
emptyRoot = hash_tree_root(small)
|
|
||||||
|
|
||||||
check: small.add(NonFixed())
|
|
||||||
|
|
||||||
for i in 0..<100:
|
|
||||||
check: large.add(NonFixed())
|
|
||||||
|
|
||||||
let
|
|
||||||
sroot = hash_tree_root(small)
|
|
||||||
lroot = hash_tree_root(large)
|
|
||||||
|
|
||||||
check:
|
|
||||||
sroot == hash_tree_root(small.data)
|
|
||||||
lroot == hash_tree_root(large.data)
|
|
||||||
|
|
||||||
var
|
|
||||||
sbytes = SSZ.encode(small)
|
|
||||||
lbytes = SSZ.encode(large)
|
|
||||||
sloaded = SSZ.decode(sbytes, MyList)
|
|
||||||
lloaded = SSZ.decode(lbytes, MyList)
|
|
||||||
|
|
||||||
check:
|
|
||||||
sroot == hash_tree_root(sloaded)
|
|
||||||
lroot == hash_tree_root(lloaded)
|
|
||||||
|
|
||||||
# Here we smoke test that the cache is reset correctly even when reading
|
|
||||||
# into an existing instance - the instances are size-swapped so the reader
|
|
||||||
# will have some more work to do
|
|
||||||
readSszValue(sbytes, lloaded)
|
|
||||||
readSszValue(lbytes, sloaded)
|
|
||||||
|
|
||||||
check:
|
|
||||||
lroot == hash_tree_root(sloaded)
|
|
||||||
sroot == hash_tree_root(lloaded)
|
|
||||||
|
|
||||||
readSszValue(emptyBytes, sloaded)
|
|
||||||
check:
|
|
||||||
emptyRoot == hash_tree_root(sloaded)
|
|
||||||
|
|
||||||
suite "underlong values":
|
|
||||||
template testit(t: auto) =
|
|
||||||
test "Underlong SSZ.decode: " & type(t).name():
|
|
||||||
let encoded = SSZ.encode(t)
|
|
||||||
expect(SszError):
|
|
||||||
discard SSZ.decode(encoded[0..^2], type t)
|
|
||||||
|
|
||||||
test "Underlong readSszBytes: " & type(t).name():
|
|
||||||
let encoded = SSZ.encode(t)
|
|
||||||
var t2: type t
|
|
||||||
expect(SszError):
|
|
||||||
readSszBytes(encoded[0..^2], t2)
|
|
||||||
|
|
||||||
test "Overlong SSZ.decode: " & type(t).name():
|
|
||||||
when not (t is BasicType | BitArray | array | HashArray | BitList | Simple):
|
|
||||||
let encoded = SSZ.encode(t)
|
|
||||||
expect(SszError):
|
|
||||||
discard SSZ.decode(encoded & @[32'u8], type t)
|
|
||||||
else:
|
|
||||||
skip # TODO Difference between decode and readSszBytes needs revisiting
|
|
||||||
|
|
||||||
test "Overlong readSszBytes: " & type(t).name():
|
|
||||||
when not (t is BitList | Simple):
|
|
||||||
let encoded = SSZ.encode(t)
|
|
||||||
var t2: type t
|
|
||||||
expect(SszError):
|
|
||||||
readSszBytes(encoded & @[32'u8], t2)
|
|
||||||
else:
|
|
||||||
skip # TODO Difference between decode and readSszBytes needs revisiting
|
|
||||||
|
|
||||||
# All SszType types
|
|
||||||
testit(default(bool))
|
|
||||||
testit(default(uint8))
|
|
||||||
testit(default(uint16))
|
|
||||||
testit(default(uint32))
|
|
||||||
testit(default(uint64))
|
|
||||||
testit(default(UInt128))
|
|
||||||
testit(default(UInt256))
|
|
||||||
testit(default(array[32, uint8]))
|
|
||||||
testit(default(HashArray[32, uint8]))
|
|
||||||
testit(List[uint64, 32].init(@[42'u64]))
|
|
||||||
testit(HashList[uint64, 32].init(@[42'u64]))
|
|
||||||
testit(default(BitArray[32]))
|
|
||||||
testit(BitList[32].init(10))
|
|
||||||
testit(default(Simple))
|
|
||||||
# TODO testit((32'u8, )) fails with a semcheck bug
|
|
|

@ -1,163 +0,0 @@
{.used.}

import
  unittest2,
  std/[sequtils, strformat],
  ../beacon_chain/ssz/bitseqs,
  ./testutil

suite "Bit fields":
  test "roundtrips BitArray":
    var
      a = BitArray[100]()
      b = BitArray[100]()
      c = BitArray[100]()

    check:
      not a[0]

    a.setBit 1

    check:
      not a[0]
      a[1]
      toSeq(a.oneIndices()) == [1]

      a + b == a
      a - b == a
      a - a == c # empty

      b + a == a
      b - b == c # b is empty

    b.setBit 2

    check:
      (a + b)[2]
      (b - a)[2]
      not (b - a)[1]

    a.setBit 99

    check:
      (a + b)[99]
      (b - a)[2]
      not (b - a)[1]
      not (b - a)[99]
      toSeq(a.oneIndices()) == [1, 99]

    a.incl(b)

    check:
      not a[0]
      a[1]
      a[2]

    a.clear()
    check:
      not a[1]

  test "roundtrips BitSeq":
    var
      a = BitSeq.init(100)
      b = BitSeq.init(100)

    check:
      not a[0]
      a.isZeros()

    a.setBit 1

    check:
      not a[0]
      a[1]
      a.countOnes() == 1
      a.countZeros() == 99
      not a.isZeros()
      a.countOverlap(a) == 1

    b.setBit 2

    a.incl(b)

    check:
      not a[0]
      a[1]
      a[2]
      a.countOverlap(a) == 2
      a.countOverlap(b) == 1
      b.countOverlap(a) == 1
      b.countOverlap(b) == 1
    a.clear()
    check:
      not a[1]
test "iterating words":
|
|
||||||
for bitCount in [8, 3, 7, 8, 14, 15, 16, 19, 260]:
|
|
||||||
checkpoint &"trying bit count {bitCount}"
|
|
||||||
var
|
|
||||||
a = BitSeq.init(bitCount)
|
|
||||||
b = BitSeq.init(bitCount)
|
|
||||||
bitsInWord = sizeof(uint) * 8
|
|
||||||
expectedWordCount = (bitCount div bitsInWord) + 1
|
|
||||||
|
|
||||||
for i in 0 ..< expectedWordCount:
|
|
||||||
let every3rdBit = i * sizeof(uint) * 8 + 2
|
|
||||||
a[every3rdBit] = true
|
|
||||||
b[every3rdBit] = true
|
|
||||||
|
|
||||||
for word in words(a):
|
|
||||||
check word == 4
|
|
||||||
word = 2
|
|
||||||
|
|
||||||
for wa, wb in words(a, b):
|
|
||||||
check wa == 2 and wb == 4
|
|
||||||
wa = 1
|
|
||||||
wb = 2
|
|
||||||
|
|
||||||
for i in 0 ..< expectedWordCount:
|
|
||||||
for j in 0 ..< bitsInWord:
|
|
||||||
let bitPos = i * bitsInWord + j
|
|
||||||
if bitPos < bitCount:
|
|
||||||
check a[j] == (j == 0)
|
|
||||||
check b[j] == (j == 1)
|
|
||||||
|
|
||||||
test "overlaps":
|
|
||||||
for bitCount in [1, 62, 63, 64, 91, 127, 128, 129]:
|
|
||||||
checkpoint &"trying bit count {bitCount}"
|
|
||||||
var
|
|
||||||
a = BitSeq.init(bitCount)
|
|
||||||
b = BitSeq.init(bitCount)
|
|
||||||
|
|
||||||
for pos in [4, 8, 9, 12, 29, 32, 63, 64, 67]:
|
|
||||||
if pos + 2 < bitCount:
|
|
||||||
a.setBit(pos)
|
|
||||||
b.setBit(pos + 2)
|
|
||||||
|
|
||||||
check:
|
|
||||||
not a.overlaps(b)
|
|
||||||
not b.overlaps(a)
|
|
||||||
a.countOverlap(b) == 0
|
|
||||||
|
|
||||||
test "isZeros":
|
|
||||||
template carryOutTests(N: static int) =
|
|
||||||
var a = BitArray[N]()
|
|
||||||
check a.isZeros()
|
|
||||||
|
|
||||||
for i in 0 ..< N:
|
|
||||||
var b = a
|
|
||||||
b.setBit(i)
|
|
||||||
check(not b.isZeros())
|
|
||||||
|
|
||||||
carryOutTests(1)
|
|
||||||
carryOutTests(10)
|
|
||||||
carryOutTests(31)
|
|
||||||
carryOutTests(32)
|
|
||||||
carryOutTests(63)
|
|
||||||
carryOutTests(64)
|
|
||||||
carryOutTests(65)
|
|
||||||
carryOutTests(95)
|
|
||||||
carryOutTests(96)
|
|
||||||
carryOutTests(97)
|
|
||||||
carryOutTests(12494)
|
|
||||||
|
|
|

@ -0,0 +1 @@
Subproject commit 1cb21eda4ab02a6ec87839dba1beb4d4a5de127d