2018-09-20 15:45:02 +00:00
|
|
|
# beacon_chain
|
|
|
|
# Copyright (c) 2018 Status Research & Development GmbH
|
|
|
|
# Licensed and distributed under either of
|
|
|
|
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
|
|
|
|
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
|
|
|
|
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
|
|
|
|
|
|
|
# SSZ Serialization (simple serialize)
|
2018-12-17 18:03:53 +00:00
|
|
|
# See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md
|
2018-09-20 15:45:02 +00:00
|
|
|
|
2018-11-22 10:17:05 +00:00
|
|
|
import
|
2019-03-25 16:46:31 +00:00
|
|
|
endians, typetraits, options, algorithm, math,
|
2019-03-05 22:54:08 +00:00
|
|
|
faststreams/input_stream, serialization, eth/common, nimcrypto/keccak,
|
2019-03-20 20:01:48 +00:00
|
|
|
./spec/[bitfield, crypto, datatypes, digest]
|
2018-11-22 10:17:05 +00:00
|
|
|
|
2018-09-20 15:45:02 +00:00
|
|
|
# ################### Helper functions ###################################
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
export
|
|
|
|
serialization
|
|
|
|
|
|
|
|
type
  SszReader* = object
    ## Decodes SSZ-encoded data pulled from an underlying byte stream.
    stream: ByteStreamVar

  SszWriter* = object
    ## Encodes Nim values as SSZ into an underlying output stream.
    stream: OutputStreamVar

  SszError* = object of SerializationError
    ## Base error for all SSZ (de)serialization failures.

  CorruptedDataError* = object of SszError
    ## Raised when the input bytes cannot be a valid SSZ encoding.

  RecordWritingMemo = object
    ## Book-keeping for a record being written: remembers where the record
    ## started and the delayed-write cursor used to patch in the 4-byte
    ## size prefix once the record's payload size is known (see endRecord).
    initialStreamPos: int
    sizePrefixCursor: DelayedWriteCursor
|
|
|
# Register the SSZ format with the serialization framework, binding it to
# the reader/writer types above. (`PreferedOutput` spelling is part of the
# serialization package's API.)
serializationFormat SSZ,
                    Reader = SszReader,
                    Writer = SszWriter,
                    PreferedOutput = seq[byte]
|
|
|
|
|
|
|
|
proc init*(T: type SszReader, stream: ByteStreamVar): T =
  ## Create an SSZ reader that consumes bytes from `stream`.
  T(stream: stream)
|
|
|
|
|
2018-12-17 18:03:53 +00:00
|
|
|
# toBytesSSZ convert simple fixed-length types to their SSZ wire representation
func toBytesSSZ(x: SomeInteger): array[sizeof(x), byte] =
  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
  ## All integers are serialized as **little endian**.
  # `littleEndianNN` (std endians module) copies NN/8 bytes from the source
  # address to the destination, swapping on big-endian hosts; single bytes
  # need no swap and are just copied.
  when x.sizeof == 8: littleEndian64(result.addr, x.unsafeAddr)
  elif x.sizeof == 4: littleEndian32(result.addr, x.unsafeAddr)
  elif x.sizeof == 2: littleEndian16(result.addr, x.unsafeAddr)
  elif x.sizeof == 1: copyMem(result.addr, x.unsafeAddr, sizeof(result))
  else: {.fatal: "Unsupported type serialization: " & $(type(x)).name.}
|
|
|
|
|
2019-01-29 04:15:00 +00:00
|
|
|
func toBytesSSZ(x: ValidatorIndex): array[3, byte] =
  ## Integers are all encoded as little endian and not padded;
  ## a ValidatorIndex fits in 24 bits, hence exactly 3 bytes.
  let raw = x.uint32
  for pos in 0 ..< 3:
    result[pos] = byte((raw shr (8 * pos)) and 0xff)
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2018-12-27 20:14:37 +00:00
|
|
|
func toBytesSSZ(x: bool): array[1, byte] =
  ## Encode a bool as a single byte: 1 for true, 0 for false.
  result[0] = byte(ord(x))
|
|
|
|
|
2018-11-14 20:06:04 +00:00
|
|
|
# Addresses and digests are already raw byte arrays - identity conversions.
func toBytesSSZ(x: EthAddress): array[sizeof(x), byte] = x
func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2018-12-17 18:03:53 +00:00
|
|
|
# TODO these two are still being debated:
# https://github.com/ethereum/eth2.0-specs/issues/308#issuecomment-447026815
# BLS keys and signatures delegate to their own fixed-length serialization.
func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()
|
2018-12-17 18:03:53 +00:00
|
|
|
|
2018-12-21 22:37:46 +00:00
|
|
|
type
  BasicType =
    # Types that serialize down to a fixed-length array - most importantly,
    # these values don't carry a length prefix in the final encoding. toBytesSSZ
    # provides the actual nim-type-to-bytes conversion.
    # TODO think about this for a bit - depends where the serialization of
    #      validator keys ends up going..
    # TODO can't put ranges like ValidatorIndex in here:
    #      https://github.com/nim-lang/Nim/issues/10027
    SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
      bool
|
2018-12-17 18:03:53 +00:00
|
|
|
|
2019-03-25 16:46:31 +00:00
|
|
|
# sszLen returns the number of bytes a value occupies on the wire.
# Fixed-length types: exactly the size of their byte representation.
func sszLen(v: BasicType): int = toBytesSSZ(v).len
func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len
|
2018-12-17 18:03:53 +00:00
|
|
|
|
|
|
|
func sszLen(v: object | tuple): int =
  ## Wire size of a record: 4-byte length prefix plus the encoded size of
  ## every field.
  result = 4 # Length
  for field in v.fields:
    # BUGFIX: was `sszLen(type field)`, passing a typedesc - but every
    # sszLen overload takes a *value*, so that call could never resolve.
    result += sszLen(field)
|
|
|
|
|
|
|
|
func sszLen(v: seq | array): int =
  ## Wire size of a list: 4-byte length prefix plus the encoded size of
  ## every element (elements may have varying encoded lengths).
  result = 4 # Length
  for item in items(v):
    result += sszLen(item)
|
|
|
|
|
2019-03-20 20:01:48 +00:00
|
|
|
func sszLen(v: BitField): int =
  ## A BitField's wire size is that of its underlying byte sequence.
  sszLen(v.bits)
|
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
# fromBytesSSZ copies the wire representation to a Nim variable,
# assuming there's enough data in the buffer
func fromBytesSSZ(T: type SomeInteger, data: openarray[byte]): T =
  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
  ## All integers are serialized as **little endian**.
  ## TODO: Assumes data points to a sufficiently large buffer
  doAssert data.len == sizeof(result)
  # TODO: any better way to get a suitably aligned buffer in nim???
  # see also: https://github.com/nim-lang/Nim/issues/9206
  # Stage the input through a uint64 so the endian-swap routines read from
  # properly aligned memory regardless of `data`'s alignment.
  var tmp: uint64
  var alignedBuf = cast[ptr byte](tmp.addr)
  copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)

  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
|
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
func fromBytesSSZ(T: type bool, data: openarray[byte]): T =
  ## Decode a single-byte bool: any non-zero byte is true.
  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
  #       definition for now, but maybe this should be a parse error instead?
  fromBytesSSZ(uint8, data) != 0
|
2018-12-27 20:14:37 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
func fromBytesSSZ(T: type ValidatorIndex, data: openarray[byte]): T =
  ## Integers are all encoded as littleendian and not padded;
  ## reassemble the 24-bit value from its 3 wire bytes.
  doAssert data.len == 3
  var acc: uint32
  for i in 0 .. 2:
    acc = acc or (uint32(data[i]) shl (8 * i))
  result = acc.ValidatorIndex
|
2018-12-17 18:03:53 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
func fromBytesSSZ(T: type EthAddress, data: openarray[byte]): T =
  ## An address is a raw byte array - copy it straight out of the buffer.
  doAssert data.len == sizeof(result)
  copyMem(result.addr, unsafeAddr data[0], sizeof(result))
|
|
|
|
|
|
|
|
func fromBytesSSZ(T: type Eth2Digest, data: openarray[byte]): T =
  ## A digest is a raw 32-byte array - copy it straight out of the buffer.
  doAssert data.len == sizeof(result.data)
  copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
|
|
|
|
|
|
|
|
proc init*(T: type SszWriter, stream: OutputStreamVar): T =
  ## Create an SSZ writer that emits bytes into `stream`.
  T(stream: stream)
|
|
|
|
|
|
|
|
# Forward declaration: the helpers below (beginRecord/writeField/endRecord)
# need writeValue before its implementation further down.
proc writeValue*(w: var SszWriter, obj: auto)
|
|
|
|
|
|
|
|
# This is an alternative lower-level API useful for RPC
# frameworks that can simulate the serialization of an
# object without constructing an actual instance:
proc beginRecord*(w: var SszWriter, T: type): RecordWritingMemo =
  ## Start a variable-length record: reserve 4 bytes for the size prefix
  ## (filled in by endRecord) and remember where the record began.
  result.initialStreamPos = w.stream.pos
  result.sizePrefixCursor = w.stream.delayFixedSizeWrite sizeof(uint32)
|
2018-12-17 18:03:53 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
template writeField*(w: var SszWriter, name: string, value: auto) =
  ## Write one record field. SSZ encodes no field names, so `name` is
  ## deliberately unused - it exists to satisfy the serialization API.
  w.writeValue(value)
|
2018-12-17 18:03:53 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
proc endRecord*(w: var SszWriter, memo: RecordWritingMemo) =
  ## Finish a record started with beginRecord: compute the payload size
  ## (excluding the 4 prefix bytes themselves) and patch it into the
  ## reserved size-prefix slot.
  let finalSize = uint32(w.stream.pos - memo.initialStreamPos - 4)
  memo.sizePrefixCursor.endWrite(finalSize.toBytesSSZ)
|
|
|
|
|
2019-03-13 01:46:44 +00:00
|
|
|
# Slot/Epoch are distinct uint64 wrappers - unwrap them for serialization;
# everything else passes through unchanged.
func toSSZType(x: Slot|Epoch): auto = x.uint64
func toSSZType(x: auto): auto = x
|
2019-03-12 22:21:32 +00:00
|
|
|
|
2019-03-05 22:54:08 +00:00
|
|
|
proc writeValue*(w: var SszWriter, obj: auto) =
  ## Serialize `obj` to the writer's stream as SSZ.
  # We are not using overloads here, because this leads to
  # slightly better error messages when the user provides
  # additional overloads for `writeValue`.
  mixin writeValue

  when obj is ValidatorIndex|BasicType:
    # Fixed-length values are written raw, with no length prefix.
    w.stream.append obj.toBytesSSZ
  elif obj is enum:
    # Enums travel as their uint64 ordinal value.
    w.stream.append uint64(obj).toBytesSSZ
  else:
    # Everything else is a variable-length record: a 4-byte size prefix
    # (patched in by endRecord) followed by the payload.
    let memo = w.beginRecord(obj.type)
    when obj is seq|array|openarray:
      # If you get an error here that looks like:
      # type mismatch: got <type range 0..8191(uint64)>
      # you just used an unsigned int for an array index thinking you'd get
      # away with it (surprise, surprise: you can't, uints are crippled!)
      # https://github.com/nim-lang/Nim/issues/9984
      for elem in obj:
        w.writeValue elem
    elif obj is BitField:
      # A BitField serializes as its underlying byte sequence.
      for elem in obj.bits:
        w.writeValue elem
    else:
      obj.serializeFields(fieldName, field):
        # for research/serialized_sizes, remove when appropriate
        when defined(debugFieldSizes) and obj is (BeaconState|BeaconBlock):
          let start = w.stream.pos
          w.writeValue field.toSSZType
          debugEcho fieldName, ": ", w.stream.pos - start
        else:
          w.writeValue field.toSSZType
    w.endRecord(memo)
|
|
|
|
|
|
|
|
proc readValue*(r: var SszReader, result: var auto) =
  ## Deserialize an SSZ value from the reader's stream into `result`.
  ## Raises UnexpectedEofError when the stream runs short and
  ## CorruptedDataError when the bytes cannot be a valid encoding.
  # We are not using overloads here, because this leads to
  # slightly better error messages when the user provides
  # additional overloads for `readValue`.
  type T = result.type
  mixin readValue

  template checkEof(n: int) =
    # Fail early when fewer than `n` bytes remain in the stream.
    if not r.stream[].ensureBytes(n):
      raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")

  when result is ValidatorIndex|BasicType:
    let bytesToRead = result.sszLen;
    checkEof bytesToRead

    when result is ValidatorPubKey|ValidatorSig:
      # BLS keys/signatures validate themselves on load.
      if not result.init(r.stream.readBytes(bytesToRead)):
        raise newException(CorruptedDataError, "Failed to load a BLS key or signature")
    else:
      result = T.fromBytesSSZ(r.stream.readBytes(bytesToRead))

  elif result is enum:
    # TODO what to do with out-of-range values?? rejecting means breaking
    # forwards compatibility..
    result = cast[T](r.readValue(uint64))

  elif result is string:
    {.error: "The SSZ format doesn't support the string type yet".}
  else:
    # Variable-length record: a 4-byte size prefix, then exactly that many
    # payload bytes.
    let totalLen = int r.readValue(uint32)
    checkEof totalLen

    let endPos = r.stream[].pos + totalLen
    when T is seq:
      type ElemType = type(result[0])
      # Items are of homogenous type, but not necessarily homogenous length,
      # cannot pre-allocate item list generically
      while r.stream[].pos < endPos:
        result.add r.readValue(ElemType)

    elif T is BitField:
      type ElemType = type(result.bits[0])
      while r.stream[].pos < endPos:
        result.bits.add r.readValue(ElemType)

    elif T is array:
      type ElemType = type(result[0])
      var i = 0
      while r.stream[].pos < endPos:
        # BUGFIX: was `i > result.len`, which allowed `result[i]` to be
        # written with i == result.len (index out of bounds, raising a
        # Defect) before the intended CorruptedDataError could fire.
        if i >= result.len:
          raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of an array")
        result[i] = r.readValue(ElemType)
        i += 1

    else:
      result.deserializeFields(fieldName, field):
        # TODO This hardcoding's ugly; generalize & abstract.
        when field is Slot:
          field = r.readValue(uint64).Slot
        elif field is Epoch:
          field = r.readValue(uint64).Epoch
        else:
          field = r.readValue(field.type)

    if r.stream[].pos != endPos:
      raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of the deserialized object")
|
2018-11-14 20:06:04 +00:00
|
|
|
|
|
|
|
# ################### Hashing ###################################
|
|
|
|
|
2018-12-11 21:53:18 +00:00
|
|
|
# Sample hash_tree_root implementation based on:
|
2019-03-25 16:46:31 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.5.1/specs/simple-serialize.md
|
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/v0.5.1/utils/phase0/minimal_ssz.py
|
2018-12-21 22:37:46 +00:00
|
|
|
# TODO Probably wrong - the spec is pretty bare-bones and no test vectors yet
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2019-03-25 16:46:31 +00:00
|
|
|
const
  # Merkleization works on 32-byte chunks, per the SSZ spec.
  BYTES_PER_CHUNK = 32
|
2018-11-14 20:06:04 +00:00
|
|
|
|
|
|
|
# ################### Hashing helpers ###################################
|
|
|
|
|
2018-11-29 22:11:05 +00:00
|
|
|
# TODO varargs openarray, anyone?
template withHash(body: untyped): array[32, byte] =
  ## Run `body` inside a hashing context (via withEth2Hash) and return the
  ## resulting digest as a raw 32-byte array.
  let tmp = withEth2Hash: body
  toBytesSSZ tmp
|
|
|
|
|
2018-11-14 20:06:04 +00:00
|
|
|
func hash(a, b: openArray[byte]): array[32, byte] =
  ## Hash the concatenation of `a` and `b` - the basic inner-node
  ## operation of the Merkle tree.
  withHash:
    h.update(a)
    h.update(b)
|
|
|
|
|
2019-03-25 16:46:31 +00:00
|
|
|
type
  # A single 32-byte merkleization unit.
  Chunk = array[BYTES_PER_CHUNK, byte]
|
|
|
|
|
2018-11-29 22:11:05 +00:00
|
|
|
# TODO: er, how is this _actually_ done?
# Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
# `empty` yields a default-initialized (all-zero) value of T.
func empty(T: type): T = discard
const emptyChunk = empty(Chunk)
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2019-03-25 16:46:31 +00:00
|
|
|
func mix_in_length(root: Chunk, length: int): Chunk =
  ## Per the SSZ spec: hash(root ++ length.to_bytes(32, "little")).
  ## `to_bytes(32, "little")` places the 8 significant little-endian bytes
  ## at the *start* of the 32-byte block, the remainder zero.
  var dataLen: array[32, byte]
  var lstLen = uint64(length)
  # BUGFIX: was `dataLen[32-8].addr`, which put the length in the *last*
  # 8 bytes - a big-endian-style placement that contradicts the spec's
  # little-endian 32-byte serialization of the length.
  littleEndian64(dataLen[0].addr, lstLen.addr)

  hash(root, dataLen)
|
|
|
|
|
|
|
|
proc pack(values: seq|array): iterator(): Chunk =
  ## Flatten `values` into their SSZ byte representation and regroup the
  ## bytes into 32-byte chunks; a trailing partial chunk is zero-padded.
  result = iterator (): Chunk =
    # TODO should be trivial to avoid this seq also..
    # TODO I get a feeling a copy of the array is taken to the closure, which
    #      also needs fixing
    # TODO avoid closure iterators that involve GC
    var tmp = newSeqOfCap[byte](values.len() * sizeof(toBytesSSZ(values[0])))
    for v in values:
      tmp.add toBytesSSZ(v)

    # Emit every complete 32-byte chunk.
    for v in 0..<tmp.len div sizeof(Chunk):
      var c: Chunk
      copyMem(addr c, addr tmp[v * sizeof(Chunk)], sizeof(Chunk))
      yield c

    # Emit the trailing partial chunk, if any - `c` starts zeroed, so the
    # unfilled tail is implicitly zero-padded.
    let remains = tmp.len mod sizeof(Chunk)
    if remains != 0:
      var c: Chunk
      copyMem(addr c, addr tmp[tmp.len - remains], remains)
      yield c
|
|
|
|
|
|
|
|
proc pad(iter: iterator(): Chunk): iterator(): Chunk =
  # Pad a list of chunks to the next power-of-two length with empty chunks -
  # this includes ensuring there's at least one chunk return
  result = iterator(): Chunk =
    var count = 0

    # Pass through everything the wrapped iterator produces, counting items.
    while true:
      let item = iter()
      if finished(iter): break
      count += 1
      yield item

    doAssert nextPowerOfTwo(0) == 1,
      "Usefully, empty lists will be padded to one empty block"

    # Top up with zero chunks until the total is a power of two.
    for _ in count..<nextPowerOfTwo(count):
      yield emptyChunk
|
|
|
|
|
|
|
|
func merkleize(chunker: iterator(): Chunk): Chunk =
  ## Compute the Merkle root of the chunks produced by `chunker`, using a
  ## log-space stack of partial subtree roots instead of materializing the
  ## whole tree.
  var
    stack: seq[tuple[height: int, chunk: Chunk]]
    paddedChunker = pad(chunker)

  while true:
    let chunk = paddedChunker()
    if finished(paddedChunker): break

    # Leaves start at height 0 - every time they move up, height is increased
    # allowing us to detect two chunks at the same height ready for
    # consolidation
    # See also: http://szydlo.com/logspacetime03.pdf
    stack.add (0, chunk)

    # Consolidate items of the same height - this keeps stack size at log N
    while stack.len > 1 and stack[^1].height == stack[^2].height:
      # As tradition dictates - one feature, at least one nim bug:
      # https://github.com/nim-lang/Nim/issues/9684
      let tmp = hash(stack[^2].chunk, stack[^1].chunk)
      stack[^2].height += 1
      stack[^2].chunk = tmp
      discard stack.pop

  doAssert stack.len == 1,
    "With power-of-two leaves, we should end up with a single root"

  stack[0].chunk
|
|
|
|
|
|
|
|
# Extract the element type of an array or seq type at compile time.
template elementType[T, N](_: type array[N, T]): typedesc = T
template elementType[T](_: type seq[T]): typedesc = T
|
|
|
|
|
|
|
|
func hash_tree_root*[T](value: T): Eth2Digest =
  ## Compute the SSZ hash-tree-root of `value`:
  ## basic values are packed and merkleized directly; lists additionally
  ## mix in their length; composites merkleize their fields' roots.
  # Merkle tree
  Eth2Digest(data:
    when T is BasicType:
      merkleize(pack([value]))
    elif T is array|seq:
      when T.elementType() is BasicType:
        # Homogeneous basic elements pack tightly into chunks.
        mix_in_length(merkleize(pack(value)), len(value))
      else:
        # Composite elements: each element's root becomes one chunk.
        var roots = iterator(): Chunk =
          for v in value:
            yield hash_tree_root(v).data
        mix_in_length(merkleize(roots), len(value))
    elif T is object:
      # One chunk per field root; no length mix-in for fixed records.
      var roots = iterator(): Chunk =
        for v in value.fields:
          yield hash_tree_root(v).data

      merkleize(roots)
  )
|
2018-11-14 20:06:04 +00:00
|
|
|
|
2019-03-08 17:44:31 +00:00
|
|
|
# https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/simple-serialize.md#signed-roots
func signed_root*[T: object](x: T): array[32, byte] =
  ## Hash of all fields up to - and excluding - the `signature` field.
  ## Requires the object to actually contain a `signature` field
  ## (enforced by the trailing doAssert).
  # TODO write tests for this (check vs hash_tree_root)

  var found_field_name = false

  ## TODO this isn't how 0.5 defines signed_root, but works well enough
  ## for now.
  withHash:
    for name, field in x.fieldPairs:
      if name == "signature":
        found_field_name = true
        break
      # Mix each preceding field's tree root into the running hash.
      h.update hash_tree_root(field.toSSZType).data

  doAssert found_field_name
|