# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# SSZ Serialization (simple serialize)
# See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

import
  endians, typetraits, options, algorithm,
  eth/common, nimcrypto/keccak,
  ./spec/[crypto, datatypes, digest]

# ################### Helper functions ###################################

# toBytesSSZ converts simple fixed-length types to their SSZ wire representation
func toBytesSSZ(x: SomeInteger): array[sizeof(x), byte] =
  ## Convert directly to bytes the size of the int (e.g. ``uint16 = 2 bytes``).
  ## All integers are serialized as **little endian**.
  when x.sizeof == 8: littleEndian64(result.addr, x.unsafeAddr)
  elif x.sizeof == 4: littleEndian32(result.addr, x.unsafeAddr)
  elif x.sizeof == 2: littleEndian16(result.addr, x.unsafeAddr)
  elif x.sizeof == 1: copyMem(result.addr, x.unsafeAddr, sizeof(result))
  else: {.fatal: "Unsupported type serialization: " & $(type(x)).name.}
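
# Worked example (sketch, for illustration only - the values follow from the
# little-endian conversions above):
#   toBytesSSZ(0x0102'u16) == [0x02'u8, 0x01'u8]
#   toBytesSSZ(1'u64)      == [1'u8, 0, 0, 0, 0, 0, 0, 0]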

func toBytesSSZ(x: ValidatorIndex): array[3, byte] =
  ## Integers are all encoded as little endian and not padded
  let v = x.uint32
  result[0] = byte(v and 0xff)
  result[1] = byte((v shr 8) and 0xff)
  result[2] = byte((v shr 16) and 0xff)
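
# Example (sketch): ValidatorIndex(0x010203) encodes as [0x03'u8, 0x02, 0x01],
# i.e. three little-endian bytes with no padding.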

func toBytesSSZ(x: bool): array[1, byte] =
  [if x: 1'u8 else: 0'u8]

func toBytesSSZ(x: EthAddress): array[sizeof(x), byte] = x
func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data

# TODO these two are still being debated:
# https://github.com/ethereum/eth2.0-specs/issues/308#issuecomment-447026815
func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()

type
  TrivialTypes =
    # Types that serialize down to a fixed-length array - most importantly,
    # these values don't carry a length prefix in the final encoding. toBytesSSZ
    # provides the actual nim-type-to-bytes conversion.
    # TODO think about this for a bit - depends where the serialization of
    # validator keys ends up going..
    # TODO can't put ranges like ValidatorIndex in here:
    # https://github.com/nim-lang/Nim/issues/10027
    SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
      bool
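
# Illustration (sketch): a uint64 or Eth2Digest value is emitted as just its
# bytes, whereas composite values (objects, seqs, arrays) handled further down
# are emitted with a 4-byte little-endian length prefix in front.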

func sszLen(v: TrivialTypes): int = toBytesSSZ(v).len
func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len

func sszLen(v: object | tuple): int =
  result = 4 # Length
  for field in v.fields:
    result += sszLen(type field)

func sszLen(v: seq | array): int =
  result = 4 # Length
  for i in v:
    result += sszLen(i)
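
# Example (sketch): per the definitions above, a seq[uint16] holding 3 items
# has sszLen == 4 (length prefix) + 3 * 2 == 10 bytes on the wire.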

# fromBytesSSZUnsafe copies the wire representation to a Nim variable, assuming
# there's enough data in the buffer
func fromBytesSSZUnsafe(T: typedesc[SomeInteger], data: pointer): T =
  ## Convert bytes directly to an int the size of the target type (e.g. ``uint16 = 2 bytes``).
  ## All integers are serialized as **little endian**.
  ## TODO: Assumes data points to a sufficiently large buffer

  # TODO: any better way to get a suitably aligned buffer in nim???
  # see also: https://github.com/nim-lang/Nim/issues/9206
  var tmp: uint64
  var alignedBuf = cast[ptr byte](tmp.addr)
  copyMem(alignedBuf, data, result.sizeof)

  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}

func fromBytesSSZUnsafe(T: typedesc[bool], data: pointer): T =
  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
  # definition for now, but maybe this should be a parse error instead?
  fromBytesSSZUnsafe(uint8, data) != 0

func fromBytesSSZUnsafe(T: typedesc[ValidatorIndex], data: pointer): T =
  ## Integers are all encoded as little endian and not padded
  var tmp: uint32
  let p = cast[ptr UncheckedArray[byte]](data)
  tmp = tmp or uint32(p[0])
  tmp = tmp or uint32(p[1]) shl 8
  tmp = tmp or uint32(p[2]) shl 16
  result = tmp.ValidatorIndex

func fromBytesSSZUnsafe(T: typedesc[EthAddress], data: pointer): T =
  copyMem(result.addr, data, sizeof(result))

func fromBytesSSZUnsafe(T: typedesc[Eth2Digest], data: pointer): T =
  copyMem(result.data.addr, data, sizeof(result.data))

proc deserialize[T: TrivialTypes](
    dest: var T, offset: var int, data: openArray[byte]): bool =
  # TODO proc because milagro is problematic
  if offset + sszLen(dest) > data.len():
    false
  else:
    when T is (ValidatorPubKey|ValidatorSig):
      if dest.init(data[offset..data.len-1]):
        offset += sszLen(dest)
        true
      else:
        false
    else:
      dest = fromBytesSSZUnsafe(T, data[offset].unsafeAddr)
      offset += sszLen(dest)
      true

func deserialize(
    dest: var ValidatorIndex, offset: var int, data: openArray[byte]): bool =
  if offset + sszLen(dest) > data.len():
    false
  else:
    dest = fromBytesSSZUnsafe(ValidatorIndex, data[offset].unsafeAddr)
    offset += sszLen(dest)
    true

func deserialize[T: enum](dest: var T, offset: var int, data: openArray[byte]): bool =
  # TODO er, verify the size here, probably a uint64 but...
  var tmp: uint64
  if not deserialize(tmp, offset, data):
    false
  else:
    # TODO what to do with out-of-range values?? rejecting means breaking
    # forwards compatibility..
    dest = cast[T](tmp)
    true

proc deserialize[T: not (enum|TrivialTypes|ValidatorIndex)](
    dest: var T, offset: var int, data: openArray[byte]): bool =
  # Length in bytes, followed by each item
  var totalLen: uint32
  if not deserialize(totalLen, offset, data): return false

  if offset + totalLen.int > data.len(): return false

  let itemEnd = offset + totalLen.int
  when T is seq:
    # Items are of homogeneous type, but not necessarily homogeneous length,
    # so we cannot pre-allocate the item list generically
    while offset < itemEnd:
      dest.setLen dest.len + 1
      if not deserialize(dest[^1], offset, data): return false
  elif T is array:
    var i = 0
    while offset < itemEnd:
      if not deserialize(dest[i], offset, data): return false
      i += 1
      if i > dest.len: return false
  else:
    for field in dest.fields:
      if not deserialize(field, offset, data): return false
    if offset != itemEnd: return false

  true

func serialize(dest: var seq[byte], src: TrivialTypes) =
  dest.add src.toBytesSSZ()
func serialize(dest: var seq[byte], src: ValidatorIndex) =
  dest.add src.toBytesSSZ()

func serialize(dest: var seq[byte], x: enum) =
  # TODO er, verify the size here, probably a uint64 but...
  serialize dest, uint64(x)

func serialize[T: not enum](dest: var seq[byte], src: T) =
  let lenPos = dest.len()

  # Length is a prefix, so we'll put a dummy 0 here and fill it after
  # serializing
  dest.add toBytesSSZ(0'u32)

  when T is seq|array:
    # If you get an error here that looks like:
    # type mismatch: got <type range 0..8191(uint64)>
    # you just used an unsigned int for an array index thinking you'd get
    # away with it (surprise, surprise: you can't, uints are crippled!)
    # https://github.com/nim-lang/Nim/issues/9984
    for val in src:
      serialize dest, val
  else:
    when defined(debugFieldSizes) and T is (BeaconState | BeaconBlock):
      # for research/serialized_sizes, remove when appropriate
      for name, field in src.fieldPairs:
        let start = dest.len()
        serialize dest, field
        let sz = dest.len() - start
        debugEcho(name, ": ", sz)
    else:
      for field in src.fields:
        serialize dest, field

  # Write size (we only know it once we've serialized the object!)
  var objLen = dest.len() - lenPos - 4
  littleEndian32(dest[lenPos].addr, objLen.addr)
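
# Worked example (sketch): serializing a tuple (a: 0x01'u8, b: 0x0203'u16)
# via the func above yields
#   [0x03, 0x00, 0x00, 0x00,  # 4-byte little-endian length of the payload (3)
#    0x01,                    # field a
#    0x03, 0x02]              # field b, little endian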

# ################### Core functions ###################################

proc deserialize*(data: openArray[byte],
                  typ: typedesc): auto {.inline.} =
  # TODO: returns Option[typ]: https://github.com/nim-lang/Nim/issues/9195
  var ret: typ
  var offset: int
  if not deserialize(ret, offset, data): none(typ)
  else: some(ret)

func serialize*(value: auto): seq[byte] =
  serialize(result, value)
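
when isMainModule:
  # Minimal round-trip sketch (illustrative only, not a test suite): exercises
  # the exported serialize/deserialize entry points defined above.
  let encoded = serialize(42'u64)
  doAssert encoded == @[42'u8, 0, 0, 0, 0, 0, 0, 0] # little-endian uint64
  doAssert deserialize(encoded, uint64) == some(42'u64)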

# ################### Hashing ###################################

# Sample hash_tree_root implementation based on:
# https://github.com/ethereum/eth2.0-specs/blob/a9328157a87451ee4f372df272ece158b386ec41/specs/simple-serialize.md
# TODO Probably wrong - the spec is pretty bare-bones and there are no test vectors yet

const CHUNK_SIZE = 128

# ################### Hashing helpers ###################################

# TODO varargs openarray, anyone?
template withHash(body: untyped): array[32, byte] =
  let tmp = withEth2Hash: body
  toBytesSSZ tmp

func hash(a: openArray[byte]): array[32, byte] =
  withHash:
    h.update(a)

func hash(a, b: openArray[byte]): array[32, byte] =
  withHash:
    h.update(a)
    h.update(b)

# TODO: er, how is this _actually_ done?
# Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
func empty(T: typedesc): T = discard
const emptyChunk = empty(array[CHUNK_SIZE, byte])

func merkleHash[T](lst: openArray[T]): array[32, byte]

# ################### Hashing interface ###################################

func hash_tree_root*(x: SomeInteger | bool): array[sizeof(x), byte] =
  ## Convert directly to bytes the size of the int (e.g. ``uint16 = 2 bytes``).
  ## All integers are serialized as **little endian**.
  toBytesSSZ(x)

func hash_tree_root*(x: ValidatorIndex): array[3, byte] =
  ## Convert directly to bytes the size of the int (e.g. ``uint16 = 2 bytes``).
  ## All integers are serialized as **little endian**.
  toBytesSSZ(x)

func hash_tree_root*(x: EthAddress): array[sizeof(x), byte] =
  ## Addresses copied as-is
  toBytesSSZ(x)

func hash_tree_root*(x: Eth2Digest): array[32, byte] =
  ## Hash32 copied as-is
  toBytesSSZ(x)

func hash_tree_root*(x: openArray[byte]): array[32, byte] =
  ## Blobs are hashed
  hash(x)

func hash_tree_root*[T: seq|array](x: T): array[32, byte] =
  ## Sequences are tree-hashed
  merkleHash(x)

func hash_tree_root*[T: object|tuple](x: T): array[32, byte] =
  ## Containers have their fields recursively hashed, concatenated and hashed
  withHash:
    for field in x.fields:
      h.update hash_tree_root(field)

# https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/simple-serialize.md#signed-roots
func signed_root*[T: object](x: T, field_name: string): array[32, byte] =
  # TODO write tests for this (check vs hash_tree_root)
  var found_field_name = false

  withHash:
    for name, field in x.fieldPairs:
      if name == field_name:
        found_field_name = true
        break
      h.update hash_tree_root(field)

    doAssert found_field_name
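
# Example (sketch): for a hypothetical object with fields (slot, state_root,
# signature) declared in that order, signed_root(x, "signature") hashes the
# hash_tree_root of slot and state_root only, stopping at `signature`.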

# #################################
# hash_tree_root not part of official spec
func hash_tree_root*(x: enum): array[8, byte] =
  ## TODO - Warning ⚠️: not part of the spec
  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
  ## This is a "stub" needed for BeaconBlock hashing
  static: assert x.sizeof == 1 # Check that the enum fits in 1 byte
  # TODO We've put enums where the spec uses `uint64` - maybe we should not be
  # using enums?
  hash_tree_root(uint64(x))

func hash_tree_root*(x: ValidatorPubKey): array[32, byte] =
  ## TODO - Warning ⚠️: not part of the spec
  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
  ## This is a "stub" needed for BeaconBlock hashing
  x.getBytes().hash()

func hash_tree_root*(x: ValidatorSig): array[32, byte] =
  ## TODO - Warning ⚠️: not part of the spec
  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
  ## This is a "stub" needed for BeaconBlock hashing
  x.getBytes().hash()

func hash_tree_root_final*(x: object|tuple): Eth2Digest =
  # TODO suggested for spec:
  # https://github.com/ethereum/eth2.0-specs/issues/276
  # only for objects now, else the padding would have to be implemented - not
  # needed yet..
  Eth2Digest(data: hash_tree_root(x))

# ################### Tree hash ###################################

func merkleHash[T](lst: openArray[T]): array[32, byte] =
  ## Merkle tree hash of a list of homogeneous, non-empty items

  # TODO: the heap allocations here can be avoided by computing the merkle tree
  # recursively, but for now keep things simple and aligned with upstream

  # Store length of list (to compensate for non-bijectiveness of padding)
  var dataLen: array[32, byte]
  var lstLen = uint64(len(lst))
  littleEndian64(dataLen[32-8].addr, lstLen.addr)

  # Divide into chunks
  var chunkz: seq[seq[byte]]

  if len(lst) == 0:
    chunkz.add @emptyChunk
  elif sizeof(hash_tree_root(lst[0])) < CHUNK_SIZE:
    # See how many items fit in a chunk
    let itemsPerChunk = CHUNK_SIZE div sizeof(hash_tree_root(lst[0]))

    chunkz.setLen((len(lst) + itemsPerChunk - 1) div itemsPerChunk)

    # Build a list of chunks based on the number of items in the chunk
    for i in 0..<chunkz.len:
      for j in 0..<itemsPerChunk:
        if i == chunkz.len - 1:
          let idx = i * itemsPerChunk + j
          if idx >= lst.len: break # Last chunk may be partial!
        chunkz[i].add hash_tree_root(lst[i * itemsPerChunk + j])
  else:
    # Leave large items alone
    chunkz.setLen(len(lst))
    for i in 0..<len(lst):
      chunkz[i].add hash_tree_root(lst[i])

  while chunkz.len() > 1:
    if chunkz.len() mod 2 == 1:
      chunkz.add @emptyChunk
    for i in 0..<(chunkz.len div 2):
      # As tradition dictates - one feature, at least one nim bug:
      # https://github.com/nim-lang/Nim/issues/9684
      let tmp = @(hash(chunkz[i * 2], chunkz[i * 2 + 1]))
      chunkz[i] = tmp

    chunkz.setLen(chunkz.len div 2)

  hash(chunkz[0], dataLen)
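
# Chunking arithmetic (sketch): with CHUNK_SIZE = 128 and 32-byte element
# roots (e.g. Eth2Digest), itemsPerChunk = 4, so a 10-element list becomes
# 3 chunks (4 + 4 + 2 items). The while-loop above then pairs chunks (padding
# odd levels with emptyChunk) down to a single chunk, which is finally mixed
# with the list length encoded into a 32-byte block.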