# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#   * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
#   * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# SSZ Serialization (simple serialize)
# See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

import
  stew/shims/macros, options, algorithm,
  stew/[bitops2, bitseqs, endians2, objects, varints, ptrops, ranges/ptr_arith], stint,
  faststreams/input_stream, serialization, serialization/testing/tracing,
  ./spec/[crypto, datatypes, digest],
  ./ssz/[types, bytes_reader]

# ################### Helper functions ###################################

export
  serialization, types, bytes_reader

when defined(serialization_tracing):
  import
    typetraits, stew/ranges/ptr_arith

const
  bytesPerChunk = 32
  bitsPerChunk = bytesPerChunk * 8
  maxChunkTreeDepth = 25
  defaultMaxObjectSize = 1 * 1024 * 1024

type
  SszReader* = object
    stream: InputStream
    maxObjectSize: int

  SszWriter* = object
    stream: OutputStream

  BasicType = char|bool|SomeUnsignedInt|StUint|ValidatorIndex

  SszChunksMerkleizer = ref object
    combinedChunks: array[maxChunkTreeDepth, Eth2Digest]
    totalChunks: uint64
    limit: uint64

  SszHashingStream = ref object of OutputStream
    merkleizer: SszChunksMerkleizer

  TypeWithMaxLen*[T; maxLen: static int64] = distinct T

  SizePrefixed*[T] = distinct T
  SszMaxSizeExceeded* = object of SerializationError

  VarSizedWriterCtx = object
    fixedParts: WriteCursor
    offset: int

  FixedSizedWriterCtx = object

  ByteList = seq[byte]

serializationFormat SSZ,
                    Reader = SszReader,
                    Writer = SszWriter,
                    PreferedOutput = seq[byte]
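
# Usage sketch for the format API generated by `serializationFormat`
# above (`encode`/`decode` come from nim-serialization; exact helper
# names depend on the library version in use):
#
#   let asBytes = SSZ.encode(42'u64)        # 8 little-endian bytes
#   let decoded = SSZ.decode(asBytes, uint64)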

template sizePrefixed*[TT](x: TT): untyped =
  type T = TT
  SizePrefixed[T](x)

proc init*(T: type SszReader,
           stream: InputStream,
           maxObjectSize = defaultMaxObjectSize): T =
  T(stream: stream, maxObjectSize: maxObjectSize)

proc mount*(F: type SSZ, stream: InputStream, T: type): T =
  mixin readValue
  var reader = init(SszReader, stream)
  reader.readValue(T)

method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe.} =
  # TODO: implement proper error string
  "Serialisation error while processing " & filename

when false:
  # TODO: Nim can't handle yet this simpler definition. File an issue.
  template valueOf[T; N](x: TypeWithMaxLen[T, N]): auto = T(x)
else:
  proc unwrapImpl[T; N: static int64](x: ptr TypeWithMaxLen[T, N]): ptr T =
    cast[ptr T](x)

  template valueOf(x: TypeWithMaxLen): auto =
    let xaddr = unsafeAddr x
    unwrapImpl(xaddr)[]

template sszList*(x: seq|array, maxLen: static int64): auto =
  TypeWithMaxLen[type(x), maxLen](x)
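
# Example (sketch): `sszList` attaches a compile-time capacity to a plain
# seq, so `hash_tree_root` below hashes it as an SSZ List[T, maxLen] with
# the chunk limit that the capacity implies:
#
#   let indices = @[1'u64, 2, 3]
#   let root = hash_tree_root(sszList(indices, 1024))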

template toSszType*(x: auto): auto =
  ## Lowers a spec-level value to the primitive representation that is
  ## actually serialized (e.g. Slot -> uint64, Eth2Digest -> byte array).
  mixin toSszType

  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
  elif x is Eth2Digest: x.data
  elif x is BlsCurveType: toRaw(x)
  elif x is BitSeq|BitList: ByteList(x)
  elif x is ref|ptr: toSszType x[]
  elif x is Option: toSszType x.get
  elif x is TypeWithMaxLen: toSszType valueOf(x)
  elif useListType and x is List: seq[x.T](x)
  else: x

proc writeFixedSized(c: var WriteCursor, x: auto) =
  mixin toSszType

  when x is byte:
    c.append x
  elif x is bool|char:
    c.append byte(ord(x))
  elif x is SomeUnsignedInt:
    let value = x.toBytesLE()
    trs "APPENDING INT ", x, " = ", value
    c.appendMemCopy value
  elif x is StUint:
    c.appendMemCopy x # TODO: Is this always correct?
  elif x is array|string|seq|openarray:
    when x[0] is byte:
      trs "APPENDING FIXED SIZE BYTES", x
      c.append x
    else:
      for elem in x:
        trs "WRITING FIXED SIZE ARRAY ELEMENT"
        c.writeFixedSized toSszType(elem)
  elif x is tuple|object:
    enumInstanceSerializedFields(x, fieldName, field):
      trs "WRITING FIXED SIZE FIELD", fieldName
      c.writeFixedSized toSszType(field)
  else:
    unsupported x.type

template writeFixedSized(s: OutputStream, x: auto) =
  writeFixedSized(s.cursor, x)

template supports*(_: type SSZ, T: type): bool =
  mixin toSszType
  anonConst compiles(fixedPortionSize toSszType(default(T)))

func init*(T: type SszWriter, stream: OutputStream): T =
  result.stream = stream

template enumerateSubFields(holder, fieldVar, body: untyped) =
  when holder is array|string|seq|openarray:
    for fieldVar in holder: body
  else:
    enumInstanceSerializedFields(holder, _, fieldVar): body

proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}

proc beginRecord*(w: var SszWriter, TT: type): auto =
  type T = TT
  when isFixedSize(T):
    FixedSizedWriterCtx()
  else:
    const offset = when T is array: len(T) * offsetSize
                   else: fixedPortionSize(T)
    VarSizedWriterCtx(offset: offset,
                      fixedParts: w.stream.delayFixedSizeWrite(offset))

template writeField*(w: var SszWriter,
                     ctx: var auto,
                     fieldName: string,
                     field: auto) =
  mixin toSszType
  when ctx is FixedSizedWriterCtx:
    writeFixedSized(w.stream, toSszType(field))
  else:
    type FieldType = type toSszType(field)

    when isFixedSize(FieldType):
      ctx.fixedParts.writeFixedSized toSszType(field)
    else:
      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
      ctx.fixedParts.writeFixedSized uint32(ctx.offset)
      let initPos = w.stream.pos
      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
      when FieldType is BitSeq:
        trs "BIT SEQ ", ByteList(field)
      writeVarSizeType(w, toSszType(field))
      ctx.offset += w.stream.pos - initPos

template endRecord*(w: var SszWriter, ctx: var auto) =
  when ctx is VarSizedWriterCtx:
    finalize ctx.fixedParts
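
# Sketch of a hand-written `writeValue` driving the record API above.
# `Example` and its fields are hypothetical, for illustration only:
#
#   type Example = object
#     a: uint64       # fixed-size: written into the fixed part
#     b: seq[byte]    # variable-size: a 4-byte offset plus trailing data
#
#   proc writeValue(w: var SszWriter, x: Example) =
#     var ctx = beginRecord(w, Example)
#     writeField w, ctx, "a", x.a
#     writeField w, ctx, "b", x.b
#     endRecord w, ctx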

proc writeVarSizeType(w: var SszWriter, value: auto) =
  trs "STARTING VAR SIZE TYPE"
  mixin toSszType
  type T = type toSszType(value)

  when T is seq|string|openarray:
    type E = ElemType(T)
    const isFixed = when E is Option: false
                    else: isFixedSize(E)
    when isFixed:
      trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
      for elem in value:
        w.stream.writeFixedSized toSszType(elem)
      trs "DONE"
    else:
      trs "WRITING LIST WITH VAR SIZE ELEMENTS"
      var offset = value.len * offsetSize
      var cursor = w.stream.delayFixedSizeWrite offset
      for elem in value:
        cursor.writeFixedSized uint32(offset)
        when elem is Option:
          if not isSome(elem): continue
        elif elem is ptr|ref:
          if isNil(elem): continue
        let initPos = w.stream.pos
        w.writeVarSizeType toSszType(elem)
        offset += w.stream.pos - initPos
      finalize cursor
      trs "DONE"

  elif T is object|tuple|array:
    trs "WRITING OBJECT OR ARRAY"
    var ctx = beginRecord(w, T)
    enumerateSubFields(value, field):
      writeField w, ctx, astToStr(field), field
    endRecord w, ctx
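
# Worked example of the offset layout above: serializing
# @[@[0xAA'u8], @[0xBB'u8, 0xCC'u8]] as a list of variable-size elements
# emits one 4-byte little-endian offset per element (8 and 9, counted
# from the start of the list) followed by the element bodies:
#
#   08 00 00 00  09 00 00 00  AA  BB CC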

proc writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
  mixin toSszType
  type T = type toSszType(x)

  when isFixedSize(T):
    w.stream.writeFixedSized toSszType(x)
  elif T is array|seq|openarray|string|object|tuple:
    w.writeVarSizeType toSszType(x)
  else:
    unsupported type(x)

proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
  # A varint-encoded uint64 length takes at most 10 bytes:
  var cursor = w.stream.delayVarSizeWrite(10)
  let initPos = w.stream.pos
  w.writeValue T(x)
  let length = uint64(w.stream.pos - initPos)
  when false:
    discard
    # TODO varintBytes is sub-optimal at the moment
    # cursor.writeAndFinalize length.varintBytes
  else:
    var buf: VarintBuffer
    buf.appendVarint length
    cursor.writeAndFinalize buf.writtenBytes
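
# Usage sketch (assuming faststreams' `memoryOutput`): the value is
# framed with its varint-encoded byte length, e.g.:
#
#   var stream = memoryOutput()
#   var writer = SszWriter.init(stream)
#   writer.writeValue sizePrefixed(42'u64)
#   # stream now holds 0x08 (varint length 8) followed by 8 value bytes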

template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
                             bytes: openarray[byte]): auto =
  mixin fromSszBytes
  fromSszBytes(T, bytes)

proc readValue*[T](r: var SszReader, val: var T) =
  when isFixedSize(T):
    const minimalSize = fixedPortionSize(T)
    if r.stream.readable(minimalSize):
      val = readSszValue(r.stream.read(minimalSize), T)
    else:
      raise newException(MalformedSszError, "SSZ input of insufficient size")
  else:
    # TODO Read the fixed portion first and precisely measure the size of
    # the dynamic portion to consume the right number of bytes.
    val = readSszValue(r.stream.read(r.stream.endPos), T)

proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
  let length = r.stream.readVarint(uint64)
  if length > r.maxObjectSize.uint64:
    raise newException(SszMaxSizeExceeded,
                       "Maximum SSZ object size exceeded: " & $length)
  val = readSszValue(r.stream.read(length.int), T)

const
  zeroChunk = default array[32, byte]

func hash(a, b: openArray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "MERGING BRANCHES "
    trs a
    trs b

    h.update a
    h.update b
  trs "HASH RESULT ", result

func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "MERGING BRANCHES OPEN ARRAY"
    trs existing.data
    trs newData

    h.update existing.data
    h.update newData

    let paddingBytes = bytesPerChunk - newData.len
    if paddingBytes > 0:
      trs "USING ", paddingBytes, " PADDING BYTES"
      h.update zeroChunk[0 ..< paddingBytes]
  trs "HASH RESULT ", result

template mergeBranches(a, b: Eth2Digest): Eth2Digest =
  hash(a.data, b.data)

func computeZeroHashes: array[100, Eth2Digest] =
  result[0] = Eth2Digest(data: zeroChunk)
  for i in 1 .. result.high:
    result[i] = mergeBranches(result[i - 1], result[i - 1])

let zeroHashes = computeZeroHashes()

func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
  # TODO this is a work-around for the somewhat broken side
  # effects analysis of Nim - reading from global let variables
  # is considered a side-effect.
  {.noSideEffect.}:
    zeroHashes[idx]
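
# `zeroHashes[i]` is the root of a complete binary tree of height `i`
# whose leaves are all zero chunks:
#
#   zeroHashes[0] = zero chunk
#   zeroHashes[1] = H(zeroHashes[0], zeroHashes[0])
#   zeroHashes[2] = H(zeroHashes[1], zeroHashes[1])
#   ...
#
# This lets the merkleizer pad a partially filled tree to any height
# without rehashing the all-zero subtrees.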

func addChunk(merkleizer: SszChunksMerkleizer, data: openarray[byte]) =
  doAssert data.len > 0 and data.len <= bytesPerChunk

  if not getBitLE(merkleizer.totalChunks, 0):
    # Even chunk count: stash the chunk; it will be merged when its
    # right-hand sibling arrives.
    let chunkStartAddr = addr merkleizer.combinedChunks[0].data[0]
    copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
    zeroMem(chunkStartAddr.offset(data.len), bytesPerChunk - data.len)
    trs "WROTE BASE CHUNK ", merkleizer.combinedChunks[0]
  else:
    var hash = mergeBranches(merkleizer.combinedChunks[0], data)

    for i in 1 .. high(merkleizer.combinedChunks):
      trs "ITERATING"
      if getBitLE(merkleizer.totalChunks, i):
        trs "CALLING MERGE BRANCHES"
        hash = mergeBranches(merkleizer.combinedChunks[i], hash)
      else:
        trs "WRITING FRESH CHUNK AT ", i, " = ", hash
        merkleizer.combinedChunks[i] = hash
        break

  inc merkleizer.totalChunks
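
# The merkleizer behaves like a binary counter: bit `i` of `totalChunks`
# says whether `combinedChunks[i]` holds a finished subtree root covering
# 2^i chunks. Adding a chunk "increments the counter" - completed
# subtrees are merged upwards until a free slot is found - so at most
# `maxChunkTreeDepth` intermediate roots are kept in memory, regardless
# of how long the chunk stream gets.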

func getFinalHash(merkleizer: SszChunksMerkleizer): Eth2Digest =
  let limit = merkleizer.limit

  if merkleizer.totalChunks == 0:
    let limitHeight = if limit != 0: bitWidth(limit - 1) else: 0
    return getZeroHashWithoutSideEffect(limitHeight)

  let
    bottomHashIdx = firstOne(merkleizer.totalChunks) - 1
    submittedChunksHeight = bitWidth(merkleizer.totalChunks - 1)
    topHashIdx = if limit <= 1: submittedChunksHeight
                 else: max(submittedChunksHeight, bitWidth(limit - 1))

  trs "BOTTOM HASH ", bottomHashIdx
  trs "SUBMITTED HEIGHT ", submittedChunksHeight
  trs "LIMIT ", limit

  if bottomHashIdx != submittedChunksHeight:
    # Our tree is not finished. We must complete the work in progress
    # branches and then extend the tree to the right height.
    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                           getZeroHashWithoutSideEffect(bottomHashIdx))

    for i in bottomHashIdx + 1 ..< topHashIdx:
      if getBitLE(merkleizer.totalChunks, i):
        result = mergeBranches(merkleizer.combinedChunks[i], result)
        trs "COMBINED"
      else:
        result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
        trs "COMBINED WITH ZERO"

  elif bottomHashIdx == topHashIdx:
    # We have a perfect tree (chunks == 2**n) at just the right height!
    result = merkleizer.combinedChunks[bottomHashIdx]
  else:
    # We have a perfect tree of user chunks, but we have more work to
    # do - we must extend it to reach the desired height
    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
                           getZeroHashWithoutSideEffect(bottomHashIdx))

    for i in bottomHashIdx + 1 ..< topHashIdx:
      result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
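
# Worked example: after 5 chunks (totalChunks = 0b101), combinedChunks[0]
# holds the 5th chunk and combinedChunks[2] the root over chunks 1-4.
# bottomHashIdx = 0 and submittedChunksHeight = 3, so the unfinished
# branch is completed as H(H(chunk5, zero[0]), zero[1]) and then merged
# with combinedChunks[2], yielding the root of an 8-leaf tree.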

let SszHashingStreamVTable = OutputStreamVTable(
  writePageSync: proc (s: OutputStream, data: openarray[byte])
                      {.nimcall, gcsafe, raises: [Defect, IOError].} =
    trs "ADDING STREAM CHUNK ", data
    SszHashingStream(s).merkleizer.addChunk(data)
  ,
  flushSync: proc (s: OutputStream) {.nimcall, gcsafe.} =
    discard
)

func newSszHashingStream(merkleizer: SszChunksMerkleizer): OutputStream =
  # Using bytesPerChunk for the page and write sizes makes the stream
  # hand the serialized output to the merkleizer in whole 32-byte chunks.
  result = SszHashingStream(vtable: vtableAddr SszHashingStreamVTable,
                            pageSize: bytesPerChunk,
                            maxWriteSize: bytesPerChunk,
                            minWriteSize: bytesPerChunk,
                            merkleizer: merkleizer)
  result.initWithSinglePage()

func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
  var dataLen: array[32, byte]
  dataLen[0..<8] = uint64(length).toBytesLE()
  hash(root.data, dataLen)
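
# This implements the spec's `mix_in_length`: the contents root is hashed
# together with the list length encoded as a little-endian uint64 in a
# zero-padded 32-byte chunk.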

func merkleizeSerializedChunks(merkleizer: SszChunksMerkleizer,
                               obj: auto): Eth2Digest =
  var hashingStream = newSszHashingStream merkleizer
  {.noSideEffect.}:
    # We assume there are no side-effects here, because the
    # SszHashingStream is keeping all of its output in memory.
    hashingStream.writeFixedSized obj
    hashingStream.flush
  merkleizer.getFinalHash

func merkleizeSerializedChunks(obj: auto): Eth2Digest =
  merkleizeSerializedChunks(SszChunksMerkleizer(), obj)

func hash_tree_root*(x: auto): Eth2Digest {.gcsafe.}

template merkleizeFields(body: untyped): Eth2Digest {.dirty.} =
  var merkleizer {.inject.} = SszChunksMerkleizer()

  template addField(field) =
    let hash = hash_tree_root(field)
    trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
    addChunk(merkleizer, hash.data)
    trs "CHUNK ADDED"

  template addField2(field) {.used.} =
    const maxLen = fieldMaxLen(field)
    when maxLen > 0:
      type FieldType = type field
      addField TypeWithMaxLen[FieldType, maxLen](field)
    else:
      addField field

  body

  merkleizer.getFinalHash

func bitlistHashTreeRoot(merkleizer: SszChunksMerkleizer, x: BitSeq): Eth2Digest =
  trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkleizer.limit

  var
    totalBytes = ByteList(x).len
    lastCorrectedByte = ByteList(x)[^1]

  if lastCorrectedByte == byte(1):
    if totalBytes == 1:
      # This is an empty bit list.
      # It should be hashed as a tree containing all zeros:
      let treeHeight = if merkleizer.limit == 0: 0
                       else: log2trunc(merkleizer.limit)
      return mergeBranches(getZeroHashWithoutSideEffect(treeHeight),
                           getZeroHashWithoutSideEffect(0)) # this is the mixed length

    totalBytes -= 1
    lastCorrectedByte = ByteList(x)[^2]
  else:
    # Clear the marker bit that terminates every bit list:
    let markerPos = log2trunc(lastCorrectedByte)
    lastCorrectedByte.clearBit(markerPos)

  var
    bytesInLastChunk = totalBytes mod bytesPerChunk
    fullChunks = totalBytes div bytesPerChunk

  if bytesInLastChunk == 0:
    fullChunks -= 1
    bytesInLastChunk = 32

  for i in 0 ..< fullChunks:
    let
      chunkStartPos = i * bytesPerChunk
      chunkEndPos = chunkStartPos + bytesPerChunk - 1

    merkleizer.addChunk ByteList(x).toOpenArray(chunkStartPos, chunkEndPos)

  var
    lastChunk: array[bytesPerChunk, byte]
    chunkStartPos = fullChunks * bytesPerChunk

  for i in 0 .. bytesInLastChunk - 2:
    lastChunk[i] = ByteList(x)[chunkStartPos + i]

  lastChunk[bytesInLastChunk - 1] = lastCorrectedByte

  merkleizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
  let contentsHash = merkleizer.getFinalHash
  mixInLength contentsHash, x.len
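
# Example: the bit list 1,1,0,1 is stored as the single byte 0b0001_1011 -
# four data bits plus a marker bit just above the last one. Chunkification
# strips the marker (leaving 0b0000_1011), hashes the zero-padded chunk
# and mixes in the bit length 4.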

func hashTreeRootImpl[T](x: T): Eth2Digest =
  when T is uint64:
    trs "UINT64; LITTLE-ENDIAN IDENTITY MAPPING"
    result.data[0..<8] = x.toBytesLE()
  elif (when T is array: ElemType(T) is byte and
      sizeof(T) == sizeof(Eth2Digest) else: false):
    # TODO is this sizeof comparison guaranteed? it's whole structure vs field
    trs "ETH2DIGEST; IDENTITY MAPPING"
    Eth2Digest(data: x)
  elif (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
    trs "FIXED TYPE; USE CHUNK STREAM"
    merkleizeSerializedChunks x
  elif T is string or (when T is (seq|openarray): ElemType(T) is BasicType else: false):
    trs "TYPE WITH LENGTH"
    mixInLength merkleizeSerializedChunks(x), x.len
  elif T is array|object|tuple:
    trs "MERKLEIZING FIELDS"
    merkleizeFields:
      x.enumerateSubFields(f):
        const maxLen = fieldMaxLen(f)
        when maxLen > 0:
          type FieldType = type f
          addField TypeWithMaxLen[FieldType, maxLen](f)
        else:
          addField f
  elif T is seq:
    trs "SEQ WITH VAR SIZE"
    let hash = merkleizeFields(for e in x: addField e)
    mixInLength hash, x.len
  #elif isCaseObject(T):
  #  # TODO implement this
  else:
    unsupported T

func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
  when T is BitList:
    (maxLen + bitsPerChunk - 1) div bitsPerChunk
  elif T is seq:
    type E = ElemType(T)
    when E is BasicType:
      (maxLen * sizeof(E) + bytesPerChunk - 1) div bytesPerChunk
    else:
      maxLen
  else:
    unsupported T # This should never happen
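
# Worked examples: a BitList with maxLen 2048 bits needs
# (2048 + 255) div 256 = 8 chunks; a seq[uint64] with maxLen 100 packs
# four elements per 32-byte chunk, so (100 * 8 + 31) div 32 = 25 chunks.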

func hash_tree_root*(x: auto): Eth2Digest =
  trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
  mixin toSszType
  when x is SignedBeaconBlock:
    # Debugging trap: hashing the signed envelope is almost always a
    # mistake - the block root is the hash_tree_root of the message.
    doAssert false
  when x is TypeWithMaxLen:
    const maxLen = x.maxLen
    type T = type valueOf(x)
    const limit = maxChunksCount(T, maxLen)
    var merkleizer = SszChunksMerkleizer(limit: uint64(limit))

    when T is BitList:
      result = merkleizer.bitlistHashTreeRoot(BitSeq valueOf(x))
    elif T is seq:
      type E = ElemType(T)
      let contentsHash = when E is BasicType:
        merkleizeSerializedChunks(merkleizer, valueOf(x))
      else:
        for elem in valueOf(x):
          let elemHash = hash_tree_root(elem)
          merkleizer.addChunk(elemHash.data)
        merkleizer.getFinalHash()
      result = mixInLength(contentsHash, valueOf(x).len)
    else:
      unsupported T # This should never happen
  else:
    result = hashTreeRootImpl toSszType(x)

  trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result

iterator hash_tree_roots_prefix*[T](lst: openarray[T], limit: auto):
    Eth2Digest =
  # This is a particular type's instantiation of a general fold, reduce,
  # accumulation, prefix sums, etc family of operations. As long as the
  # Eth1 deposit case is the only notable example -- the usual uses of a
  # list involve, at some point, tree-hashing it -- finalized hashes are
  # the only abstraction that escapes from this module this way.
  var merkleizer = SszChunksMerkleizer(limit: uint64(limit))
  for i, elem in lst:
    merkleizer.addChunk(hash_tree_root(elem).data)
    yield mixInLength(merkleizer.getFinalHash(), i + 1)
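
# Usage sketch: each iteration yields the root of the prefix lst[0..i],
# e.g. for maintaining an incremental Eth1 deposit root (the `process`
# helper is hypothetical):
#
#   for root in hash_tree_roots_prefix(deposits, 1 shl DEPOSIT_CONTRACT_TREE_DEPTH):
#     process(root)   # root of deposits[0..i], for i = 0, 1, 2, ...

when isMainModule:
  # Minimal smoke check: hash_tree_root of a uint64 is the value written
  # little-endian into the first 8 bytes of an otherwise-zero chunk.
  let root = hash_tree_root(0xDEADBEEF'u64)
  doAssert root.data[0] == 0xEF'u8 and root.data[8] == 0'u8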