reorder ssz (#1099)
* reorder ssz
* split into hash_trees and ssz_serialization, roughly, for hashing and IO
* move bitseqs into ssz (from stew)
* clean up imports
* docs, imports
parent 405e9db199
commit 56ffb696be
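The substance of the change: the monolithic `ssz.nim` is split into `ssz/ssz_serialization` (byte-level reading and writing) and `ssz/merkleization` (tree hashing), `BitSeq`/`BitArray` move from stew into `ssz/bitseqs`, and importers are updated to pull in only the half they use. A minimal sketch of the resulting import surface for a consumer module — the module paths are taken from the diff below, while `Pair` and its values are invented for illustration:

```nim
import
  beacon_chain/ssz/ssz_serialization, # IO: the SSZ format, SSZ.encode/SSZ.decode
  beacon_chain/ssz/merkleization      # hashing: hash_tree_root and friends

type Pair = object # illustrative type, not from the repo
  x, y: uint64

let p = Pair(x: 3, y: 4)
doAssert SSZ.decode(SSZ.encode(p), Pair) == p # byte-level round-trip
discard hash_tree_root(p)                     # hashing, now a separate import
```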
@@ -259,4 +259,4 @@ OK: 8/8 Fail: 0/8 Skip: 0/8
 OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
-OK: 156/159 Fail: 0/159 Skip: 3/159
+OK: 158/161 Fail: 0/161 Skip: 3/161

@@ -265,4 +265,4 @@ OK: 8/8 Fail: 0/8 Skip: 0/8
 OK: 1/1 Fail: 0/1 Skip: 0/1
 
 ---TOTAL---
-OK: 158/161 Fail: 0/161 Skip: 3/161
+OK: 160/163 Fail: 0/163 Skip: 3/163

@@ -11,7 +11,7 @@ import
   options,
   ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator,
     state_transition_block],
-  ./attestation_pool, ./beacon_node_types, ./ssz
+  ./attestation_pool, ./beacon_node_types
 
 # https://github.com/ethereum/eth2.0-specs/blob/v0.11.1/specs/phase0/validator.md#aggregation-selection
 func is_aggregator(state: BeaconState, slot: Slot, index: CommitteeIndex,

@@ -9,9 +9,9 @@
 
 import
   deques, sequtils, tables, options,
-  chronicles, stew/[bitseqs, byteutils], json_serialization/std/sets,
+  chronicles, stew/[byteutils], json_serialization/std/sets,
   ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
-  ./extras, ./ssz, ./block_pool, ./beacon_node_types
+  ./extras, ./block_pool, ./beacon_node_types
 
 logScope: topics = "attpool"
 

@@ -3,8 +3,9 @@
 import
   typetraits, stew/[results, endians2],
   serialization, chronicles,
-  spec/[datatypes, digest, crypto],
-  eth/db/kvstore, ssz, state_transition
+  eth/db/kvstore,
+  ./spec/[datatypes, digest, crypto],
+  ./ssz/[ssz_serialization, merkleization], ./state_transition
 
 type
   BeaconChainDB* = ref object

@@ -10,7 +10,7 @@ import
   os, tables, random, strutils, times, math,
 
   # Nimble packages
-  stew/[objects, bitseqs, byteutils], stew/shims/macros,
+  stew/[objects, byteutils], stew/shims/macros,
   chronos, confutils, metrics, json_rpc/[rpcserver, jsonmarshal],
   chronicles,
   json_serialization/std/[options, sets, net], serialization/errors,

@@ -24,7 +24,7 @@ import
   attestation_pool, block_pool, eth2_network, eth2_discovery,
   beacon_node_common, beacon_node_types,
   nimbus_binary_common,
-  mainchain_monitor, version, ssz,
+  mainchain_monitor, version, ssz/[merkleization],
   sync_protocol, request_manager, validator_keygen, interop, statusbar,
   sync_manager, state_transition,
   validator_duties, validator_api

@@ -18,6 +18,7 @@ import
   # Local modules
   spec/[datatypes, crypto, digest, helpers],
   conf, time, beacon_chain_db, sszdump,
+  ssz/merkleization,
  attestation_pool, block_pool, eth2_network,
   beacon_node_types, mainchain_monitor, request_manager,
   sync_manager

@@ -10,7 +10,7 @@
 import
   chronicles, options, sequtils, tables,
   metrics,
-  ../ssz, ../beacon_chain_db, ../state_transition, ../extras,
+  ../ssz/merkleization, ../beacon_chain_db, ../state_transition, ../extras,
   ../spec/[crypto, datatypes, digest, helpers, validator],
   block_pools_types
 

@@ -8,7 +8,7 @@
 import
   chronicles, tables,
   metrics, stew/results,
-  ../ssz, ../state_transition, ../extras,
+  ../ssz/merkleization, ../state_transition, ../extras,
   ../spec/[crypto, datatypes, digest, helpers],
 
   block_pools_types, candidate_chains

@@ -1,13 +1,13 @@
 import
   # Standard library
   tables, json,
 
   # Nimble packages
-  stew/[byteutils, bitseqs],
+  stew/byteutils, ssz/types,
   json_rpc/jsonmarshal,
 
   # Local modules
-  spec/[datatypes, digest, crypto]
+  spec/[datatypes, crypto]
 
 proc fromJson*(n: JsonNode, argName: string, result: var ValidatorPubKey) =
   result = ValidatorPubKey.fromHex(n.getStr()).tryGet()

@@ -4,7 +4,7 @@ import
   options as stdOptions, net as stdNet,
 
   # Status libs
-  stew/[varints, base58, bitseqs, endians2, results, byteutils],
+  stew/[varints, base58, endians2, results, byteutils],
   stew/shims/[macros, tables],
   faststreams/[inputs, outputs, buffers], snappy, snappy/framing,
   json_serialization, json_serialization/std/[net, options],

@@ -20,12 +20,13 @@ import
   eth/[keys, async_utils], eth/p2p/p2p_protocol_dsl,
   eth/net/nat, eth/p2p/discoveryv5/[enr, node],
   # Beacon node modules
-  version, conf, eth2_discovery, libp2p_json_serialization, conf, ssz,
+  version, conf, eth2_discovery, libp2p_json_serialization, conf,
+  ssz/ssz_serialization,
   peer_pool, spec/[datatypes, network]
 
 export
   version, multiaddress, peer_pool, peerinfo, p2pProtocol,
-  libp2p_json_serialization, ssz, peer, results
+  libp2p_json_serialization, ssz_serialization, peer, results
 
 logScope:
   topics = "networking"

|
@ -19,7 +19,7 @@ import nimcrypto/[hash, keccak]
|
|||
import secp256k1 as s
|
||||
import stint
|
||||
import snappy
|
||||
import spec/[crypto, datatypes, network, digest], ssz
|
||||
import spec/[crypto, datatypes, network, digest], ssz/ssz_serialization
|
||||
|
||||
const
|
||||
InspectorName* = "Beacon-Chain Network Inspector"
|
||||
|
|
|
@@ -2,7 +2,7 @@
 
 import
   stew/endians2, stint,
-  ./extras, ./ssz,
+  ./extras, ./ssz/merkleization,
   spec/[crypto, datatypes, digest, helpers]
 
 func get_eth1data_stub*(deposit_count: uint64, current_epoch: Epoch): Eth1Data =

@@ -16,7 +16,7 @@ import
   sequtils, strutils, macros, bitops,
   # Specs
   ../../beacon_chain/spec/[beaconstate, datatypes, digest, helpers],
-  ../../beacon_chain/ssz
+  ../../beacon_chain/ssz/merkleization
 
 func round_step_down*(x: Natural, step: static Natural): int {.inline.} =
   ## Round the input to the previous multiple of "step"

@@ -9,8 +9,8 @@
 
 import
   tables, algorithm, math, sequtils, options,
-  json_serialization/std/sets, chronicles, stew/bitseqs,
-  ../extras, ../ssz,
+  json_serialization/std/sets, chronicles,
+  ../extras, ../ssz/merkleization,
   ./crypto, ./datatypes, ./digest, ./helpers, ./validator,
   ../../nbench/bench_lab
 

@@ -25,7 +25,7 @@
 
 import
   # Internal
-  ./digest, ../ssz/types,
+  ./digest,
   # Status
   stew/[endians2, objects, results, byteutils],
   nimcrypto/sysrand,

@@ -24,11 +24,11 @@
 
 import
   macros, hashes, json, strutils, tables,
-  stew/[byteutils, bitseqs], chronicles,
-  ../ssz/types as sszTypes, ./crypto, ./digest
+  stew/[byteutils], chronicles,
+  ../ssz/types, ./crypto, ./digest
 
 export
-  sszTypes
+  types
 
 # TODO Data types:
 # Presently, we're reusing the data types from the serialization (uint64) in the

@@ -15,7 +15,7 @@ import
   # Third-party
   stew/endians2,
   # Internal
-  ./datatypes, ./digest, ./crypto, ../ssz
+  ./datatypes, ./digest, ./crypto, ../ssz/merkleization
 
 type
   # This solves an ambiguous identifier Error in some contexts

@@ -31,7 +31,7 @@
 
 import
   algorithm, collections/sets, chronicles, options, sequtils, sets,
-  ../extras, ../ssz, metrics,
+  ../extras, ../ssz/merkleization, metrics,
   beaconstate, crypto, datatypes, digest, helpers, validator,
   ../../nbench/bench_lab
 

@@ -36,8 +36,8 @@
 
 import
   math, sequtils, tables,
-  stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,
-  metrics, ../extras, ../ssz,
+  stew/[bitops2], chronicles, json_serialization/std/sets,
+  metrics, ../extras, ../ssz/merkleization,
   beaconstate, crypto, datatypes, digest, helpers, validator,
   state_transition_helpers,
   ../../nbench/bench_lab

@@ -1,5 +1,5 @@
 # beacon_chain
-# Copyright (c) 2018 Status Research & Development GmbH
+# Copyright (c) 2018-2020 Status Research & Development GmbH
 # Licensed and distributed under either of
 #  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
 #  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).

@@ -8,712 +8,14 @@
 # SSZ Serialization (simple serialize)
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md
 
-# TODO Cannot override push, even though the function is annotated
-# nim-beacon-chain/beacon_chain/ssz.nim(212, 18) Error: can raise an unlisted exception: IOError
-#{.push raises: [Defect].}
+{.push raises: [Defect].}
 
-# TODO Many RVO bugs, careful
-# https://github.com/nim-lang/Nim/issues/14470
-# https://github.com/nim-lang/Nim/issues/14126
-
 import
-  options, algorithm, options, strformat, typetraits,
-  stew/[bitops2, byteutils, bitseqs, endians2, objects, varints, ptrops],
-  stew/ranges/ptr_arith, stew/shims/macros,
-  faststreams/[inputs, outputs, buffers],
-  serialization, serialization/testing/tracing,
-  ./spec/[crypto, datatypes, digest],
-  ./ssz/[types, bytes_reader],
-  ../nbench/bench_lab
-
-# ################### Helper functions ###################################
-
-when hasSerializationTracing:
-  import stew/byteutils
+  ./ssz/[merkleization, ssz_serialization, types]
 
 export
-  serialization, types, bytes_reader
-
-when defined(serialization_tracing):
-  import
-    typetraits
-
-const
-  bytesPerChunk = 32
-  bitsPerChunk = bytesPerChunk * 8
-
-type
-  SszReader* = object
-    stream: InputStream
-
-  SszWriter* = object
-    stream: OutputStream
-
-  SszChunksMerkleizer = object
-    combinedChunks: ptr UncheckedArray[Eth2Digest]
-    totalChunks: uint64
-    topIndex: int
-
-  SizePrefixed*[T] = distinct T
-  SszMaxSizeExceeded* = object of SerializationError
-
-  VarSizedWriterCtx = object
-    fixedParts: WriteCursor
-    offset: int
-
-  FixedSizedWriterCtx = object
-
-serializationFormat SSZ,
-                    Reader = SszReader,
-                    Writer = SszWriter,
-                    PreferedOutput = seq[byte]
-
-template bytes(x: BitSeq): untyped =
-  seq[byte](x)
-
-template sizePrefixed*[TT](x: TT): untyped =
-  type T = TT
-  SizePrefixed[T](x)
-
-proc init*(T: type SszReader, stream: InputStream): T {.raises: [Defect].} =
-  T(stream: stream)
-
-method formatMsg*(
-    err: ref SszSizeMismatchError,
-    filename: string): string {.gcsafe, raises: [Defect].} =
-  try:
-    &"SSZ size mismatch, element {err.elementSize}, actual {err.actualSszSize}, type {err.deserializedType}, file {filename}"
-  except CatchableError:
-    "SSZ size mismatch"
-
-template toSszType*(x: auto): auto =
-  mixin toSszType
-
-  # Please note that BitArray doesn't need any special treatment here
-  # because it can be considered a regular fixed-size object type.
-
-  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
-  elif x is Eth2Digest: x.data
-  elif x is BlsCurveType: toRaw(x)
-  elif x is ForkDigest|Version: distinctBase(x)
-  else: x
-
-proc writeFixedSized(s: var (OutputStream|WriteCursor), x: auto) {.raises: [Defect, IOError].} =
-  mixin toSszType
-
-  when x is byte:
-    s.write x
-  elif x is bool:
-    s.write byte(ord(x))
-  elif x is UintN:
-    when cpuEndian == bigEndian:
-      s.write toBytesLE(x)
-    else:
-      s.writeMemCopy x
-  elif x is array:
-    when x[0] is byte:
-      trs "APPENDING FIXED SIZE BYTES", x
-      s.write x
-    else:
-      for elem in x:
-        trs "WRITING FIXED SIZE ARRAY ELEMENT"
-        s.writeFixedSized toSszType(elem)
-  elif x is tuple|object:
-    enumInstanceSerializedFields(x, fieldName, field):
-      trs "WRITING FIXED SIZE FIELD", fieldName
-      s.writeFixedSized toSszType(field)
-  else:
-    unsupported x.type
-
-template writeOffset(cursor: var WriteCursor, offset: int) =
-  write cursor, toBytesLE(uint32 offset)
-
-template supports*(_: type SSZ, T: type): bool =
-  mixin toSszType
-  anonConst compiles(fixedPortionSize toSszType(declval T))
-
-func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
-  result.stream = stream
-
-template enumerateSubFields(holder, fieldVar, body: untyped) =
-  when holder is array|HashArray:
-    for fieldVar in holder: body
-  else:
-    enumInstanceSerializedFields(holder, _{.used.}, fieldVar): body
-
-proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
-
-proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
-  type T = TT
-  when isFixedSize(T):
-    FixedSizedWriterCtx()
-  else:
-    const offset = when T is array|HashArray: len(T) * offsetSize
-                   else: fixedPortionSize(T)
-    VarSizedWriterCtx(offset: offset,
-                      fixedParts: w.stream.delayFixedSizeWrite(offset))
-
-template writeField*(w: var SszWriter,
-                     ctx: var auto,
-                     fieldName: string,
-                     field: auto) =
-  mixin toSszType
-  when ctx is FixedSizedWriterCtx:
-    writeFixedSized(w.stream, toSszType(field))
-  else:
-    type FieldType = type toSszType(field)
-
-    when isFixedSize(FieldType):
-      writeFixedSized(ctx.fixedParts, toSszType(field))
-    else:
-      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
-      writeOffset(ctx.fixedParts, ctx.offset)
-      let initPos = w.stream.pos
-      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
-      when FieldType is BitList:
-        trs "BIT SEQ ", bytes(field)
-      writeVarSizeType(w, toSszType(field))
-      ctx.offset += w.stream.pos - initPos
-
-template endRecord*(w: var SszWriter, ctx: var auto) =
-  when ctx is VarSizedWriterCtx:
-    finalize ctx.fixedParts
-
-proc writeSeq[T](w: var SszWriter, value: seq[T])
-                {.raises: [Defect, IOError].} =
-  # Please note that `writeSeq` exists in order to reduce the code bloat
-  # produced from generic instantiations of the unique `List[N, T]` types.
-  when isFixedSize(T):
-    trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
-    for elem in value:
-      w.stream.writeFixedSized toSszType(elem)
-    trs "DONE"
-  else:
-    trs "WRITING LIST WITH VAR SIZE ELEMENTS"
-    var offset = value.len * offsetSize
-    var cursor = w.stream.delayFixedSizeWrite offset
-    for elem in value:
-      cursor.writeFixedSized uint32(offset)
-      let initPos = w.stream.pos
-      w.writeVarSizeType toSszType(elem)
-      offset += w.stream.pos - initPos
-    finalize cursor
-    trs "DONE"
-
-proc writeVarSizeType(w: var SszWriter, value: auto) {.raises: [Defect, IOError].} =
-  trs "STARTING VAR SIZE TYPE"
-
-  when value is HashArray|HashList:
-    writeVarSizeType(w, value.data)
-  elif value is List:
-    # We reduce code bloat by forwarding all `List` types to a general `seq[T]` proc.
-    writeSeq(w, asSeq value)
-  elif value is BitList:
-    # ATTENTION! We can reuse `writeSeq` only as long as our BitList type is implemented
-    # to internally match the binary representation of SSZ BitLists in memory.
-    writeSeq(w, bytes value)
-  elif value is object|tuple|array:
-    trs "WRITING OBJECT OR ARRAY"
-    var ctx = beginRecord(w, type value)
-    enumerateSubFields(value, field):
-      writeField w, ctx, astToStr(field), field
-    endRecord w, ctx
-  else:
-    unsupported type(value)
-
-proc writeValue*(w: var SszWriter, x: auto) {.gcsafe, raises: [Defect, IOError].} =
-  mixin toSszType
-  type T = type toSszType(x)
-
-  when isFixedSize(T):
-    w.stream.writeFixedSized toSszType(x)
-  else:
-    w.writeVarSizeType toSszType(x)
-
-func sszSize*(value: auto): int {.gcsafe, raises: [Defect].}
-
-func sszSizeForVarSizeList[T](value: openarray[T]): int =
-  result = len(value) * offsetSize
-  for elem in value:
-    result += sszSize(toSszType elem)
-
-func sszSize*(value: auto): int {.gcsafe, raises: [Defect].} =
-  mixin toSszType
-  type T = type toSszType(value)
-
-  when isFixedSize(T):
-    anonConst fixedPortionSize(T)
-
-  elif T is array|List|HashList|HashArray:
-    type E = ElemType(T)
-    when isFixedSize(E):
-      len(value) * anonConst(fixedPortionSize(E))
-    elif T is HashArray:
-      sszSizeForVarSizeList(value.data)
-    elif T is array:
-      sszSizeForVarSizeList(value)
-    else:
-      sszSizeForVarSizeList(asSeq value)
-
-  elif T is BitList:
-    return len(bytes(value))
-
-  elif T is object|tuple:
-    result = anonConst fixedPortionSize(T)
-    enumInstanceSerializedFields(value, _{.used.}, field):
-      type FieldType = type toSszType(field)
-      when not isFixedSize(FieldType):
-        result += sszSize(toSszType field)
-
-  else:
-    unsupported T
-
-proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) {.raises: [Defect, IOError].} =
-  var cursor = w.stream.delayVarSizeWrite(10)
-  let initPos = w.stream.pos
-  w.writeValue T(x)
-  let length = uint64(w.stream.pos - initPos)
-  when false:
-    discard
-    # TODO varintBytes is sub-optimal at the moment
-    # cursor.writeAndFinalize length.varintBytes
-  else:
-    var buf: VarintBuffer
-    buf.writeVarint length
-    cursor.finalWrite buf.writtenBytes
-
-proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSszError, SszSizeMismatchError, IOError].} =
-  when isFixedSize(T):
-    const minimalSize = fixedPortionSize(T)
-    if r.stream.readable(minimalSize):
-      readSszValue(r.stream.read(minimalSize), val)
-    else:
-      raise newException(MalformedSszError, "SSZ input of insufficient size")
-  else:
-    # TODO Read the fixed portion first and precisely measure the size of
-    # the dynamic portion to consume the right number of bytes.
-    readSszValue(r.stream.read(r.stream.len.get), val)
-
-const
-  zero64 = default array[64, byte]
-
-func hash(a, b: openArray[byte]): Eth2Digest =
-  result = withEth2Hash:
-    trs "MERGING BRANCHES "
-    trs toHex(a)
-    trs toHex(b)
-
-    h.update a
-    h.update b
-  trs "HASH RESULT ", result
-
-func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
-  result = withEth2Hash:
-    trs "MERGING BRANCHES OPEN ARRAY"
-    trs toHex(existing.data)
-    trs toHex(newData)
-
-    h.update existing.data
-    h.update newData
-
-    let paddingBytes = bytesPerChunk - newData.len
-    if paddingBytes > 0:
-      trs "USING ", paddingBytes, " PADDING BYTES"
-      h.update zero64.toOpenArray(0, paddingBytes - 1)
-  trs "HASH RESULT ", result
-
-template mergeBranches(a, b: Eth2Digest): Eth2Digest =
-  hash(a.data, b.data)
-
-func computeZeroHashes: array[sizeof(Limit) * 8, Eth2Digest] =
-  result[0] = Eth2Digest()
-  for i in 1 .. result.high:
-    result[i] = mergeBranches(result[i - 1], result[i - 1])
-
-const zeroHashes = computeZeroHashes()
-
-func addChunk(merkleizer: var SszChunksMerkleizer, data: openarray[byte]) =
-  doAssert data.len > 0 and data.len <= bytesPerChunk
-
-  if not getBitLE(merkleizer.totalChunks, 0):
-    let chunkStartAddr = addr merkleizer.combinedChunks[0].data[0]
-    copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
-    zeroMem(chunkStartAddr.offset(data.len), bytesPerChunk - data.len)
-    trs "WROTE BASE CHUNK ", merkleizer.combinedChunks[0], " ", data.len
-  else:
-    var hash = mergeBranches(merkleizer.combinedChunks[0], data)
-
-    for i in 1 .. merkleizer.topIndex:
-      trs "ITERATING"
-      if getBitLE(merkleizer.totalChunks, i):
-        trs "CALLING MERGE BRANCHES"
-        hash = mergeBranches(merkleizer.combinedChunks[i], hash)
-      else:
-        trs "WRITING FRESH CHUNK AT ", i, " = ", hash
-        merkleizer.combinedChunks[i] = hash
-        break
-
-  inc merkleizer.totalChunks
-
-template createMerkleizer(totalElements: static Limit): SszChunksMerkleizer =
-  trs "CREATING A MERKLEIZER FOR ", totalElements
-
-  const treeHeight = bitWidth nextPow2(uint64 totalElements)
-  var combinedChunks {.noInit.}: array[treeHeight, Eth2Digest]
-
-  SszChunksMerkleizer(
-    combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](addr combinedChunks),
-    topIndex: treeHeight - 1,
-    totalChunks: 0)
-
-func getFinalHash(merkleizer: var SszChunksMerkleizer): Eth2Digest =
-  if merkleizer.totalChunks == 0:
-    return zeroHashes[merkleizer.topIndex]
-
-  let
-    bottomHashIdx = firstOne(merkleizer.totalChunks) - 1
-    submittedChunksHeight = bitWidth(merkleizer.totalChunks - 1)
-    topHashIdx = merkleizer.topIndex
-
-  trs "BOTTOM HASH ", bottomHashIdx
-  trs "SUBMITTED HEIGHT ", submittedChunksHeight
-  trs "TOP HASH IDX ", topHashIdx
-
-  if bottomHashIdx != submittedChunksHeight:
-    # Our tree is not finished. We must complete the work in progress
-    # branches and then extend the tree to the right height.
-    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
-                           zeroHashes[bottomHashIdx])
-
-    for i in bottomHashIdx + 1 ..< topHashIdx:
-      if getBitLE(merkleizer.totalChunks, i):
-        result = mergeBranches(merkleizer.combinedChunks[i], result)
-        trs "COMBINED"
-      else:
-        result = mergeBranches(result, zeroHashes[i])
-        trs "COMBINED WITH ZERO"
-
-  elif bottomHashIdx == topHashIdx:
-    # We have a perfect tree (chunks == 2**n) at just the right height!
-    result = merkleizer.combinedChunks[bottomHashIdx]
-  else:
-    # We have a perfect tree of user chunks, but we have more work to
-    # do - we must extend it to reach the desired height
-    result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
-                           zeroHashes[bottomHashIdx])
-
-    for i in bottomHashIdx + 1 ..< topHashIdx:
-      result = mergeBranches(result, zeroHashes[i])
-
-func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
-  var dataLen: array[32, byte]
-  dataLen[0..<8] = uint64(length).toBytesLE()
-  hash(root.data, dataLen)
-
-func hash_tree_root*(x: auto): Eth2Digest {.gcsafe, raises: [Defect].}
-
-template merkleizeFields(totalElements: static Limit, body: untyped): Eth2Digest =
-  var merkleizer {.inject.} = createMerkleizer(totalElements)
-
-  template addField(field) =
-    let hash = hash_tree_root(field)
-    trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
-    addChunk(merkleizer, hash.data)
-    trs "CHUNK ADDED"
-
-  body
-
-  getFinalHash(merkleizer)
-
-template writeBytesLE(chunk: var array[bytesPerChunk, byte], atParam: int,
-                      val: SomeUnsignedInt) =
-  let at = atParam
-  chunk[at ..< at + sizeof(val)] = toBytesLE(val)
-
-func chunkedHashTreeRootForBasicTypes[T](merkleizer: var SszChunksMerkleizer,
-                                         arr: openarray[T]): Eth2Digest =
-  static:
-    doAssert T is BasicType
-
-  when T is byte:
-    var
-      remainingBytes = arr.len
-      pos = cast[ptr byte](unsafeAddr arr[0])
-
-    while remainingBytes >= bytesPerChunk:
-      merkleizer.addChunk(makeOpenArray(pos, bytesPerChunk))
-      pos = offset(pos, bytesPerChunk)
-      remainingBytes -= bytesPerChunk
-
-    if remainingBytes > 0:
-      merkleizer.addChunk(makeOpenArray(pos, remainingBytes))
-
-  elif T is bool or cpuEndian == littleEndian:
-    let
-      baseAddr = cast[ptr byte](unsafeAddr arr[0])
-      len = arr.len * sizeof(T)
-    return chunkedHashTreeRootForBasicTypes(merkleizer, makeOpenArray(baseAddr, len))
-
-  else:
-    static:
-      doAssert T is UintN
-      doAssert bytesPerChunk mod sizeof(Т) == 0
-
-    const valuesPerChunk = bytesPerChunk div sizeof(Т)
-
-    var writtenValues = 0
-
-    var chunk: array[bytesPerChunk, byte]
-    while writtenValues < arr.len - valuesPerChunk:
-      for i in 0 ..< valuesPerChunk:
-        chunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
-      merkleizer.addChunk chunk
-      inc writtenValues, valuesPerChunk
-
-    let remainingValues = arr.len - writtenValues
-    if remainingValues > 0:
-      var lastChunk: array[bytesPerChunk, byte]
-      for i in 0 ..< remainingValues:
-        chunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
-      merkleizer.addChunk lastChunk
-
-  getFinalHash(merkleizer)
-
-func bitListHashTreeRoot(merkleizer: var SszChunksMerkleizer, x: BitSeq): Eth2Digest =
-  # TODO: Switch to a simpler BitList representation and
-  # replace this with `chunkedHashTreeRoot`
-  trs "CHUNKIFYING BIT SEQ WITH TOP INDEX ", merkleizer.topIndex
-
-  var
-    totalBytes = bytes(x).len
-    lastCorrectedByte = bytes(x)[^1]
-
-  if lastCorrectedByte == byte(1):
-    if totalBytes == 1:
-      # This is an empty bit list.
-      # It should be hashed as a tree containing all zeros:
-      return mergeBranches(zeroHashes[merkleizer.topIndex],
-                           zeroHashes[0]) # this is the mixed length
-
-    totalBytes -= 1
-    lastCorrectedByte = bytes(x)[^2]
-  else:
-    let markerPos = log2trunc(lastCorrectedByte)
-    lastCorrectedByte.clearBit(markerPos)
-
-  var
-    bytesInLastChunk = totalBytes mod bytesPerChunk
-    fullChunks = totalBytes div bytesPerChunk
-
-  if bytesInLastChunk == 0:
-    fullChunks -= 1
-    bytesInLastChunk = 32
-
-  for i in 0 ..< fullChunks:
-    let
-      chunkStartPos = i * bytesPerChunk
-      chunkEndPos = chunkStartPos + bytesPerChunk - 1
-
-    merkleizer.addChunk bytes(x).toOpenArray(chunkStartPos, chunkEndPos)
-
-  var
-    lastChunk: array[bytesPerChunk, byte]
-    chunkStartPos = fullChunks * bytesPerChunk
-
-  for i in 0 .. bytesInLastChunk - 2:
-    lastChunk[i] = bytes(x)[chunkStartPos + i]
-
-  lastChunk[bytesInLastChunk - 1] = lastCorrectedByte
-
-  merkleizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
-  let contentsHash = merkleizer.getFinalHash
-  mixInLength contentsHash, x.len
-
-func maxChunksCount(T: type, maxLen: int64): int64 =
-  when T is BitList|BitArray:
-    (maxLen + bitsPerChunk - 1) div bitsPerChunk
-  elif T is array|List:
-    maxChunkIdx(ElemType(T), maxLen)
-  else:
-    unsupported T # This should never happen
-
-func hashTreeRootAux[T](x: T): Eth2Digest =
-  when T is SignedBeaconBlock:
-    unsupported T # Blocks are identified by htr(BeaconBlock) so we avoid these
-  elif T is bool|char:
-    result.data[0] = byte(x)
-  elif T is SomeUnsignedInt:
-    when cpuEndian == bigEndian:
-      result.data[0..<sizeof(x)] = toBytesLE(x)
-    else:
-      copyMem(addr result.data[0], unsafeAddr x, sizeof x)
-  elif (when T is array: ElemType(T) is BasicType else: false):
-    type E = ElemType(T)
-    when sizeof(T) <= sizeof(result.data):
-      when E is byte|bool or cpuEndian == littleEndian:
-        copyMem(addr result.data[0], unsafeAddr x, sizeof x)
-      else:
-        var pos = 0
-        for e in x:
-          writeBytesLE(result.data, pos, e)
-          pos += sizeof(E)
-    else:
-      trs "FIXED TYPE; USE CHUNK STREAM"
-      var markleizer = createMerkleizer(maxChunksCount(T, x.len))
-      chunkedHashTreeRootForBasicTypes(markleizer, x)
-  elif T is BitArray:
-    hashTreeRootAux(x.bytes)
-  elif T is array|object|tuple:
-    trs "MERKLEIZING FIELDS"
-    const totalFields = when T is array: len(x)
-                        else: totalSerializedFields(T)
-    merkleizeFields(totalFields):
-      x.enumerateSubFields(f):
-        addField f
-  #elif isCaseObject(T):
-  #  # TODO implement this
-  else:
-    unsupported T
-
-func hashTreeRootList(x: List|BitList): Eth2Digest =
-  const maxLen = static(x.maxLen)
-  type T = type(x)
-  const limit = maxChunksCount(T, maxLen)
-  var merkleizer = createMerkleizer(limit)
-
-  when x is BitList:
-    merkleizer.bitListHashTreeRoot(BitSeq x)
-  else:
-    type E = ElemType(T)
-    let contentsHash = when E is BasicType:
-      chunkedHashTreeRootForBasicTypes(merkleizer, asSeq x)
-    else:
-      for elem in x:
-        let elemHash = hash_tree_root(elem)
-        merkleizer.addChunk(elemHash.data)
-      merkleizer.getFinalHash()
-    mixInLength(contentsHash, x.len)
-
-func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
-  # The merged hash of the data at `chunkIdx` and `chunkIdx + 1`
-  trs "DATA HASH ", chunkIdx, " ", x.data.len
-
-  when x.T is BasicType:
-    when cpuEndian == bigEndian:
-      unsupported type x # No bigendian support here!
-
-    let
-      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
-      byteIdx = chunkIdx * bytesPerChunk
-      byteLen = x.data.len * sizeof(x.T)
-
-    if byteIdx >= byteLen:
-      zeroHashes[1]
-    else:
-      let
-        nbytes = min(byteLen - byteIdx, 64)
-        padding = 64 - nbytes
-
-      hash(
-        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
-        toOpenArray(zero64, 0, int(padding - 1)))
-  else:
-    if chunkIdx + 1 > x.data.len():
-      zeroHashes[x.maxDepth]
-    elif chunkIdx + 1 == x.data.len():
-      mergeBranches(
-        hash_tree_root(x.data[chunkIdx]),
-        Eth2Digest())
-    else:
-      mergeBranches(
-        hash_tree_root(x.data[chunkIdx]),
-        hash_tree_root(x.data[chunkIdx + 1]))
-
-template mergedHash(x: HashList|HashArray, vIdxParam: int64): Eth2Digest =
-  # The merged hash of the data at `vIdx` and `vIdx + 1`
-
-  let vIdx = vIdxParam
-  if vIdx >= x.maxChunks:
-    let dataIdx = vIdx - x.maxChunks
-    mergedDataHash(x, dataIdx)
-  else:
-    mergeBranches(
-      hashTreeRootCached(x, vIdx),
-      hashTreeRootCached(x, vIdx + 1))
-
-func hashTreeRootCached*(x: HashList, vIdx: int64): Eth2Digest =
-  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"
-
-  let
-    layer = layer(vIdx)
-    idxInLayer = vIdx - (1'i64 shl layer)
-    layerIdx = idxInlayer + x.indices[layer]
-
-  doAssert layer < x.maxDepth
-  trs "GETTING ", vIdx, " ", layerIdx, " ", layer, " ", x.indices.len
-  if layerIdx >= x.indices[layer + 1]:
-    trs "ZERO ", x.indices[layer], " ", x.indices[layer + 1]
-    zeroHashes[x.maxDepth - layer]
-  else:
-    if not isCached(x.hashes[layerIdx]):
-      # TODO oops. so much for maintaining non-mutability.
-      let px = unsafeAddr x
-
-      trs "REFRESHING ", vIdx, " ", layerIdx, " ", layer
-
-      px[].hashes[layerIdx] = mergedHash(x, vIdx * 2)
-    else:
-      trs "CACHED ", layerIdx
-
-    x.hashes[layerIdx]
-
-func hashTreeRootCached*(x: HashArray, vIdx: int): Eth2Digest =
-  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"
-
-  if not isCached(x.hashes[vIdx]):
-    # TODO oops. so much for maintaining non-mutability.
-    let px = unsafeAddr x
-
-    px[].hashes[vIdx] = mergedHash(x, vIdx * 2)
-
-  return x.hashes[vIdx]
-
-func hashTreeRootCached*(x: HashArray): Eth2Digest =
-  hashTreeRootCached(x, 1) # Array does not use idx 0
-
-func hashTreeRootCached*(x: HashList): Eth2Digest =
-  if x.data.len == 0:
-    mixInLength(zeroHashes[x.maxDepth], x.data.len())
-  else:
-    if not isCached(x.hashes[0]):
-      # TODO oops. so much for maintaining non-mutability.
-      let px = unsafeAddr x
-      px[].hashes[0] = mixInLength(hashTreeRootCached(x, 1), x.data.len)
-
-    x.hashes[0]
-
-func hash_tree_root*(x: auto): Eth2Digest {.raises: [Defect], nbench.} =
-  trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
-  mixin toSszType
-
-  result =
-    when x is HashArray|HashList:
-      hashTreeRootCached(x)
-    elif x is List|BitList:
-      hashTreeRootList(x)
-    else:
-      hashTreeRootAux toSszType(x)
-
-  trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result
-
-iterator hash_tree_roots_prefix*[T](lst: openarray[T], limit: static Limit): Eth2Digest =
-  # This is a particular type's instantiation of a general fold, reduce,
-  # accumulation, prefix sums, etc family of operations. As long as that
-  # Eth1 deposit case is the only notable example -- the usual uses of a
-  # list involve, at some point, tree-hashing it -- finalized hashes are
-  # the only abstraction that escapes from this module this way.
-  var merkleizer = createMerkleizer(limit)
-  for i, elem in lst:
-    merkleizer.addChunk(hash_tree_root(elem).data)
-    yield mixInLength(merkleizer.getFinalHash(), i + 1)
+  merkleization, ssz_serialization, types

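The hunk above guts `ssz.nim` down to a re-export shim, so call sites that still do `import ./ssz` keep compiling and see the same names as before. A hedged sketch of the preserved facade behaviour (the facade path comes from the diff; `Sample` and its values are invented):

```nim
import beacon_chain/ssz # now only re-exports merkleization, ssz_serialization, types

type Sample = object # illustrative type, not from the repo
  a: uint64
  b: bool

let s = Sample(a: 42, b: true)
doAssert SSZ.decode(SSZ.encode(s), Sample) == s # serialization still reachable
discard hash_tree_root(s)                       # merkleization still reachable
```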
@@ -0,0 +1,255 @@
+# beacon_chain
+# Copyright (c) 2018 Status Research & Development GmbH
+# Licensed and distributed under either of
+#  * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
+#  * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+{.push raises: [Defect].}
+
+import
+  stew/[bitops2, ptrops]
+
+type
+  Bytes = seq[byte]
+
+  BitSeq* = distinct Bytes
+    ## The current design of BitSeq tries to follow precisely
+    ## the bitwise representation of the SSZ bitlists.
+    ## This is a relatively compact representation, but as
+    ## evident from the code below, many of the operations
+    ## are not trivial.
+
+  BitArray*[bits: static int] = object
+    bytes*: array[(bits + 7) div 8, byte]
+
+func bitsLen*(bytes: openarray[byte]): int =
+  let
+    bytesCount = bytes.len
+    lastByte = bytes[bytesCount - 1]
+    markerPos = log2trunc(lastByte)
+
+  bytesCount * 8 - (8 - markerPos)
+
+template len*(s: BitSeq): int =
+  bitsLen(Bytes s)
+
+template len*(a: BitArray): int =
+  a.bits
+
+func add*(s: var BitSeq, value: bool) =
+  let
+    lastBytePos = s.Bytes.len - 1
+    lastByte = s.Bytes[lastBytePos]
+
+  if (lastByte and byte(128)) == 0:
+    # There is at least one leading zero, so we have enough
+    # room to store the new bit
+    let markerPos = log2trunc(lastByte)
+    s.Bytes[lastBytePos].changeBit markerPos, value
+    s.Bytes[lastBytePos].setBit markerPos + 1
+  else:
+    s.Bytes[lastBytePos].changeBit 7, value
+    s.Bytes.add byte(1)
+
+func loadLEBytes(WordType: type, bytes: openarray[byte]): WordType =
+  # TODO: this is a temporary proc until the endians API is improved
+  var shift = 0
+  for b in bytes:
+    result = result or (WordType(b) shl shift)
+    shift += 8
+
+func storeLEBytes(value: SomeUnsignedInt, dst: var openarray[byte]) =
+  when system.cpuEndian == bigEndian:
+    var shift = 0
+    for i in 0 ..< dst.len:
+      result[i] = byte((v shr shift) and 0xff)
+      shift += 8
+  else:
+    copyMem(addr dst[0], unsafeAddr value, dst.len)
+
+template loopOverWords(lhs, rhs: BitSeq,
+                       lhsIsVar, rhsIsVar: static bool,
+                       WordType: type,
+                       lhsBits, rhsBits, body: untyped) =
+  const hasRhs = astToStr(lhs) != astToStr(rhs)
+
+  let bytesCount = len Bytes(lhs)
+  when hasRhs: doAssert len(Bytes(rhs)) == bytesCount
+
+  var fullWordsCount = bytesCount div sizeof(WordType)
+  let lastWordSize = bytesCount mod sizeof(WordType)
+
+  block:
+    var lhsWord: WordType
+    when hasRhs:
+      var rhsWord: WordType
+    var firstByteOfLastWord, lastByteOfLastWord: int
+
+    # TODO: Returing a `var` value from an iterator is always safe due to
+    # the way inlining works, but currently the compiler reports an error
+    # when a local variable escapes. We have to cheat it with this location
+    # obfuscation through pointers:
+    template lhsBits: auto = (addr(lhsWord))[]
+
+    when hasRhs:
+      template rhsBits: auto = (addr(rhsWord))[]
+
+    template lastWordBytes(bitseq): auto =
+      Bytes(bitseq).toOpenArray(firstByteOfLastWord, lastByteOfLastWord)
+
+    template initLastWords =
+      lhsWord = loadLEBytes(WordType, lastWordBytes(lhs))
+      when hasRhs: rhsWord = loadLEBytes(WordType, lastWordBytes(rhs))
+
+    if lastWordSize == 0:
+      firstByteOfLastWord = bytesCount - sizeof(WordType)
+      lastByteOfLastWord = bytesCount - 1
+      dec fullWordsCount
+    else:
+      firstByteOfLastWord = bytesCount - lastWordSize
+      lastByteOfLastWord = bytesCount - 1
+
+    initLastWords()
+    let markerPos = log2trunc(lhsWord)
+    when hasRhs: doAssert log2trunc(rhsWord) == markerPos
+
+    lhsWord.clearBit markerPos
+    when hasRhs: rhsWord.clearBit markerPos
+
+    body
+
+    when lhsIsVar or rhsIsVar:
+      let
+        markerBit = uint(1 shl markerPos)
+        mask = markerBit - 1'u
+
+      when lhsIsVar:
+        let lhsEndResult = (lhsWord and mask) or markerBit
+        storeLEBytes(lhsEndResult, lastWordBytes(lhs))
+
+      when rhsIsVar:
+        let rhsEndResult = (rhsWord and mask) or markerBit
+        storeLEBytes(rhsEndResult, lastWordBytes(rhs))
+
+  var lhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(lhs)[0])
+  let lhsEndAddr = offset(lhsCurrAddr, fullWordsCount)
+  when hasRhs:
+    var rhsCurrAddr = cast[ptr WordType](unsafeAddr Bytes(rhs)[0])
+
+  while lhsCurrAddr < lhsEndAddr:
+    template lhsBits: auto = lhsCurrAddr[]
+    when hasRhs:
+      template rhsBits: auto = rhsCurrAddr[]
+
+    body
+
+    lhsCurrAddr = offset(lhsCurrAddr, 1)
+    when hasRhs: rhsCurrAddr = offset(rhsCurrAddr, 1)
+
+iterator words*(x: var BitSeq): var uint =
+  loopOverWords(x, x, true, false, uint, word, wordB):
+    yield word
+
+iterator words*(x: BitSeq): uint =
+  loopOverWords(x, x, false, false, uint, word, word):
+    yield word
+
+iterator words*(a, b: BitSeq): (uint, uint) =
+  loopOverWords(a, b, false, false, uint, wordA, wordB):
+    yield (wordA, wordB)
+
+iterator words*(a: var BitSeq, b: BitSeq): (var uint, uint) =
+  loopOverWords(a, b, true, false, uint, wordA, wordB):
+    yield (wordA, wordB)
+
+iterator words*(a, b: var BitSeq): (var uint, var uint) =
+  loopOverWords(a, b, true, true, uint, wordA, wordB):
+    yield (wordA, wordB)
+
+func `[]`*(s: BitSeq, pos: Natural): bool {.inline.} =
+  doAssert pos < s.len
+  s.Bytes.getBit pos
+
+func `[]=`*(s: var BitSeq, pos: Natural, value: bool) {.inline.} =
+  doAssert pos < s.len
+  s.Bytes.changeBit pos, value
+
+func setBit*(s: var BitSeq, pos: Natural) {.inline.} =
+  doAssert pos < s.len
+  setBit s.Bytes, pos
+
+func clearBit*(s: var BitSeq, pos: Natural) {.inline.} =
+  doAssert pos < s.len
+  clearBit s.Bytes, pos
+
+func init*(T: type BitSeq, len: int): T =
+  result = BitSeq newSeq[byte](1 + len div 8)
+  Bytes(result).setBit len
+
+func init*(T: type BitArray): T =
+  # The default zero-initializatio is fine
+  discard
+
+template `[]`*(a: BitArray, pos: Natural): bool =
+  getBit a.bytes, pos
+
+template `[]=`*(a: var BitArray, pos: Natural, value: bool) =
+  changeBit a.bytes, pos, value
+
+template setBit*(a: var BitArray, pos: Natural) =
+  setBit a.bytes, pos
+
+template clearBit*(a: var BitArray, pos: Natural) =
+  clearBit a.bytes, pos
+
+# TODO: Submit this to the standard library as `cmp`
+# At the moment, it doesn't work quite well because Nim selects
+# the generic cmp[T] from the system module instead of choosing
+# the openarray overload
+func compareArrays[T](a, b: openarray[T]): int =
+  result = cmp(a.len, b.len)
+  if result != 0: return
+
+  for i in 0 ..< a.len:
+    result = cmp(a[i], b[i])
+    if result != 0: return
+
+template cmp*(a, b: BitSeq): int =
+  compareArrays(Bytes a, Bytes b)
+
+template `==`*(a, b: BitSeq): bool =
+  cmp(a, b) == 0
+
+func `$`*(a: BitSeq): string =
+  let length = a.len
+  result = newStringOfCap(2 + length)
+  result.add "0b"
+  for i in countdown(length - 1, 0):
+    result.add if a[i]: '1' else: '0'
+
+func combine*(tgt: var BitSeq, src: BitSeq) =
+  doAssert tgt.len == src.len
+  for tgtWord, srcWord in words(tgt, src):
+    tgtWord = tgtWord or srcWord
+
+func overlaps*(a, b: BitSeq): bool =
+  for wa, wb in words(a, b):
+    if (wa and wb) != 0:
+      return true
+
+func isSubsetOf*(a, b: BitSeq): bool =
+  let alen = a.len
+  doAssert b.len == alen
+  for i in 0 ..< alen:
+    if a[i] and not b[i]:
+      return false
+  true
+
+proc isZeros*(x: BitSeq): bool =
+  for w in words(x):
+    if w != 0: return false
+  return true
+
+template bytes*(x: BitSeq): untyped =
+  seq[byte](x)

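A short usage sketch of the `BitSeq` operations added above — the import path assumes the file is compiled as `beacon_chain/ssz/bitseqs`, and the bit positions are arbitrary:

```nim
import beacon_chain/ssz/bitseqs

var a = BitSeq.init(8) # 8 data bits, all zero; a marker bit records the length
a.setBit 1
a[4] = true            # `[]=` delegates to changeBit on the backing bytes

var b = BitSeq.init(8)
b.setBit 1

doAssert b.isSubsetOf(a) # every bit set in b is also set in a
doAssert a.overlaps(b)   # they share bit 1
b.combine(a)             # b = b or a, merged word-by-word via loopOverWords
doAssert a == b and not a.isZeros
echo a                   # prints 0b00010010
```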
@@ -3,8 +3,8 @@
 
 import
   typetraits, options,
-  stew/[bitseqs, bitops2, endians2, objects, bitseqs], serialization/testing/tracing,
-  ../spec/[digest, datatypes], ./types
+  stew/[bitops2, endians2, objects], serialization/testing/tracing,
+  ../spec/[digest, datatypes], ./types, ./spec_types
 
 template raiseIncorrectSize(T: type) =
   const typeName = name(T)

@@ -5,10 +5,10 @@ import
   strutils, parseutils,
   stew/objects, faststreams/outputs, json_serialization/writer,
   ../spec/datatypes,
-  types, bytes_reader, navigator
+  ./bytes_reader, ./types, ./navigator, ./spec_types
 
 export
-  navigator
+  bytes_reader, navigator, types
 
 type
   ObjKind = enum

@ -0,0 +1,464 @@
|
|||
# beacon_chain
|
||||
# Copyright (c) 2018-2020 Status Research & Development GmbH
|
||||
# Licensed and distributed under either of
|
||||
# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT).
|
||||
# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0).
|
||||
# at your option. This file may not be copied, modified, or distributed except according to those terms.
|
||||
|
||||
# This module contains the parts necessary to create a merkle hash from the core
|
||||
# SSZ types outlined in the spec:
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md#merkleization
|
||||
|
||||
{.push raises: [Defect].}
|
||||
|
||||
import
|
||||
stew/[bitops2, endians2, ptrops],
|
||||
stew/ranges/ptr_arith,
|
||||
serialization/testing/tracing,
|
||||
../spec/digest,
|
||||
./bitseqs, ./spec_types, ./types
|
||||
|
||||
export
|
||||
spec_types, types
|
||||
|
||||
const
|
||||
zero64 = default array[64, byte]
|
||||
bitsPerChunk = bytesPerChunk * 8
|
||||
|
||||
type
|
||||
SszChunksMerkleizer = object
|
||||
combinedChunks: ptr UncheckedArray[Eth2Digest]
|
||||
totalChunks: uint64
|
||||
topIndex: int
|
||||
|
||||
func digest(a, b: openArray[byte]): Eth2Digest =
|
||||
result = withEth2Hash:
|
||||
trs "DIGESTING ARRAYS ", toHex(a), " ", toHex(b)
|
||||
trs toHex(a)
|
||||
trs toHex(b)
|
||||
|
||||
h.update a
|
||||
h.update b
|
||||
trs "HASH RESULT ", result
|
||||
|
||||
func digest(a, b, c: openArray[byte]): Eth2Digest =
|
||||
result = withEth2Hash:
|
||||
trs "DIGESTING ARRAYS ", toHex(a), " ", toHex(b), " ", toHex(c)
|
||||
|
||||
h.update a
|
||||
h.update b
|
||||
h.update c
|
||||
trs "HASH RESULT ", result
|
||||
|
||||
func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
|
||||
trs "MERGING BRANCHES OPEN ARRAY"
|
||||
|
||||
let paddingBytes = bytesPerChunk - newData.len
|
||||
digest(existing.data, newData, zero64.toOpenArray(0, paddingBytes - 1))
|
||||
|
||||
template mergeBranches(existing: Eth2Digest, newData: array[32, byte]): Eth2Digest =
|
||||
trs "MERGING BRANCHES ARRAY"
|
||||
digest(existing.data, newData)
|
||||
|
||||
template mergeBranches(a, b: Eth2Digest): Eth2Digest =
|
||||
trs "MERGING BRANCHES DIGEST"
|
||||
digest(a.data, b.data)
|
||||
|
||||
func computeZeroHashes: array[sizeof(Limit) * 8, Eth2Digest] =
|
||||
result[0] = Eth2Digest()
|
||||
for i in 1 .. result.high:
|
||||
result[i] = mergeBranches(result[i - 1], result[i - 1])
|
||||
|
||||
const zeroHashes = computeZeroHashes()
|
||||
|
||||
func addChunk(merkleizer: var SszChunksMerkleizer, data: openarray[byte]) =
|
||||
doAssert data.len > 0 and data.len <= bytesPerChunk
|
||||
|
||||
if not getBitLE(merkleizer.totalChunks, 0):
|
||||
let paddingBytes = bytesPerChunk - data.len
|
||||
|
||||
merkleizer.combinedChunks[0].data[0..<data.len] = data
|
||||
merkleizer.combinedChunks[0].data[data.len..<bytesPerChunk] =
|
||||
zero64[0..<paddingBytes]
|
||||
|
||||
trs "WROTE BASE CHUNK ",
|
||||
toHex(merkleizer.combinedChunks[0].data), " ", data.len
|
||||
else:
|
||||
var hash = mergeBranches(merkleizer.combinedChunks[0], data)
|
||||
|
||||
for i in 1 .. merkleizer.topIndex:
|
||||
trs "ITERATING"
|
||||
if getBitLE(merkleizer.totalChunks, i):
|
||||
trs "CALLING MERGE BRANCHES"
|
||||
hash = mergeBranches(merkleizer.combinedChunks[i], hash)
|
||||
else:
|
||||
trs "WRITING FRESH CHUNK AT ", i, " = ", hash
|
||||
merkleizer.combinedChunks[i] = hash
|
||||
break
|
||||
|
||||
inc merkleizer.totalChunks
|
||||
|
||||
template createMerkleizer(totalElements: static Limit): SszChunksMerkleizer =
|
||||
trs "CREATING A MERKLEIZER FOR ", totalElements
|
||||
|
||||
const treeHeight = bitWidth nextPow2(uint64 totalElements)
|
||||
var combinedChunks {.noInit.}: array[treeHeight, Eth2Digest]
|
||||
|
||||
SszChunksMerkleizer(
|
||||
combinedChunks: cast[ptr UncheckedArray[Eth2Digest]](addr combinedChunks),
|
||||
topIndex: treeHeight - 1,
|
||||
totalChunks: 0)
|
||||
|
||||
func getFinalHash(merkleizer: var SszChunksMerkleizer): Eth2Digest =
|
||||
if merkleizer.totalChunks == 0:
|
||||
return zeroHashes[merkleizer.topIndex]
|
||||
|
||||
let
|
||||
bottomHashIdx = firstOne(merkleizer.totalChunks) - 1
|
||||
submittedChunksHeight = bitWidth(merkleizer.totalChunks - 1)
|
||||
topHashIdx = merkleizer.topIndex
|
||||
|
||||
trs "BOTTOM HASH ", bottomHashIdx
|
||||
trs "SUBMITTED HEIGHT ", submittedChunksHeight
|
||||
trs "TOP HASH IDX ", topHashIdx
|
||||
|
||||
if bottomHashIdx != submittedChunksHeight:
|
||||
# Our tree is not finished. We must complete the work in progress
|
||||
# branches and then extend the tree to the right height.
|
||||
result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
|
||||
zeroHashes[bottomHashIdx])
|
||||
|
||||
for i in bottomHashIdx + 1 ..< topHashIdx:
|
||||
if getBitLE(merkleizer.totalChunks, i):
|
||||
result = mergeBranches(merkleizer.combinedChunks[i], result)
|
||||
trs "COMBINED"
|
||||
else:
|
||||
result = mergeBranches(result, zeroHashes[i])
|
||||
trs "COMBINED WITH ZERO"
|
||||
|
||||
elif bottomHashIdx == topHashIdx:
|
||||
# We have a perfect tree (chunks == 2**n) at just the right height!
|
||||
result = merkleizer.combinedChunks[bottomHashIdx]
|
||||
else:
|
||||
# We have a perfect tree of user chunks, but we have more work to
|
||||
# do - we must extend it to reach the desired height
|
||||
result = mergeBranches(merkleizer.combinedChunks[bottomHashIdx],
|
||||
zeroHashes[bottomHashIdx])
|
||||
|
||||
for i in bottomHashIdx + 1 ..< topHashIdx:
|
||||
result = mergeBranches(result, zeroHashes[i])
|
||||
|
||||
func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
|
||||
var dataLen: array[32, byte]
|
||||
dataLen[0..<8] = uint64(length).toBytesLE()
|
||||
mergeBranches(root, dataLen)
|
||||
|
||||
func hash_tree_root*(x: auto): Eth2Digest {.gcsafe, raises: [Defect].}
|
||||
|
||||
template merkleizeFields(totalElements: static Limit, body: untyped): Eth2Digest =
|
||||
var merkleizer {.inject.} = createMerkleizer(totalElements)
|
||||
|
||||
template addField(field) =
|
||||
let hash = hash_tree_root(field)
|
||||
trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
|
||||
addChunk(merkleizer, hash.data)
|
||||
trs "CHUNK ADDED"
|
||||
|
||||
body
|
||||
|
||||
getFinalHash(merkleizer)
|
||||
|
||||
template writeBytesLE(chunk: var array[bytesPerChunk, byte], atParam: int,
|
||||
val: SomeUnsignedInt) =
|
||||
let at = atParam
|
||||
chunk[at ..< at + sizeof(val)] = toBytesLE(val)
|
||||
|
||||
func chunkedHashTreeRootForBasicTypes[T](merkleizer: var SszChunksMerkleizer,
|
||||
arr: openarray[T]): Eth2Digest =
|
||||
static:
|
||||
doAssert T is BasicType
|
||||
|
||||
if arr.len == 0:
|
||||
return getFinalHash(merkleizer)
|
||||
|
||||
when T is byte:
|
||||
var
|
||||
remainingBytes = arr.len
|
||||
pos = cast[ptr byte](unsafeAddr arr[0])
|
||||
|
||||
while remainingBytes >= bytesPerChunk:
|
||||
merkleizer.addChunk(makeOpenArray(pos, bytesPerChunk))
|
||||
pos = offset(pos, bytesPerChunk)
|
||||
remainingBytes -= bytesPerChunk
|
||||
|
||||
if remainingBytes > 0:
|
||||
merkleizer.addChunk(makeOpenArray(pos, remainingBytes))
|
||||
|
||||
elif T is bool or cpuEndian == littleEndian:
|
||||
let
|
||||
baseAddr = cast[ptr byte](unsafeAddr arr[0])
|
||||
len = arr.len * sizeof(T)
|
||||
return chunkedHashTreeRootForBasicTypes(merkleizer, makeOpenArray(baseAddr, len))
|
||||
|
||||
else:
|
||||
static:
|
||||
doAssert T is UintN
|
||||
doAssert bytesPerChunk mod sizeof(Т) == 0
|
||||
|
||||
const valuesPerChunk = bytesPerChunk div sizeof(Т)
|
||||
|
||||
var writtenValues = 0
|
||||
|
||||
var chunk: array[bytesPerChunk, byte]
|
||||
while writtenValues < arr.len - valuesPerChunk:
|
||||
for i in 0 ..< valuesPerChunk:
|
||||
chunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
|
||||
merkleizer.addChunk chunk
|
||||
inc writtenValues, valuesPerChunk
|
||||
|
||||
let remainingValues = arr.len - writtenValues
|
||||
if remainingValues > 0:
|
||||
var lastChunk: array[bytesPerChunk, byte]
|
||||
for i in 0 ..< remainingValues:
|
||||
chunk.writeBytesLE(i * sizeof(T), arr[writtenValues + i])
|
||||
merkleizer.addChunk lastChunk
|
||||
|
||||
getFinalHash(merkleizer)
|
||||
|
||||
func bitListHashTreeRoot(merkleizer: var SszChunksMerkleizer, x: BitSeq): Eth2Digest =
|
||||
# TODO: Switch to a simpler BitList representation and
|
||||
# replace this with `chunkedHashTreeRoot`
|
||||
trs "CHUNKIFYING BIT SEQ WITH TOP INDEX ", merkleizer.topIndex
|
||||
|
||||
var
|
||||
totalBytes = bytes(x).len
|
||||
lastCorrectedByte = bytes(x)[^1]
|
||||
|
||||
if lastCorrectedByte == byte(1):
|
||||
if totalBytes == 1:
|
||||
# This is an empty bit list.
|
||||
# It should be hashed as a tree containing all zeros:
|
||||
return mergeBranches(zeroHashes[merkleizer.topIndex],
|
||||
zeroHashes[0]) # this is the mixed length
|
||||
|
||||
totalBytes -= 1
|
||||
lastCorrectedByte = bytes(x)[^2]
|
||||
else:
|
||||
let markerPos = log2trunc(lastCorrectedByte)
|
||||
lastCorrectedByte.clearBit(markerPos)
|
||||
|
||||
var
|
||||
bytesInLastChunk = totalBytes mod bytesPerChunk
|
||||
fullChunks = totalBytes div bytesPerChunk
|
||||
|
||||
if bytesInLastChunk == 0:
|
||||
fullChunks -= 1
|
||||
bytesInLastChunk = 32
|
||||
|
||||
for i in 0 ..< fullChunks:
|
||||
let
|
||||
chunkStartPos = i * bytesPerChunk
|
||||
chunkEndPos = chunkStartPos + bytesPerChunk - 1
|
||||
|
||||
merkleizer.addChunk bytes(x).toOpenArray(chunkStartPos, chunkEndPos)
|
||||
|
||||
var
|
||||
lastChunk: array[bytesPerChunk, byte]
|
||||
chunkStartPos = fullChunks * bytesPerChunk
|
||||
|
||||
for i in 0 .. bytesInLastChunk - 2:
|
||||
lastChunk[i] = bytes(x)[chunkStartPos + i]
|
||||
|
||||
lastChunk[bytesInLastChunk - 1] = lastCorrectedByte
|
||||
|
||||
merkleizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
|
||||
let contentsHash = merkleizer.getFinalHash
|
||||
mixInLength contentsHash, x.len
|
||||
|
||||
func maxChunksCount(T: type, maxLen: int64): int64 =
|
||||
when T is BitList|BitArray:
|
||||
(maxLen + bitsPerChunk - 1) div bitsPerChunk
|
||||
elif T is array|List:
|
||||
maxChunkIdx(ElemType(T), maxLen)
|
||||
else:
|
||||
unsupported T # This should never happen
|
||||
|
||||
func hashTreeRootAux[T](x: T): Eth2Digest =
|
||||
when T is bool|char:
|
||||
result.data[0] = byte(x)
|
||||
elif T is SomeUnsignedInt:
|
||||
when cpuEndian == bigEndian:
|
||||
result.data[0..<sizeof(x)] = toBytesLE(x)
|
||||
else:
|
||||
copyMem(addr result.data[0], unsafeAddr x, sizeof x)
|
||||
elif (when T is array: ElemType(T) is BasicType else: false):
|
||||
type E = ElemType(T)
|
||||
when sizeof(T) <= sizeof(result.data):
|
||||
when E is byte|bool or cpuEndian == littleEndian:
|
||||
copyMem(addr result.data[0], unsafeAddr x, sizeof x)
|
||||
else:
|
||||
var pos = 0
|
||||
for e in x:
|
||||
writeBytesLE(result.data, pos, e)
|
||||
pos += sizeof(E)
|
||||
else:
|
||||
trs "FIXED TYPE; USE CHUNK STREAM"
|
||||
var markleizer = createMerkleizer(maxChunksCount(T, x.len))
|
||||
chunkedHashTreeRootForBasicTypes(markleizer, x)
|
||||
elif T is BitArray:
|
||||
hashTreeRootAux(x.bytes)
|
||||
elif T is array|object|tuple:
|
||||
trs "MERKLEIZING FIELDS"
|
||||
const totalFields = when T is array: len(x)
|
||||
else: totalSerializedFields(T)
|
||||
merkleizeFields(totalFields):
|
||||
x.enumerateSubFields(f):
|
||||
addField f
|
||||
#elif isCaseObject(T):
|
||||
# # TODO implement this
|
||||
else:
|
||||
unsupported T
|
||||
|
||||
func hashTreeRootList(x: List|BitList): Eth2Digest =
|
||||
const maxLen = static(x.maxLen)
|
||||
type T = type(x)
|
||||
const limit = maxChunksCount(T, maxLen)
|
||||
var merkleizer = createMerkleizer(limit)
|
||||
|
||||
when x is BitList:
|
||||
merkleizer.bitListHashTreeRoot(BitSeq x)
|
||||
else:
|
||||
type E = ElemType(T)
|
||||
let contentsHash = when E is BasicType:
|
||||
chunkedHashTreeRootForBasicTypes(merkleizer, asSeq x)
|
||||
else:
|
||||
for elem in x:
|
||||
let elemHash = hash_tree_root(elem)
|
||||
merkleizer.addChunk(elemHash.data)
|
||||
merkleizer.getFinalHash()
|
||||
mixInLength(contentsHash, x.len)

func mergedDataHash(x: HashList|HashArray, chunkIdx: int64): Eth2Digest =
  # The merged hash of the data at `chunkIdx` and `chunkIdx + 1`
  trs "DATA HASH ", chunkIdx, " ", x.data.len

  when x.T is BasicType:
    when cpuEndian == bigEndian:
      unsupported type x # No bigendian support here!

    let
      bytes = cast[ptr UncheckedArray[byte]](unsafeAddr x.data[0])
      byteIdx = chunkIdx * bytesPerChunk
      byteLen = x.data.len * sizeof(x.T)

    if byteIdx >= byteLen:
      zeroHashes[1]
    else:
      let
        nbytes = min(byteLen - byteIdx, 64)
        padding = 64 - nbytes

      digest(
        toOpenArray(bytes, int(byteIdx), int(byteIdx + nbytes - 1)),
        toOpenArray(zero64, 0, int(padding - 1)))
  else:
    if chunkIdx + 1 > x.data.len():
      zeroHashes[x.maxDepth]
    elif chunkIdx + 1 == x.data.len():
      mergeBranches(
        hash_tree_root(x.data[chunkIdx]),
        Eth2Digest())
    else:
      mergeBranches(
        hash_tree_root(x.data[chunkIdx]),
        hash_tree_root(x.data[chunkIdx + 1]))

template mergedHash(x: HashList|HashArray, vIdxParam: int64): Eth2Digest =
  # The merged hash of the data at `vIdx` and `vIdx + 1`
  let vIdx = vIdxParam
  if vIdx >= x.maxChunks:
    let dataIdx = vIdx - x.maxChunks
    mergedDataHash(x, dataIdx)
  else:
    mergeBranches(
      hashTreeRootCached(x, vIdx),
      hashTreeRootCached(x, vIdx + 1))
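
# Index arithmetic (illustrative): the cache uses the classic flat
# binary-tree numbering in which node 1 is the root and node v has children
# 2v and 2v + 1. With maxChunks = 8, internal nodes occupy indices 1..7 and
# flat indices 8..15 map to data chunks 0..7, which is why
# `vIdx - x.maxChunks` above recovers the data chunk index.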

func hashTreeRootCached*(x: HashList, vIdx: int64): Eth2Digest =
  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"

  let
    layer = layer(vIdx)
    idxInLayer = vIdx - (1'i64 shl layer)
    layerIdx = idxInLayer + x.indices[layer]

  doAssert layer < x.maxDepth
  trs "GETTING ", vIdx, " ", layerIdx, " ", layer, " ", x.indices.len
  if layerIdx >= x.indices[layer + 1]:
    trs "ZERO ", x.indices[layer], " ", x.indices[layer + 1]
    zeroHashes[x.maxDepth - layer]
  else:
    if not isCached(x.hashes[layerIdx]):
      # TODO oops. so much for maintaining non-mutability.
      let px = unsafeAddr x

      trs "REFRESHING ", vIdx, " ", layerIdx, " ", layer

      px[].hashes[layerIdx] = mergedHash(x, vIdx * 2)
    else:
      trs "CACHED ", layerIdx

    x.hashes[layerIdx]

func hashTreeRootCached*(x: HashArray, vIdx: int): Eth2Digest =
  doAssert vIdx >= 1, "Only valid for flat merkle tree indices"

  if not isCached(x.hashes[vIdx]):
    # TODO oops. so much for maintaining non-mutability.
    let px = unsafeAddr x

    px[].hashes[vIdx] = mergedHash(x, vIdx * 2)

  return x.hashes[vIdx]

func hashTreeRootCached*(x: HashArray): Eth2Digest =
  hashTreeRootCached(x, 1) # Array does not use idx 0

func hashTreeRootCached*(x: HashList): Eth2Digest =
  if x.data.len == 0:
    mergeBranches(
      zeroHashes[x.maxDepth],
      zeroHashes[0]) # mixInLength with 0!
  else:
    if not isCached(x.hashes[0]):
      # TODO oops. so much for maintaining non-mutability.
      let px = unsafeAddr x
      px[].hashes[0] = mixInLength(hashTreeRootCached(x, 1), x.data.len)

    x.hashes[0]

func hash_tree_root*(x: auto): Eth2Digest {.raises: [Defect].} =
  trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
  mixin toSszType

  result =
    when x is HashArray|HashList:
      hashTreeRootCached(x)
    elif x is List|BitList:
      hashTreeRootList(x)
    else:
      hashTreeRootAux toSszType(x)

  trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result

iterator hash_tree_roots_prefix*[T](lst: openarray[T], limit: static Limit): Eth2Digest =
  # This is a particular type's instantiation of a general fold, reduce,
  # accumulation, prefix sums, etc family of operations. As long as the
  # Eth1 deposit case is the only notable example -- the usual uses of a
  # list involve, at some point, tree-hashing it -- finalized hashes are
  # the only abstraction that escapes from this module this way.
  var merkleizer = createMerkleizer(limit)
  for i, elem in lst:
    merkleizer.addChunk(hash_tree_root(elem).data)
    yield mixInLength(merkleizer.getFinalHash(), i + 1)
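
# Usage sketch (illustrative): each yielded digest is the root of the
# length-i prefix of `lst`, e.g. for incremental Eth1 deposit roots
# (DEPOSIT_CONTRACT_TREE_DEPTH being the spec constant, assumed in scope):
#
#   for root in hash_tree_roots_prefix(deposits,
#       1'i64 shl DEPOSIT_CONTRACT_TREE_DEPTH):
#     discard root # consume each prefix root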

@ -3,7 +3,9 @@
import
  stew/[ptrops, objects], stew/ranges/ptr_arith,
  ./types, ./bytes_reader
  ./bytes_reader, ./types, ./spec_types

export bytes_reader, types

type
  MemRange* = object

@ -0,0 +1,17 @@
import
  typetraits,
  ../spec/[crypto, digest]

# Eth2-spec-specific type handling that is not generic to SSZ

template toSszType*(x: auto): auto =
  mixin toSszType

  # Please note that BitArray doesn't need any special treatment here
  # because it can be considered a regular fixed-size object type.

  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
  elif x is Eth2Digest: x.data
  elif x is BlsCurveType: toRaw(x)
  elif x is ForkDigest|Version: distinctBase(x)
  else: x
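
# Consequence (illustrative): hash_tree_root(Slot(42)) equals
# hash_tree_root(42'u64), and an Eth2Digest is serialized and hashed as its
# raw 32-byte `data` array.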

@ -0,0 +1,236 @@
# TODO Cannot override push, even though the function is annotated
# nim-beacon-chain/beacon_chain/ssz.nim(212, 18) Error: can raise an unlisted exception: IOError
# {.push raises: [Defect].}
{.pragma: raisesssz, raises: [Defect, MalformedSszError, SszSizeMismatchError].}

## SSZ serialization for core SSZ types, as specified in:
# https://github.com/ethereum/eth2.0-specs/blob/v0.12.1/ssz/simple-serialize.md#serialization

import
  typetraits, options,
  stew/[bitops2, endians2, objects],
  serialization, serialization/testing/tracing,
  ../spec/[digest, datatypes],
  ./bytes_reader, ./bitseqs, ./types, ./spec_types

export
  serialization, types, bitseqs

type
  SszReader* = object
    stream: InputStream

  SszWriter* = object
    stream: OutputStream

  SizePrefixed*[T] = distinct T
  SszMaxSizeExceeded* = object of SerializationError

  VarSizedWriterCtx = object
    fixedParts: WriteCursor
    offset: int

  FixedSizedWriterCtx = object

serializationFormat SSZ,
                    Reader = SszReader,
                    Writer = SszWriter,
                    PreferedOutput = seq[byte]

template sizePrefixed*[TT](x: TT): untyped =
  type T = TT
  SizePrefixed[T](x)

proc init*(T: type SszReader, stream: InputStream): T {.raises: [Defect].} =
  T(stream: stream)

proc writeFixedSized(s: var (OutputStream|WriteCursor), x: auto) {.raises: [Defect, IOError].} =
  mixin toSszType

  when x is byte:
    s.write x
  elif x is bool:
    s.write byte(ord(x))
  elif x is UintN:
    when cpuEndian == bigEndian:
      s.write toBytesLE(x)
    else:
      s.writeMemCopy x
  elif x is array:
    when x[0] is byte:
      trs "APPENDING FIXED SIZE BYTES", x
      s.write x
    else:
      for elem in x:
        trs "WRITING FIXED SIZE ARRAY ELEMENT"
        s.writeFixedSized toSszType(elem)
  elif x is tuple|object:
    enumInstanceSerializedFields(x, fieldName, field):
      trs "WRITING FIXED SIZE FIELD", fieldName
      s.writeFixedSized toSszType(field)
  else:
    unsupported x.type

template writeOffset(cursor: var WriteCursor, offset: int) =
  write cursor, toBytesLE(uint32 offset)

template supports*(_: type SSZ, T: type): bool =
  mixin toSszType
  anonConst compiles(fixedPortionSize toSszType(declval T))

func init*(T: type SszWriter, stream: OutputStream): T {.raises: [Defect].} =
  result.stream = stream

proc writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}

proc beginRecord*(w: var SszWriter, TT: type): auto {.raises: [Defect].} =
  type T = TT
  when isFixedSize(T):
    FixedSizedWriterCtx()
  else:
    const offset = when T is array|HashArray: len(T) * offsetSize
                   else: fixedPortionSize(T)
    VarSizedWriterCtx(offset: offset,
                      fixedParts: w.stream.delayFixedSizeWrite(offset))

template writeField*(w: var SszWriter,
                     ctx: var auto,
                     fieldName: string,
                     field: auto) =
  mixin toSszType
  when ctx is FixedSizedWriterCtx:
    writeFixedSized(w.stream, toSszType(field))
  else:
    type FieldType = type toSszType(field)

    when isFixedSize(FieldType):
      writeFixedSized(ctx.fixedParts, toSszType(field))
    else:
      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
      writeOffset(ctx.fixedParts, ctx.offset)
      let initPos = w.stream.pos
      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
      when FieldType is BitList:
        trs "BIT SEQ ", bytes(field)
      writeVarSizeType(w, toSszType(field))
      ctx.offset += w.stream.pos - initPos

template endRecord*(w: var SszWriter, ctx: var auto) =
  when ctx is VarSizedWriterCtx:
    finalize ctx.fixedParts
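
# Layout sketch (illustrative, hypothetical container): for fields
# (a: uint64, b: List[byte, 16], c: uint32), the fixed portion is
# 8 (a) + 4 (offset of b) + 4 (c) = 16 bytes, so the offset written for `b`
# is 16 and b's bytes are appended after the fixed portion.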

proc writeSeq[T](w: var SszWriter, value: seq[T])
                {.raises: [Defect, IOError].} =
  # Please note that `writeSeq` exists in order to reduce the code bloat
  # produced from generic instantiations of the unique `List[N, T]` types.
  when isFixedSize(T):
    trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
    for elem in value:
      w.stream.writeFixedSized toSszType(elem)
    trs "DONE"
  else:
    trs "WRITING LIST WITH VAR SIZE ELEMENTS"
    var offset = value.len * offsetSize
    var cursor = w.stream.delayFixedSizeWrite offset
    for elem in value:
      cursor.writeFixedSized uint32(offset)
      let initPos = w.stream.pos
      w.writeVarSizeType toSszType(elem)
      offset += w.stream.pos - initPos
    finalize cursor
    trs "DONE"

proc writeVarSizeType(w: var SszWriter, value: auto) {.raises: [Defect, IOError].} =
  trs "STARTING VAR SIZE TYPE"

  when value is HashArray|HashList:
    writeVarSizeType(w, value.data)
  elif value is List:
    # We reduce code bloat by forwarding all `List` types to a general `seq[T]` proc.
    writeSeq(w, asSeq value)
  elif value is BitList:
    # ATTENTION! We can reuse `writeSeq` only as long as our BitList type is implemented
    # to internally match the binary representation of SSZ BitLists in memory.
    writeSeq(w, bytes value)
  elif value is object|tuple|array:
    trs "WRITING OBJECT OR ARRAY"
    var ctx = beginRecord(w, type value)
    enumerateSubFields(value, field):
      writeField w, ctx, astToStr(field), field
    endRecord w, ctx
  else:
    unsupported type(value)

proc writeValue*(w: var SszWriter, x: auto) {.gcsafe, raises: [Defect, IOError].} =
  mixin toSszType
  type T = type toSszType(x)

  when isFixedSize(T):
    w.stream.writeFixedSized toSszType(x)
  else:
    w.writeVarSizeType toSszType(x)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].}

func sszSizeForVarSizeList[T](value: openarray[T]): int =
  result = len(value) * offsetSize
  for elem in value:
    result += sszSize(toSszType elem)

func sszSize*(value: auto): int {.gcsafe, raises: [Defect].} =
  mixin toSszType
  type T = type toSszType(value)

  when isFixedSize(T):
    anonConst fixedPortionSize(T)

  elif T is array|List|HashList|HashArray:
    type E = ElemType(T)
    when isFixedSize(E):
      len(value) * anonConst(fixedPortionSize(E))
    elif T is HashArray:
      sszSizeForVarSizeList(value.data)
    elif T is array:
      sszSizeForVarSizeList(value)
    else:
      sszSizeForVarSizeList(asSeq value)

  elif T is BitList:
    return len(bytes(value))

  elif T is object|tuple:
    result = anonConst fixedPortionSize(T)
    enumInstanceSerializedFields(value, _{.used.}, field):
      type FieldType = type toSszType(field)
      when not isFixedSize(FieldType):
        result += sszSize(toSszType field)

  else:
    unsupported T
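
# Size sketch (illustrative, same hypothetical container as above): sszSize
# is the 16-byte fixed portion plus each variable part, so with `b` holding
# 3 bytes the total is 16 + 3 = 19 bytes.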

proc writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) {.raises: [Defect, IOError].} =
  var cursor = w.stream.delayVarSizeWrite(10)
  let initPos = w.stream.pos
  w.writeValue T(x)
  let length = uint64(w.stream.pos - initPos)
  when false:
    discard
    # TODO varintBytes is sub-optimal at the moment
    # cursor.writeAndFinalize length.varintBytes
  else:
    var buf: VarintBuffer
    buf.writeVarint length
    cursor.finalWrite buf.writtenBytes

proc readValue*[T](r: var SszReader, val: var T) {.raises: [Defect, MalformedSszError, SszSizeMismatchError, IOError].} =
  when isFixedSize(T):
    const minimalSize = fixedPortionSize(T)
    if r.stream.readable(minimalSize):
      readSszValue(r.stream.read(minimalSize), val)
    else:
      raise newException(MalformedSszError, "SSZ input of insufficient size")
  else:
    # TODO Read the fixed portion first and precisely measure the size of
    # the dynamic portion to consume the right number of bytes.
    readSszValue(r.stream.read(r.stream.len.get), val)
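
# Round-trip sketch (illustrative): the serializationFormat declaration above
# also wires up the generic nim-serialization helpers, so for a supported
# value and its type T:
#
#   let encoded = SSZ.encode(value)
#   doAssert SSZ.decode(encoded, T) == value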

@ -1,11 +1,14 @@
{.push raises: [Defect].}

import
  tables, options, typetraits,
  stew/shims/macros, stew/[byteutils, bitops2, objects, bitseqs],
  tables, options, typetraits, strformat,
  stew/shims/macros, stew/[byteutils, bitops2, objects],
  serialization/[object_serialization, errors],
  ./spec_types, ./bitseqs,
  ../spec/digest

export bitseqs

const
  offsetSize* = 4
  bytesPerChunk* = 32

@ -47,6 +50,15 @@ type
  List*[T; maxLen: static Limit] = distinct seq[T]
  BitList*[maxLen: static Limit] = distinct BitSeq

  HashArray*[maxLen: static Limit; T] = object
    data*: array[maxLen, T]
    hashes* {.dontSerialize.}: array[maxChunkIdx(T, maxLen), Eth2Digest]

  HashList*[T; maxLen: static Limit] = object
    data*: List[T, maxLen]
    hashes* {.dontSerialize.}: seq[Eth2Digest]
    indices* {.dontSerialize.}: array[layer(maxChunkIdx(T, maxLen)) + 1, int64]

  # Note for readers:
  # We use `array` for `Vector` and
  # `BitArray` for `BitVector`
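
  # Cache layout sketch (illustrative): `hashes` stores tree nodes layer by
  # layer and `indices[layer]` records each layer's start offset, which is
  # how hashTreeRootCached resolves a flat tree index into this storage.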

@ -60,15 +72,6 @@ type
    actualSszSize*: int
    elementSize*: int

  HashArray*[maxLen: static Limit; T] = object
    data*: array[maxLen, T]
    hashes* {.dontSerialize.}: array[maxChunkIdx(T, maxLen), Eth2Digest]

  HashList*[T; maxLen: static Limit] = object
    data*: List[T, maxLen]
    hashes* {.dontSerialize.}: seq[Eth2Digest]
    indices* {.dontSerialize.}: array[layer(maxChunkIdx(T, maxLen)) + 1, int64]

template asSeq*(x: List): auto = distinctBase(x)

template init*[T](L: type List, x: seq[T], N: static Limit): auto =

@ -385,3 +388,17 @@ func getFieldBoundingOffsets*(RecordType: type,
  ## the end of the variable-size field.
  type T = RecordType
  anonConst getFieldBoundingOffsetsImpl(T, fieldName)

template enumerateSubFields*(holder, fieldVar, body: untyped) =
  when holder is array|HashArray:
    for fieldVar in holder: body
  else:
    enumInstanceSerializedFields(holder, _{.used.}, fieldVar): body

method formatMsg*(
    err: ref SszSizeMismatchError,
    filename: string): string {.gcsafe, raises: [Defect].} =
  try:
    &"SSZ size mismatch, element {err.elementSize}, actual {err.actualSszSize}, type {err.deserializedType}, file {filename}"
  except CatchableError:
    "SSZ size mismatch"

@ -1,7 +1,6 @@
import
  os, strformat,
  ssz,
  serialization,
  ssz/ssz_serialization,
  beacon_node_types,
  ./spec/[crypto, datatypes, digest]

@ -31,7 +31,7 @@ import
  tables,
  chronicles,
  stew/results,
  ./extras, ./ssz, metrics,
  ./extras, ./ssz/merkleization, metrics,
  ./spec/[datatypes, crypto, digest, helpers, validator],
  ./spec/[state_transition_block, state_transition_epoch],
  ../nbench/bench_lab

@ -1,6 +1,6 @@
import chronicles
import options, deques, heapqueue, tables, strutils, sequtils, math, algorithm
import stew/[bitseqs, results], chronos, chronicles
import stew/results, chronos, chronicles
import spec/datatypes, spec/digest, peer_pool, eth2_network
import eth/async_utils

@ -2,7 +2,7 @@ import
  options, tables, sets, macros,
  chronicles, chronos, stew/ranges/bitranges, libp2p/switch,
  spec/[datatypes, crypto, digest],
  beacon_node_types, eth2_network, block_pool, ssz
  beacon_node_types, eth2_network, block_pool

logScope:
  topics = "sync"

@ -10,7 +10,7 @@ import
  tables, strutils, sequtils,

  # Nimble packages
  stew/[objects, bitseqs],
  stew/[objects],
  chronos, metrics, json_rpc/[rpcserver, jsonmarshal],

  # Local modules

@ -51,7 +51,7 @@ proc installValidatorApiHandlers*(rpcServer: RpcServer, node: BeaconNode) =
  rpcServer.rpc("get_v1_beacon_genesis") do () -> BeaconGenesisTuple:
    notice "== get_v1_beacon_genesis"
    return BeaconGenesisTuple(genesis_time: node.blockPool.headState.data.data.genesis_time,
      genesis_validators_root: node.blockPool.headState.data.data.genesis_validators_root,
      genesis_validators_root: node.blockPool.headState.data.data.genesis_validators_root,
      genesis_fork_version: Version(GENESIS_FORK_VERSION))

  rpcServer.rpc("get_v1_validator_blocks") do (slot: Slot, graffiti: Eth2Digest, randao_reveal: ValidatorSig) -> BeaconBlock:

@ -20,7 +20,7 @@ import
  conf, time,
  eth2_network, eth2_discovery, validator_pool, beacon_node_types,
  nimbus_binary_common,
  version, ssz, ssz/dynamic_navigator,
  version, ssz/merkleization,
  sync_manager,
  spec/eth2_apis/validator_callsigs_types,
  eth2_json_rpc_serialization

@ -103,7 +103,7 @@ proc onSlotStart(vc: ValidatorClient, lastSlot, scheduledSlot: Slot) {.gcsafe, async.} =
      )

      vc.port_logged "await 4"

      let blockRoot = hash_tree_root(newBlock.message)
      newBlock.signature = await validator.signBlockProposal(
        vc.fork, vc.beaconGenesis.genesis_validators_root, slot, blockRoot)

@ -10,7 +10,7 @@ import
  os, tables, strutils,

  # Nimble packages
  stew/[objects, bitseqs], stew/shims/macros,
  stew/[objects], stew/shims/macros,
  chronos, metrics, json_rpc/[rpcserver, jsonmarshal],
  chronicles,
  json_serialization/std/[options, sets, net], serialization/errors,

@ -22,7 +22,7 @@ import
  conf, time, validator_pool, state_transition,
  attestation_pool, block_pool, eth2_network,
  beacon_node_common, beacon_node_types,
  mainchain_monitor, version, ssz, interop,
  mainchain_monitor, version, ssz/merkleization, interop,
  attestation_aggregation, sync_manager, sszdump

# Metrics for tracking attestation and beacon block loss

@ -2,7 +2,7 @@ import
  os, strutils,
  chronicles, chronos, blscurve, nimcrypto, json_serialization, serialization,
  web3, stint, eth/keys,
  spec/[datatypes, digest, crypto], conf, ssz, interop, merkle_minimal
  spec/[datatypes, digest, crypto], conf, ssz/merkleization, interop, merkle_minimal

contract(DepositContract):
  proc deposit(pubkey: Bytes48, withdrawalCredentials: Bytes32, signature: Bytes96, deposit_data_root: FixedBytes[32])

@ -1,7 +1,7 @@
import
  tables,
  chronos, chronicles,
  spec/[datatypes, crypto, digest, state_transition_block], ssz,
  spec/[datatypes, crypto, digest, state_transition_block],
  beacon_node_types

func init*(T: type ValidatorPool): T =

@ -11,8 +11,11 @@ import
  # Status libraries
  confutils/defs, serialization,
  # Beacon-chain
  ../beacon_chain/spec/[datatypes, crypto, helpers, beaconstate, validator, state_transition_block, state_transition_epoch],
  ../beacon_chain/[ssz, state_transition, extras]
  ../beacon_chain/spec/[
    datatypes, crypto, helpers, beaconstate, validator,
    state_transition_block, state_transition_epoch],
  ../beacon_chain/[state_transition, extras],
  ../beacon_chain/ssz/[merkleization, ssz_serialization]

# Nimbus Bench - Scenario configuration
# --------------------------------------------------

@ -2,7 +2,7 @@
  confutils, stats, chronicles, strformat, tables,
  ../beacon_chain/block_pool,
  ../beacon_chain/spec/[crypto, datatypes, helpers],
  ../beacon_chain/[beacon_chain_db, extras, state_transition, ssz],
  ../beacon_chain/[beacon_chain_db, extras, state_transition],
  ../research/simutils,
  eth/db/[kvstore, kvstore_sqlite3]

@ -2,7 +2,7 @@ import
  confutils, os, strutils, json_serialization,
  stew/byteutils,
  ../beacon_chain/spec/[crypto, datatypes, digest],
  ../beacon_chain/ssz
  ../beacon_chain/ssz/[merkleization, ssz_serialization]

# TODO turn into arguments
cli do(kind: string, file: string):

@ -1,7 +1,7 @@
import
  confutils, os, strutils, chronicles, json_serialization,
  ../beacon_chain/spec/[crypto, datatypes, digest],
  ../beacon_chain/[ssz]
  ../beacon_chain/ssz/ssz_serialization

# TODO turn into arguments
cli do(kind: string, file: string):

@ -1,8 +1,7 @@
import
  confutils, os, strutils, chronicles, json_serialization,
  confutils, os, strutils, json_serialization,
  stew/byteutils,
  ../beacon_chain/spec/[crypto, datatypes, digest],
  ../beacon_chain/[ssz],
  ../beacon_chain/spec/[crypto, datatypes],
  ../beacon_chain/ssz/dynamic_navigator

type

@ -1,7 +1,8 @@
import
  confutils, chronicles,
  ../beacon_chain/spec/[crypto, datatypes],
  ../beacon_chain/[extras, state_transition, ssz]
  ../beacon_chain/[extras, state_transition],
  ../beacon_chain/ssz/[merkleization, ssz_serialization]

cli do(pre: string, blck: string, post: string, verifyStateRoot = false):
  let

@ -1,12 +1,12 @@
# Required for deserialisation of ValidatorSig in Attestation due to
# https://github.com/nim-lang/Nim/issues/11225

import
  stew/ptrops, stew/ranges/ptr_arith,
  ../beacon_chain/[ssz, state_transition],
  ../beacon_chain/spec/[datatypes, digest, validator, beaconstate,
  ../beacon_chain/[extras, state_transition],
  ../beacon_chain/spec/[crypto, datatypes, digest, validator, beaconstate,
    state_transition_block],
  # Required for deserialisation of ValidatorSig in Attestation due to
  # https://github.com/nim-lang/Nim/issues/11225
  ../beacon_chain/spec/crypto,
  ../beacon_chain/extras
  ../beacon_chain/ssz/[merkleization, ssz_serialization]

type
  AttestationInput = object

@ -24,8 +24,9 @@ import
    state_transition_block],
  ../beacon_chain/[
    attestation_pool, block_pool, beacon_node_types, beacon_chain_db,
    interop, ssz, state_transition, validator_pool],
    interop, state_transition, validator_pool],
  eth/db/[kvstore, kvstore_sqlite3],
  ../beacon_chain/ssz/[merkleization, ssz_serialization],
  ./simutils

type Timers = enum

@ -1,7 +1,8 @@
import
  stats, os, strformat, times,
  ../tests/[testblockutil],
  ../beacon_chain/[extras, ssz],
  ../beacon_chain/[extras],
  ../beacon_chain/ssz/[merkleization, ssz_serialization],
  ../beacon_chain/spec/[beaconstate, datatypes, digest, helpers]

template withTimer*(stats: var RunningStat, body: untyped) =

@ -14,7 +14,8 @@ import
  options, sequtils, random, tables,
  ../tests/[testblockutil],
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ../beacon_chain/[attestation_pool, extras, ssz],
  ../beacon_chain/[attestation_pool, extras],
  ../beacon_chain/ssz/[merkleization, ssz_serialization],
  ./simutils

type Timers = enum

@ -15,6 +15,7 @@ import # Unit test
  ./test_beacon_chain_db,
  ./test_beacon_node,
  ./test_beaconstate,
  ./test_bitseqs,
  ./test_block_pool,
  ./test_helpers,
  ./test_keystore,

@ -9,8 +9,6 @@ import
  # Standard library
  os, unittest, strutils, streams, strformat,
  macros, sets,
  # Status libraries
  stew/bitseqs,
  # Third-party
  yaml,
  # Beacon chain internals

@ -10,7 +10,7 @@ import
  os, unittest, strutils, streams, strformat, strscans,
  macros, typetraits,
  # Status libraries
  faststreams, stew/bitseqs, ../testutil,
  faststreams, ../testutil,
  # Third-party
  yaml,
  # Beacon chain internals

@ -0,0 +1,76 @@
{.used.}

import
  unittest, strformat,
  ../beacon_chain/ssz/bitseqs

suite "Bit fields":
  test "roundtrips":
    var
      a = BitSeq.init(100)
      b = BitSeq.init(100)

    check:
      not a[0]

    a.setBit 1

    check:
      not a[0]
      a[1]

    b.setBit 2

    a.combine(b)

    check:
      not a[0]
      a[1]
      a[2]

  test "iterating words":
    for bitCount in [8, 3, 7, 8, 14, 15, 16, 19, 260]:
      checkpoint &"trying bit count {bitCount}"
      var
        a = BitSeq.init(bitCount)
        b = BitSeq.init(bitCount)
        bitsInWord = sizeof(uint) * 8
        expectedWordCount = (bitCount div bitsInWord) + 1

      for i in 0 ..< expectedWordCount:
        let every3rdBit = i * sizeof(uint) * 8 + 2
        a[every3rdBit] = true
        b[every3rdBit] = true

      for word in words(a):
        check word == 4
        word = 2

      for wa, wb in words(a, b):
        check wa == 2 and wb == 4
        wa = 1
        wb = 2

      for i in 0 ..< expectedWordCount:
        for j in 0 ..< bitsInWord:
          let bitPos = i * bitsInWord + j
          if bitPos < bitCount:
            check a[j] == (j == 0)
            check b[j] == (j == 1)

  test "overlaps":
    for bitCount in [1, 62, 63, 64, 91, 127, 128, 129]:
      checkpoint &"trying bit count {bitCount}"
      var
        a = BitSeq.init(bitCount)
        b = BitSeq.init(bitCount)

      for pos in [4, 8, 9, 12, 29, 32, 63, 64, 67]:
        if pos + 2 < bitCount:
          a.setBit(pos)
          b.setBit(pos + 2)

      check:
        not a.overlaps(b)
        not b.overlaps(a)

@ -8,7 +8,8 @@
import
  options, stew/endians2,
  chronicles, eth/trie/[db],
  ../beacon_chain/[beacon_chain_db, block_pool, extras, merkle_minimal, ssz,
  ../beacon_chain/[beacon_chain_db, block_pool, extras, merkle_minimal,
  ../beacon_chain/ssz/merkleization,
    state_transition, validator_pool],
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
    helpers, validator, state_transition_block]

@ -1 +1 @@
Subproject commit a99dafab420bcbbffee35e9bd847a9014eafaffe
Subproject commit 86ac01122c29119cd585e400e85396b6bd3cceb6