ssz: update to 0.5.1:ish (#202)
* ssz: update to 0.5.1:ish
* slightly fewer seq allocations
* still a lot of potential for optimization
* fixes #174
* ssz: avoid reallocating leaves (logN merkle impl)
This commit is contained in:
parent 53699460c6
commit 1b0e67c88c
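The bulk of this diff is a mechanical API change: hash_tree_root now returns an Eth2Digest directly, so the hash_tree_root_final helper and the Eth2Digest(data: ...) wrappers disappear, and call sites that need raw bytes (for example BLS signing) take .data. A minimal sketch of the call-site migration, assuming the repository's module layout and types (illustrative only, not part of the commit):

import ./beacon_chain/ssz, ./beacon_chain/spec/[datatypes, digest]

let blck = BeaconBlock()

# Before: hash_tree_root returned raw bytes and a separate helper wrapped them:
#   let root: Eth2Digest = hash_tree_root_final(blck)

# After: the digest type comes straight out of hash_tree_root ...
let root: Eth2Digest = hash_tree_root(blck)

# ... and callers that want the raw 32 bytes (e.g. bls_sign) use `.data`:
let rawBytes: array[32, byte] = root.data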
@@ -56,10 +56,10 @@ proc putState*(db: BeaconChainDB, key: Eth2Digest, value: BeaconState) =
   db.backend.put(subkey(type value, key), SSZ.encode(value))

 proc putState*(db: BeaconChainDB, value: BeaconState) =
-  db.putState(hash_tree_root_final(value), value)
+  db.putState(hash_tree_root(value), value)

 proc putBlock*(db: BeaconChainDB, value: BeaconBlock) =
-  db.putBlock(hash_tree_root_final(value), value)
+  db.putBlock(hash_tree_root(value), value)

 proc putHeadBlock*(db: BeaconChainDB, key: Eth2Digest) =
   db.backend.put(subkey(kHeadBlock), key.data) # TODO head block?
@@ -141,7 +141,7 @@ proc init*(T: type BeaconNode, conf: BeaconNodeConf): Future[BeaconNode] {.async
     let
       tailState = Json.loadFile(snapshotFile, BeaconState)
       tailBlock = get_initial_beacon_block(tailState)
-      blockRoot = hash_tree_root_final(tailBlock)
+      blockRoot = hash_tree_root(tailBlock)

     notice "Creating new database from snapshot",
       blockRoot = shortLog(blockRoot),
@@ -424,7 +424,7 @@ proc proposeBlock(node: BeaconNode,
     updateState(
       node.state.data, node.state.blck.root, newBlock, {skipValidation})
   doAssert ok # TODO: err, could this fail somehow?
-  node.state.root = hash_tree_root_final(node.state.data)
+  node.state.root = hash_tree_root(node.state.data)

   newBlock.state_root = node.state.root

@@ -640,7 +640,7 @@ proc onAttestation(node: BeaconNode, attestation: Attestation) =
 proc onBeaconBlock(node: BeaconNode, blck: BeaconBlock) =
   # We received a block but don't know much about it yet - in particular, we
   # don't know if it's part of the chain we're currently building.
-  let blockRoot = hash_tree_root_final(blck)
+  let blockRoot = hash_tree_root(blck)
   debug "Block received",
     blck = shortLog(blck),
     blockRoot = shortLog(blockRoot)
@@ -731,7 +731,7 @@ when isMainModule:

     testnetMetadata = NetworkMetadata(
       networkId: config.networkId,
-      genesisRoot: hash_tree_root_final(initialState),
+      genesisRoot: hash_tree_root(initialState),
       bootstrapNodes: @[bootstrapAddress],
       numShards: SHARD_COUNT,
       slotDuration: SECONDS_PER_SLOT,
@@ -143,7 +143,7 @@ proc add*(
   ## everything checks out
   # TODO reevaluate passing the state in like this
   # TODO reevaluate this API - it's pretty ugly with the bool return
-  doAssert blockRoot == hash_tree_root_final(blck)
+  doAssert blockRoot == hash_tree_root(blck)

   # Already seen this block??
   if blockRoot in pool.blocks:
@@ -320,7 +320,7 @@ proc maybePutState(pool: BlockPool, state: BeaconState) =
   if state.slot mod SLOTS_PER_EPOCH == 0:
     info "Storing state",
       stateSlot = humaneSlotNum(state.slot),
-      stateRoot = shortLog(hash_tree_root_final(state)) # TODO cache?
+      stateRoot = shortLog(hash_tree_root(state)) # TODO cache?
     pool.db.putState(state)

 proc updateState*(
@@ -222,7 +222,7 @@ func get_temporary_block_header*(blck: BeaconBlock): BeaconBlockHeader =
     slot: blck.slot.uint64,
     previous_block_root: blck.previous_block_root,
     state_root: ZERO_HASH,
-    block_body_root: hash_tree_root_final(blck.body),
+    block_body_root: hash_tree_root(blck.body),
     # signed_root(block) is used for block id purposes so signature is a stub
     signature: EMPTY_SIGNATURE,
   )
@@ -311,8 +311,8 @@ func get_genesis_beacon_state*(
     if get_effective_balance(state, vi) >= MAX_DEPOSIT_AMOUNT:
       activate_validator(state, vi, true)

-  let genesis_active_index_root = Eth2Digest(data: hash_tree_root(
-    get_active_validator_indices(state.validator_registry, GENESIS_EPOCH)))
+  let genesis_active_index_root = hash_tree_root(
+    get_active_validator_indices(state.validator_registry, GENESIS_EPOCH))
   for index in 0 ..< LATEST_ACTIVE_INDEX_ROOTS_LENGTH:
     state.latest_active_index_roots[index] = genesis_active_index_root
   state.current_shuffling_seed = generate_seed(state, GENESIS_EPOCH)
@@ -327,7 +327,7 @@ func get_genesis_beacon_state*(
 func get_initial_beacon_block*(state: BeaconState): BeaconBlock =
   BeaconBlock(
     slot: GENESIS_SLOT,
-    state_root: Eth2Digest(data: hash_tree_root(state))
+    state_root: hash_tree_root(state)
     # parent_root, randao_reveal, eth1_data, signature, and body automatically
     # initialized to default values.
   )
@@ -47,7 +47,8 @@
 import
   sequtils,
   hashes, eth/rlp,
-  blscurve, json_serialization
+  blscurve, json_serialization,
+  digest

 export
   json_serialization
@@ -87,7 +88,7 @@ func bls_verify*(

 # https://github.com/ethereum/eth2.0-specs/blob/v0.5.0/specs/bls_signature.md#bls_verify_multiple
 func bls_verify_multiple*(
-    pubkeys: seq[ValidatorPubKey], message_hashes: seq[array[0..31, byte]],
+    pubkeys: seq[ValidatorPubKey], message_hashes: openArray[Eth2Digest],
     sig: ValidatorSig, domain: uint64): bool =
   let L = len(pubkeys)
   doAssert L == len(message_hashes)
@@ -98,7 +99,7 @@ func bls_verify_multiple*(
     # TODO spec doesn't say to handle this specially, but it's silly to
     # validate without any actual public keys.
     if pubkey != ValidatorPubKey() and
-       not sig.verify(message_hash, domain, pubkey):
+       not sig.verify(message_hash.data, domain, pubkey):
       return false

   true
@@ -9,7 +9,7 @@
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

 import
-  endians, typetraits, options, algorithm,
+  endians, typetraits, options, algorithm, math,
   faststreams/input_stream, serialization, eth/common, nimcrypto/keccak,
   ./spec/[bitfield, crypto, datatypes, digest]

@@ -69,7 +69,7 @@ func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
 func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()

 type
-  TrivialType =
+  BasicType =
     # Types that serialize down to a fixed-length array - most importantly,
     # these values don't carry a length prefix in the final encoding. toBytesSSZ
     # provides the actual nim-type-to-bytes conversion.
@@ -80,7 +80,7 @@
     SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
     bool

-func sszLen(v: TrivialType): int = toBytesSSZ(v).len
+func sszLen(v: BasicType): int = toBytesSSZ(v).len
 func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len

 func sszLen(v: object | tuple): int =
@@ -165,7 +165,7 @@ proc writeValue*(w: var SszWriter, obj: auto) =
   # additional overloads for `writeValue`.
   mixin writeValue

-  when obj is ValidatorIndex|TrivialType:
+  when obj is ValidatorIndex|BasicType:
     w.stream.append obj.toBytesSSZ
   elif obj is enum:
     w.stream.append uint64(obj).toBytesSSZ
@@ -204,7 +204,7 @@ proc readValue*(r: var SszReader, result: var auto) =
     if not r.stream[].ensureBytes(n):
       raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")

-  when result is ValidatorIndex|TrivialType:
+  when result is ValidatorIndex|BasicType:
     let bytesToRead = result.sszLen;
     checkEof bytesToRead

@@ -263,10 +263,12 @@ proc readValue*(r: var SszReader, result: var auto) =
 # ################### Hashing ###################################

 # Sample hash_tree_root implementation based on:
-# https://github.com/ethereum/eth2.0-specs/blob/a9328157a87451ee4f372df272ece158b386ec41/specs/simple-serialize.md
+# https://github.com/ethereum/eth2.0-specs/blob/v0.5.1/specs/simple-serialize.md
+# https://github.com/ethereum/eth2.0-specs/blob/v0.5.1/utils/phase0/minimal_ssz.py
 # TODO Probably wrong - the spec is pretty bare-bones and no test vectors yet

-const CHUNK_SIZE = 128
+const
+  BYTES_PER_CHUNK = 32

 # ################### Hashing helpers ###################################

@@ -275,59 +277,117 @@ template withHash(body: untyped): array[32, byte] =
   let tmp = withEth2Hash: body
   toBytesSSZ tmp

 func hash(a: openArray[byte]): array[32, byte] =
   withHash:
     h.update(a)

 func hash(a, b: openArray[byte]): array[32, byte] =
   withHash:
     h.update(a)
     h.update(b)

+type
+  Chunk = array[BYTES_PER_CHUNK, byte]
+
 # TODO: er, how is this _actually_ done?
 # Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
 func empty(T: type): T = discard
-const emptyChunk = empty(array[CHUNK_SIZE, byte])
+const emptyChunk = empty(Chunk)

-func merkleHash[T](lst: openArray[T]): array[32, byte]
-
-# ################### Hashing interface ###################################
-
-func hash_tree_root*(x: SomeInteger | bool): array[sizeof(x), byte] =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
-  toBytesSSZ(x)
-
-func hash_tree_root*(x: ValidatorIndex): array[3, byte] =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
-  toBytesSSZ(x)
-
-func hash_tree_root*(x: EthAddress): array[sizeof(x), byte] =
-  ## Addresses copied as-is
-  toBytesSSZ(x)
-
-func hash_tree_root*(x: Eth2Digest): array[32, byte] =
-  ## Hash32 copied as-is
-  toBytesSSZ(x)
-
-func hash_tree_root*(x: openArray[byte]): array[32, byte] =
-  ## Blobs are hashed
-  hash(x)
-
-func hash_tree_root*[T: seq|array](x: T): array[32, byte] =
-  ## Sequences are tree-hashed
-  merkleHash(x)
-
-func hash_tree_root*[T: BitField](x: T): array[32, byte] =
-  ## Sequences are tree-hashed
-  merkleHash(x.bits)
-
-func hash_tree_root*[T: object|tuple](x: T): array[32, byte] =
-  ## Containers have their fields recursively hashed, concatenated and hashed
-  withHash:
-    for field in x.fields:
-      h.update hash_tree_root(field.toSSZType)
+func mix_in_length(root: Chunk, length: int): Chunk =
+  var dataLen: array[32, byte]
+  var lstLen = uint64(length)
+  littleEndian64(dataLen[32-8].addr, lstLen.addr)
+
+  hash(root, dataLen)
+
+proc pack(values: seq|array): iterator(): Chunk =
+  result = iterator (): Chunk =
+    # TODO should be trivial to avoid this seq also..
+    # TODO I get a feeling a copy of the array is taken to the closure, which
+    #      also needs fixing
+    # TODO avoid closure iterators that involve GC
+    var tmp = newSeqOfCap[byte](values.len() * sizeof(toBytesSSZ(values[0])))
+    for v in values:
+      tmp.add toBytesSSZ(v)
+
+    for v in 0..<tmp.len div sizeof(Chunk):
+      var c: Chunk
+      copyMem(addr c, addr tmp[v * sizeof(Chunk)], sizeof(Chunk))
+      yield c
+
+    let remains = tmp.len mod sizeof(Chunk)
+    if remains != 0:
+      var c: Chunk
+      copyMem(addr c, addr tmp[tmp.len - remains], remains)
+      yield c
+
+proc pad(iter: iterator(): Chunk): iterator(): Chunk =
+  # Pad a list of chunks to the next power-of-two length with empty chunks -
+  # this includes ensuring there's at least one chunk return
+  result = iterator(): Chunk =
+    var count = 0
+
+    while true:
+      let item = iter()
+      if finished(iter): break
+      count += 1
+      yield item
+
+    doAssert nextPowerOfTwo(0) == 1,
+      "Usefully, empty lists will be padded to one empty block"
+
+    for _ in count..<nextPowerOfTwo(count):
+      yield emptyChunk
+
+func merkleize(chunker: iterator(): Chunk): Chunk =
+  var
+    stack: seq[tuple[height: int, chunk: Chunk]]
+    paddedChunker = pad(chunker)
+
+  while true:
+    let chunk = paddedChunker()
+    if finished(paddedChunker): break
+
+    # Leaves start at height 0 - every time they move up, height is increased
+    # allowing us to detect two chunks at the same height ready for
+    # consolidation
+    # See also: http://szydlo.com/logspacetime03.pdf
+    stack.add (0, chunk)
+
+    # Consolidate items of the same height - this keeps stack size at log N
+    while stack.len > 1 and stack[^1].height == stack[^2].height:
+      # As tradition dictates - one feature, at least one nim bug:
+      # https://github.com/nim-lang/Nim/issues/9684
+      let tmp = hash(stack[^2].chunk, stack[^1].chunk)
+      stack[^2].height += 1
+      stack[^2].chunk = tmp
+      discard stack.pop
+
+  doAssert stack.len == 1,
+    "With power-of-two leaves, we should end up with a single root"
+
+  stack[0].chunk
+
+template elementType[T, N](_: type array[N, T]): typedesc = T
+template elementType[T](_: type seq[T]): typedesc = T
+
+func hash_tree_root*[T](value: T): Eth2Digest =
+  # Merkle tree
+  Eth2Digest(data:
+    when T is BasicType:
+      merkleize(pack([value]))
+    elif T is array|seq:
+      when T.elementType() is BasicType:
+        mix_in_length(merkleize(pack(value)), len(value))
+      else:
+        var roots = iterator(): Chunk =
+          for v in value:
+            yield hash_tree_root(v).data
+        mix_in_length(merkleize(roots), len(value))
+    elif T is object:
+      var roots = iterator(): Chunk =
+        for v in value.fields:
+          yield hash_tree_root(v).data
+
+      merkleize(roots)
+  )

 # https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/simple-serialize.md#signed-roots
 func signed_root*[T: object](x: T): array[32, byte] =
@@ -342,86 +402,6 @@ func signed_root*[T: object](x: T): array[32, byte] =
     if name == "signature":
       found_field_name = true
       break
-    h.update hash_tree_root(field.toSSZType)
+    h.update hash_tree_root(field.toSSZType).data

   doAssert found_field_name
-
-# #################################
-# hash_tree_root not part of official spec
-func hash_tree_root*(x: enum): array[8, byte] =
-  ## TODO - Warning ⚠️: not part of the spec
-  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
-  ## This is a "stub" needed for BeaconBlock hashing
-  static: doAssert x.sizeof == 1 # Check that the enum fits in 1 byte
-  # TODO We've put enums where the spec uses `uint64` - maybe we should not be
-  #      using enums?
-  hash_tree_root(uint64(x))
-
-func hash_tree_root*(x: ValidatorPubKey): array[32, byte] =
-  ## TODO - Warning ⚠️: not part of the spec
-  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
-  ## This is a "stub" needed for BeaconBlock hashing
-  x.getBytes().hash()
-
-func hash_tree_root*(x: ValidatorSig): array[32, byte] =
-  ## TODO - Warning ⚠️: not part of the spec
-  ## as of https://github.com/ethereum/beacon_chain/pull/133/files
-  ## This is a "stub" needed for BeaconBlock hashing
-  x.getBytes().hash()
-
-func hash_tree_root_final*(x: object|tuple): Eth2Digest =
-  # TODO suggested for spec:
-  # https://github.com/ethereum/eth2.0-specs/issues/276
-  # only for objects now, else the padding would have to be implemented - not
-  # needed yet..
-  Eth2Digest(data: hash_tree_root(x))
-
-# ################### Tree hash ###################################
-
-func merkleHash[T](lst: openArray[T]): array[32, byte] =
-  ## Merkle tree hash of a list of homogenous, non-empty items
-
-  # TODO: the heap allocations here can be avoided by computing the merkle tree
-  #       recursively, but for now keep things simple and aligned with upstream
-
-  # Store length of list (to compensate for non-bijectiveness of padding)
-  var dataLen: array[32, byte]
-  var lstLen = uint64(len(lst))
-  littleEndian64(dataLen[32-8].addr, lstLen.addr)
-
-  # Divide into chunks
-  var chunkz: seq[seq[byte]]
-
-  if len(lst) == 0:
-    chunkz.add @emptyChunk
-  elif sizeof(hash_tree_root(lst[0])) < CHUNK_SIZE:
-    # See how many items fit in a chunk
-    let itemsPerChunk = CHUNK_SIZE div sizeof(hash_tree_root(lst[0]))
-
-    chunkz.setLen((len(lst) + itemsPerChunk - 1) div itemsPerChunk)
-
-    # Build a list of chunks based on the number of items in the chunk
-    for i in 0..<chunkz.len:
-      for j in 0..<itemsPerChunk:
-        if i == chunkz.len - 1:
-          let idx = i * itemsPerChunk + j
-          if idx >= lst.len: break # Last chunk may be partial!
-        chunkz[i].add hash_tree_root(lst[i * itemsPerChunk + j])
-  else:
-    # Leave large items alone
-    chunkz.setLen(len(lst))
-    for i in 0..<len(lst):
-      chunkz[i].add hash_tree_root(lst[i])
-
-  while chunkz.len() > 1:
-    if chunkz.len() mod 2 == 1:
-      chunkz.add @emptyChunk
-    for i in 0..<(chunkz.len div 2):
-      # As tradition dictates - one feature, at least one nim bug:
-      # https://github.com/nim-lang/Nim/issues/9684
-      let tmp = @(hash(chunkz[i * 2], chunkz[i * 2 + 1]))
-      chunkz[i] = tmp
-
-    chunkz.setLen(chunkz.len div 2)
-
-  hash(chunkz[0], dataLen)
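For orientation, the new hashing flow in the SSZ module above is: serialize basic values and pack them into 32-byte chunks, pad the chunk list to a power-of-two length, merkleize with a height-tagged stack so only O(log N) chunks are alive at any time (the "logN merkle impl" from the commit message), then mix the list length into the root. Below is a self-contained sketch of that pipeline under stated assumptions: it uses plain seqs instead of the closure iterators in the diff, and nimcrypto's SHA-256 as a stand-in for the repository's Eth2 hash, so the resulting digests are illustrative only.

import math, nimcrypto

type Chunk = array[32, byte]

# Stand-in 2-to-1 hash; the repository uses its own Eth2 hash function.
proc hash2(a, b: Chunk): Chunk =
  var ctx: sha256
  ctx.init()
  ctx.update(a)
  ctx.update(b)
  result = ctx.finish().data

# pack: little-endian-serialize uint64 values and split them into 32-byte chunks.
proc pack(values: openArray[uint64]): seq[Chunk] =
  var bytes: seq[byte]
  for v in values:
    for i in 0 ..< 8:
      bytes.add byte((v shr (8 * i)) and 0xff'u64)
  var pos = 0
  while pos < bytes.len:
    var c: Chunk
    for j in 0 ..< min(32, bytes.len - pos):
      c[j] = bytes[pos + j]
    result.add c
    pos += 32
  if result.len == 0:            # empty lists still produce one empty chunk
    var emptyChunk: Chunk
    result.add emptyChunk

# merkleize with the log-N stack trick: leaves enter at height 0, and two
# entries of equal height are immediately consolidated into their parent.
proc merkleize(chunks: seq[Chunk]): Chunk =
  var padded = chunks
  padded.setLen(nextPowerOfTwo(chunks.len))   # pad with zeroed chunks
  var stack: seq[tuple[height: int, chunk: Chunk]]
  for chunk in padded:
    stack.add (0, chunk)
    while stack.len > 1 and stack[^1].height == stack[^2].height:
      let parent = hash2(stack[^2].chunk, stack[^1].chunk)
      stack[^2] = (stack[^2].height + 1, parent)
      discard stack.pop
  doAssert stack.len == 1        # a power-of-two leaf count leaves a single root
  result = stack[0].chunk

# mix_in_length: hash the root together with the little-endian list length.
proc mixInLength(root: Chunk, length: int): Chunk =
  var lenChunk: Chunk
  let l = uint64(length)
  for i in 0 ..< 8:
    lenChunk[i] = byte((l shr (8 * i)) and 0xff'u64)
  result = hash2(root, lenChunk)

when isMainModule:
  let values = @[1'u64, 2, 3, 4, 5]   # 40 bytes -> 2 chunks, already a power of two
  echo mixInLength(merkleize(pack(values)), values.len)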
@@ -59,7 +59,7 @@ proc processBlockHeader(
     notice "Block header: previous block root mismatch",
       previous_block_root = blck.previous_block_root,
       latest_block_header = state.latest_block_header,
-      latest_block_header_root = hash_tree_root_final(state.latest_block_header)
+      latest_block_header_root = hash_tree_root(state.latest_block_header)
     return false

   state.latest_block_header = get_temporary_block_header(blck)
@@ -89,7 +89,7 @@ proc processRandao(
   if skipValidation notin flags:
     if not bls_verify(
       proposer.pubkey,
-      hash_tree_root(get_current_epoch(state).uint64),
+      hash_tree_root(get_current_epoch(state).uint64).data,
       blck.body.randao_reveal,
       get_domain(state.fork, get_current_epoch(state), DOMAIN_RANDAO)):

@@ -471,7 +471,7 @@ func cacheState(state: var BeaconState) =
   if not (state.slot > GENESIS_SLOT):
     return

-  let previous_slot_state_root = hash_tree_root_final(state)
+  let previous_slot_state_root = hash_tree_root(state)

   # store the previous slot's post state transition root
   state.latest_state_roots[state.slot mod SLOTS_PER_HISTORICAL_ROOT] =
@@ -1049,8 +1049,8 @@ func finish_epoch_update(state: var BeaconState) =
   let index_root_position =
     (next_epoch + ACTIVATION_EXIT_DELAY) mod LATEST_ACTIVE_INDEX_ROOTS_LENGTH
   state.latest_active_index_roots[index_root_position] =
-    Eth2Digest(data: hash_tree_root(get_active_validator_indices(
-      state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY))
+    hash_tree_root(get_active_validator_indices(
+      state.validator_registry, next_epoch + ACTIVATION_EXIT_DELAY)
     )

   # Set total slashed balances
@@ -1068,7 +1068,7 @@ func finish_epoch_update(state: var BeaconState) =
       block_roots: state.latest_block_roots,
       state_roots: state.latest_state_roots,
     )
-  state.historical_roots.add (hash_tree_root_final(historical_batch))
+  state.historical_roots.add (hash_tree_root(historical_batch))

   # Rotate current/previous epoch attestations
   state.previous_epoch_attestations = state.current_epoch_attestations
@@ -1111,7 +1111,7 @@ func processEpoch(state: var BeaconState) =

 # https://github.com/ethereum/eth2.0-specs/blob/v0.5.0/specs/core/0_beacon-chain.md#state-root-verification
 proc verifyStateRoot(state: BeaconState, blck: BeaconBlock): bool =
-  let state_root = hash_tree_root_final(state)
+  let state_root = hash_tree_root(state)
   if state_root != blck.state_root:
     notice "Block: root verification failed",
       block_state_root = blck.state_root, state_root
@@ -31,11 +31,11 @@ func toHeader(b: BeaconBlock): BeaconBlockHeaderRLP =
     randao_reveal: b.body.randao_reveal,
     eth1_data : b.body.eth1_data,
     signature: b.signature,
-    body: hash_tree_root_final(b.body)
+    body: hash_tree_root(b.body)
   )

 proc fromHeaderAndBody(b: var BeaconBlock, h: BeaconBlockHeaderRLP, body: BeaconBlockBody) =
-  doAssert(hash_tree_root_final(body) == h.body)
+  doAssert(hash_tree_root(body) == h.body)
   b.slot = h.slot.Slot
   b.previous_block_root = h.parent_root
   b.state_root = h.state_root
@@ -51,7 +51,7 @@ proc importBlocks(node: BeaconNode,
   var bodyMap = initTable[Eth2Digest, int]()

   for i, b in bodies:
-    bodyMap[hash_tree_root_final(b)] = i
+    bodyMap[hash_tree_root(b)] = i

   var goodBlocks, badBlocks = 0
   for h in headers:
@@ -38,7 +38,7 @@ cli do (validators: int = 125000,
         withdrawal_credentials: withdrawalCredentials)

       proofOfPossession = bls_sign(
-        privkey, hash_tree_root_final(proofOfPossessionData).data,
+        privkey, hash_tree_root(proofOfPossessionData).data,
         0 # TODO - domain
       )

@@ -41,7 +41,7 @@ proc signAttestation*(v: AttachedValidator,
   if v.kind == inProcess:
     await sleepAsync(1)

-    let attestationRoot = hash_tree_root_final(attestation)
+    let attestationRoot = hash_tree_root(attestation)
    # TODO: Avoid the allocations belows
     var dataToSign = @(attestationRoot.data) & @[0'u8]
     # TODO: Use `domain` here
@@ -58,7 +58,7 @@ func genRandaoReveal*(k: ValidatorPrivKey, state: BeaconState, slot: Slot):

   # Off-by-one? I often get slot == state.slot but the check was "doAssert slot > state.slot" (Mamy)
   doAssert slot >= state.slot, "input slot: " & $humaneSlotNum(slot) & " - beacon state slot: " & $humaneSlotNum(state.slot)
-  bls_sign(k, hash_tree_root(slot_to_epoch(slot).uint64),
+  bls_sign(k, hash_tree_root(slot_to_epoch(slot).uint64).data,
     get_domain(state.fork, slot_to_epoch(slot), DOMAIN_RANDAO))

 func genRandaoReveal*(v: AttachedValidator, state: BeaconState, slot: Slot):
@@ -60,7 +60,7 @@ cli do(slots = 1945,
   var
     attestations: array[MIN_ATTESTATION_INCLUSION_DELAY, seq[Attestation]]
     state = genesisState
-    latest_block_root = hash_tree_root_final(genesisBlock)
+    latest_block_root = hash_tree_root(genesisBlock)
     timers: array[Timers, RunningStat]
     attesters: RunningStat
     r: Rand
@@ -90,7 +90,7 @@ cli do(slots = 1945,
     withTimer(timers[t]):
       blck = addBlock(state, latest_block_root, body, flags)
     latest_block_root = withTimerRet(timers[tHashBlock]):
-      hash_tree_root_final(blck)
+      hash_tree_root(blck)

     if attesterRatio > 0.0:
       # attesterRatio is the fraction of attesters that actually do their
@@ -25,7 +25,7 @@ suite "Beacon chain DB":

     let
       blck = BeaconBlock()
-      root = hash_tree_root_final(blck)
+      root = hash_tree_root(blck)

     db.putBlock(blck)

@@ -39,7 +39,7 @@ suite "Beacon chain DB":

     let
       state = BeaconState()
-      root = hash_tree_root_final(state)
+      root = hash_tree_root(state)

     db.putState(state)

@@ -58,11 +58,11 @@ suite "Beacon chain DB":

     let
       a0 = BeaconBlock(slot: GENESIS_SLOT + 0)
-      a0r = hash_tree_root_final(a0)
+      a0r = hash_tree_root(a0)
       a1 = BeaconBlock(slot: GENESIS_SLOT + 1, previous_block_root: a0r)
-      a1r = hash_tree_root_final(a1)
+      a1r = hash_tree_root(a1)
       a2 = BeaconBlock(slot: GENESIS_SLOT + 2, previous_block_root: a1r)
-      a2r = hash_tree_root_final(a2)
+      a2r = hash_tree_root(a2)

     doAssert toSeq(db.getAncestors(a0r)) == []
     doAssert toSeq(db.getAncestors(a2r)) == []
@@ -36,7 +36,7 @@ suite "Block pool processing":

     let
       b1 = makeBlock(state.data, state.blck.root, BeaconBlockBody())
-      b1Root = hash_tree_root_final(b1)
+      b1Root = hash_tree_root(b1)

     # TODO the return value is ugly here, need to fix and test..
     discard pool.add(state, b1Root, b1)
@@ -55,9 +55,9 @@ suite "Block pool processing":

     let
       b1 = addBlock(state.data, state.blck.root, BeaconBlockBody(), {})
-      b1Root = hash_tree_root_final(b1)
+      b1Root = hash_tree_root(b1)
       b2 = addBlock(state.data, b1Root, BeaconBlockBody(), {})
-      b2Root = hash_tree_root_final(b2)
+      b2Root = hash_tree_root(b2)

     discard pool.add(state, b2Root, b2)

@@ -81,16 +81,10 @@ suite "Simple serialization":
 suite "Tree hashing":
   # TODO Nothing but smoke tests for now..

-  test "Hash Validator":
-    let vr = Validator()
-    check: hash_tree_root(vr).len > 0
-
   test "Hash BeaconBlock":
-    ## TODO: Test genesis hash when spec is updated
-    let bb = BeaconBlock()
-    check: hash_tree_root(bb).len > 0
-
-  test "Hash integer":
-    check: hash_tree_root(0x01'u32) == [1'u8, 0, 0, 0] # little endian!
-    check: hash_tree_root(ValidatorIndex(0x01)) == [1'u8, 0, 0] # little endian!
+    let vr = BeaconBlock()
+    check: hash_tree_root(vr) != Eth2Digest()
+
+  test "Hash BeaconState":
+    let vr = BeaconBlock()
+    check: hash_tree_root(vr) != Eth2Digest()
@@ -25,7 +25,7 @@ suite "Block processing":
   test "Passes from genesis state, no block":
     var
       state = genesisState
-      previous_block_root = hash_tree_root_final(genesisBlock)
+      previous_block_root = hash_tree_root(genesisBlock)

     advanceState(state, previous_block_root)
     check:
@@ -34,7 +34,7 @@ suite "Block processing":
   test "Passes from genesis state, empty block":
     var
       state = genesisState
-      previous_block_root = hash_tree_root_final(genesisBlock)
+      previous_block_root = hash_tree_root(genesisBlock)
       new_block = makeBlock(state, previous_block_root, BeaconBlockBody())

     let block_ok = updateState(state, previous_block_root, new_block, {})
@@ -47,7 +47,7 @@ suite "Block processing":
   test "Passes through epoch update, no block":
     var
       state = genesisState
-      previous_block_root = hash_tree_root_final(genesisBlock)
+      previous_block_root = hash_tree_root(genesisBlock)

     for i in 1..SLOTS_PER_EPOCH.int:
       advanceState(state, previous_block_root)
@@ -58,7 +58,7 @@ suite "Block processing":
   test "Passes through epoch update, empty block":
     var
       state = genesisState
-      previous_block_root = hash_tree_root_final(genesisBlock)
+      previous_block_root = hash_tree_root(genesisBlock)

     for i in 1..SLOTS_PER_EPOCH.int:
       var new_block = makeBlock(state, previous_block_root, BeaconBlockBody())
@@ -69,7 +69,7 @@ suite "Block processing":
       check:
         block_ok

-      previous_block_root = hash_tree_root_final(new_block)
+      previous_block_root = hash_tree_root(new_block)

     check:
       state.slot == genesisState.slot + SLOTS_PER_EPOCH
@@ -77,7 +77,7 @@ suite "Block processing":
   test "Attestation gets processed at epoch":
     var
       state = genesisState
-      previous_block_root = hash_tree_root_final(genesisBlock)
+      previous_block_root = hash_tree_root(genesisBlock)

     # Slot 0 is a finalized slot - won't be making attestations for it..
     advanceState(state, previous_block_root)
@@ -44,7 +44,7 @@ func makeDeposit(i: int, flags: UpdateFlags): Deposit =
         withdrawal_credentials: withdrawal_credentials,
       )
     let domain = 0'u64
-    bls_sign(privkey, hash_tree_root_final(proof_of_possession_data).data, domain)
+    bls_sign(privkey, hash_tree_root(proof_of_possession_data).data, domain)

   Deposit(
     index: i.uint64,
@@ -116,7 +116,7 @@ proc addBlock*(

   # Ok, we have the new state as it would look with the block applied - now we
   # can set the state root in order to be able to create a valid signature
-  new_block.state_root = Eth2Digest(data: hash_tree_root(state))
+  new_block.state_root = hash_tree_root(state)

   let proposerPrivkey = hackPrivKey(proposer)
   doAssert proposerPrivkey.pubKey() == proposer.pubkey,
@@ -170,7 +170,7 @@ proc makeAttestation*(
   set_bitfield_bit(aggregation_bitfield, sac_index)

   let
-    msg = hash_tree_root_final(
+    msg = hash_tree_root(
       AttestationDataAndCustodyBit(data: data, custody_bit: false))
     sig =
       if skipValidation notin flags:
@@ -192,7 +192,7 @@ proc makeAttestation*(

 proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =
   let
-    tailRoot = hash_tree_root_final(tailBlock)
+    tailRoot = hash_tree_root(tailBlock)

   result = init(BeaconChainDB, newMemoryDB())
   result.putState(tailState)
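As a quick sanity check on the pipeline's arithmetic: pack() emits ceil(8·n / 32) chunks for n uint64 values (never fewer than one), and pad() rounds that up to the next power of two so merkleize() always works on a full binary tree. A small illustrative helper, hypothetical and not part of the diff:

import math

# Chunks produced by pack() for n little-endian uint64 values (8 bytes each).
proc packedChunks(n: int): int =
  max(1, (n * 8 + 31) div 32)

# Chunks seen by merkleize() after pad(): the next power of two.
proc paddedChunks(n: int): int =
  nextPowerOfTwo(packedChunks(n))

when isMainModule:
  doAssert packedChunks(5) == 2    # 40 bytes -> 2 chunks
  doAssert paddedChunks(5) == 2    # already a power of two
  doAssert packedChunks(9) == 3    # 72 bytes -> 3 chunks
  doAssert paddedChunks(9) == 4    # one emptyChunk of padding added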