Implement the latest SSZ specification and integrate the official SSZ test suite
This commit is contained in:
parent 7a4b7a6cfb
commit 398ea55801
@@ -1,7 +1,7 @@
 import
   deques, options, sequtils, tables,
-  chronicles,
-  ./spec/[beaconstate, bitfield, datatypes, crypto, digest, helpers, validator],
+  chronicles, stew/bitseqs,
+  ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
   ./extras, ./beacon_chain_db, ./ssz, ./block_pool,
   beacon_node_types

@@ -61,11 +61,11 @@ proc validate(
       finalizedEpoch = humaneEpochNum(state.finalized_checkpoint.epoch)
     return

-  if not allIt(attestation.custody_bits.bits, it == 0):
+  if not allIt(attestation.custody_bits.bytes, it == 0):
     notice "Invalid custody bitfield for phase 0"
     return false

-  if not anyIt(attestation.aggregation_bits.bits, it != 0):
+  if not anyIt(attestation.aggregation_bits.bytes, it != 0):
     notice "Empty aggregation bitfield"
     return false

@@ -211,8 +211,7 @@ proc add*(pool: var AttestationPool,
         # Attestations in the pool that are a subset of the new attestation
         # can now be removed per same logic as above
         a.validations.keepItIf(
-          if it.aggregation_bits.isSubsetOf(
-              validation.aggregation_bits):
+          if it.aggregation_bits.isSubsetOf(validation.aggregation_bits):
             debug "Removing subset attestation",
               existingParticipants = get_attesting_indices_seq(
                 state, a.data, it.aggregation_bits),
@@ -314,10 +313,8 @@ proc getAttestationsForBlock*(
       # and naively add as much as possible in one go, by we could also
       # add the same attestation data twice, as long as there's at least
       # one new attestation in there
-      if not attestation.aggregation_bits.overlaps(
-          v.aggregation_bits):
-        attestation.aggregation_bits.combine(
-          v.aggregation_bits)
+      if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
+        attestation.aggregation_bits.combine(v.aggregation_bits)
         attestation.custody_bits.combine(v.custody_bits)
         attestation.signature.combine(v.aggregate_signature)

@@ -1,9 +1,9 @@
 import
   net, sequtils, options, tables, osproc, random, strutils, times, strformat,
-  stew/shims/os, stew/objects,
+  stew/shims/os, stew/[objects, bitseqs],
   chronos, chronicles, confutils, serialization/errors,
   eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,
-  spec/[bitfield, datatypes, digest, crypto, beaconstate, helpers, validator],
+  spec/[datatypes, digest, crypto, beaconstate, helpers, validator],
   conf, time, state_transition, fork_choice, ssz, beacon_chain_db,
   validator_pool, extras, attestation_pool, block_pool, eth2_network,
   beacon_node_types, mainchain_monitor, trusted_state_snapshots, version,

@@ -309,15 +309,15 @@ proc sendAttestation(node: BeaconNode,
   let
     validatorSignature = await validator.signAttestation(attestationData)

-  var aggregationBitfield = BitField.init(committeeLen)
-  set_bitfield_bit(aggregationBitfield, indexInCommittee)
+  var aggregationBits = CommitteeValidatorsBits.init(committeeLen)
+  aggregationBits.raiseBit indexInCommittee

   var attestation = Attestation(
     data: attestationData,
     signature: validatorSignature,
-    aggregation_bits: aggregationBitfield,
+    aggregation_bits: aggregationBits,
     # Stub in phase0
-    custody_bits: BitField.init(committeeLen)
+    custody_bits: CommitteeValidatorsBits.init(committeeLen)
   )

   node.network.broadcast(topicAttestations, attestation)

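The new aggregation field is a length-capped bit list with one bit per committee member; the sender raises only its own bit before broadcasting. A minimal, self-contained sketch of that one-hot semantics (illustrative helpers only; the real CommitteeValidatorsBits wraps stew/bitseqs and additionally carries a trailing length-marker bit in its SSZ form):

proc initBits(len: int): seq[byte] =
  # one bit per committee member, packed little-endian within each byte
  newSeq[byte]((len + 7) div 8)

proc raiseBit(bits: var seq[byte], i: int) =
  bits[i div 8] = bits[i div 8] or (1'u8 shl (i mod 8))

var aggregationBits = initBits(5)   # committee of five validators
aggregationBits.raiseBit 3          # this validator sits at index 3
assert aggregationBits == @[0b0000_1000'u8]
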
@@ -1,7 +1,7 @@
 import
   sets, deques, tables,
-  eth/keys,
-  spec/[bitfield, datatypes, crypto, digest],
+  eth/keys, stew/bitseqs,
+  spec/[datatypes, crypto, digest],
   beacon_chain_db, conf, mainchain_monitor, eth2_network, time

 type
@@ -45,8 +45,8 @@ type
   #
   # #############################################
   Validation* = object
-    aggregation_bits*: BitField
-    custody_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits
+    custody_bits*: CommitteeValidatorsBits ##\
     ## Phase 1 - the handling of this field is probably broken..
     aggregate_signature*: ValidatorSig

@@ -54,7 +54,7 @@ type
   # Yeah, you can do any linear combination of signatures. but you have to
   # remember the linear combination of pubkeys that constructed
   # if you have two instances of a signature from pubkey p, then you need 2*p
-  # in the group pubkey because the attestation bitfield is only 1 bit per
+  # in the group pubkey because the attestation bitlist is only 1 bit per
   # pubkey right now, attestations do not support this it could be extended to
   # support N overlaps up to N times per pubkey if we had N bits per validator
   # instead of 1

@@ -12,7 +12,7 @@ type
   FetchAncestorsResponseHandler = proc (b: BeaconBlock) {.gcsafe.}

 proc fetchAncestorBlocksFromPeer(peer: Peer, rec: FetchRecord, responseHandler: FetchAncestorsResponseHandler) {.async.} =
-  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots.int, 0, 1)
+  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots, 0, true)
   if blocks.isSome:
     for b in blocks.get:
       responseHandler(b)

@@ -6,10 +6,10 @@
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

 import
-  algorithm, chronicles, collections/sets, math, options, sequtils,
+  tables, algorithm, sets, math, options, sequtils,
+  chronicles, stew/bitseqs,
   ../extras, ../ssz, ../beacon_node_types,
-  ./bitfield, ./crypto, ./datatypes, ./digest, ./helpers, ./validator,
-  tables
+  ./crypto, ./datatypes, ./digest, ./helpers, ./validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#verify_merkle_branch
 func verify_merkle_branch(leaf: Eth2Digest, proof: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
@@ -357,8 +357,8 @@ func is_valid_indexed_attestation*(
   # Check if ``indexed_attestation`` has valid indices and signature.

   let
-    bit_0_indices = indexed_attestation.custody_bit_0_indices
-    bit_1_indices = indexed_attestation.custody_bit_1_indices
+    bit_0_indices = indexed_attestation.custody_bit_0_indices.asSeq
+    bit_1_indices = indexed_attestation.custody_bit_1_indices.asSeq

   # Verify no index has custody bit equal to 1 [to be removed in phase 1]
   if len(bit_1_indices) != 0:
@@ -370,7 +370,7 @@ func is_valid_indexed_attestation*(
     return false

   # Verify index sets are disjoint
-  if len(intersection(toSet(bit_0_indices), toSet(bit_1_indices))) != 0:
+  if len(intersection(bit_0_indices.toSet, bit_1_indices.toSet)) != 0:
     return false

   # Verify indices are sorted
@@ -405,11 +405,11 @@ func is_valid_indexed_attestation*(
 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_attesting_indices
 func get_attesting_indices*(state: BeaconState,
                             attestation_data: AttestationData,
-                            bitfield: BitField,
+                            bits: CommitteeValidatorsBits,
                             stateCache: var StateCache):
                             HashSet[ValidatorIndex] =
   ## Return the sorted attesting indices corresponding to ``attestation_data``
-  ## and ``bitfield``.
+  ## and ``bits``.
   ## The spec goes through a lot of hoops to sort things, and sometimes
   ## constructs sets from the results here. The basic idea is to always
   ## just keep it in a HashSet, which seems to suffice. If needed, it's
@@ -420,15 +420,15 @@ func get_attesting_indices*(state: BeaconState,
     state, attestation_data.target.epoch, attestation_data.crosslink.shard,
     stateCache)
   for i, index in committee:
-    if get_bitfield_bit(bitfield, i):
+    if bits[i]:
       result.incl index

-func get_attesting_indices_seq*(
-    state: BeaconState, attestation_data: AttestationData, bitfield: BitField):
-    seq[ValidatorIndex] =
+func get_attesting_indices_seq*(state: BeaconState,
+                                attestation_data: AttestationData,
+                                bits: CommitteeValidatorsBits): seq[ValidatorIndex] =
   var cache = get_empty_per_epoch_cache()
   toSeq(items(get_attesting_indices(
-    state, attestation_data, bitfield, cache)))
+    state, attestation_data, bits, cache)))

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_indexed_attestation
 func get_indexed_attestation(state: BeaconState, attestation: Attestation,
@@ -469,13 +469,13 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation,
   ## 0.6.3 highlights and explicates) except in that the spec,
   ## for no obvious reason, verifies it.
   IndexedAttestation(
-    custody_bit_0_indices: sorted(
+    custody_bit_0_indices: CustodyBitIndices sorted(
       mapIt(custody_bit_0_indices, it.uint64), system.cmp),
     # toSeq pointlessly constructs int-indexable copy so mapIt can infer type;
     # see above
-    custody_bit_1_indices:
-      sorted(mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
-        system.cmp),
+    custody_bit_1_indices: CustodyBitIndices sorted(
+      mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
+      system.cmp),
     data: attestation.data,
     signature: attestation.signature,
   )

@@ -1,50 +0,0 @@
-import stew/byteutils, json_serialization, stew/bitops2
-
-type
-  BitField* = object
-    ## A simple bit field type that follows the semantics of the spec, with
-    ## regards to bit endian operations
-    # TODO stew contains utilities for with bitsets - could try to
-    # recycle that, but there are open questions about bit endianess there.
-    bits*: seq[byte]
-
-func ceil_div8(v: int): int = (v + 7) div 8
-
-func init*(T: type BitField, bits: int): BitField =
-  BitField(bits: newSeq[byte](ceil_div8(bits)))
-
-# TODO fix this for state tests..
-#proc readValue*(r: var JsonReader, a: var BitField) {.inline.} =
-#  a.bits = r.readValue(string).hexToSeqByte()
-
-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_bitfield_bit
-func get_bitfield_bit*(bitfield: BitField, i: int): bool =
-  # Extract the bit in ``bitfield`` at position ``i``.
-  doAssert 0 <= i div 8, "i: " & $i & " i div 8: " & $(i div 8)
-  doAssert i div 8 < bitfield.bits.len, "i: " & $i & " i div 8: " & $(i div 8)
-  ((bitfield.bits[i div 8] shr (i mod 8)) mod 2) > 0'u8
-
-# TODO spec candidatidates below, though they're used only indirectly there..
-func set_bitfield_bit*(bitfield: var BitField, i: int) =
-  bitfield.bits[i div 8] = bitfield.bits[i div 8] or 1'u8 shl (i mod 8)
-
-func combine*(tgt: var BitField, src: BitField) =
-  for i in 0 ..< tgt.bits.len:
-    tgt.bits[i] = tgt.bits[i] or src.bits[i]
-
-func overlaps*(a, b: BitField): bool =
-  for i in 0..<a.bits.len:
-    if (a.bits[i] and b.bits[i]) > 0'u8:
-      return true
-
-func countOnes*(a: BitField): int {.inline.} =
-  for v in a.bits: result += countOnes(v)
-
-func len*(a: BitField): int {.inline.} =
-  countOnes(a)
-
-func isSubsetOf*(a, b: Bitfield): bool =
-  for i in 0 ..< (len(a.bits) * 8):
-    if get_bitfield_bit(a, i) and not get_bitfield_bit(b, i):
-      return false
-  true
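The file above is deleted because stew/bitseqs now provides the bit-collection type. The semantics of the removed helpers carry over, and can be captured bytewise (a sketch, not the stew implementation): combine is bitwise OR, overlaps tests for a non-zero AND, and isSubsetOf holds when no bit of `a` falls outside `b`.

func combine(tgt: var seq[byte], src: seq[byte]) =
  # bitwise union, in place
  for i in 0 ..< tgt.len: tgt[i] = tgt[i] or src[i]

func overlaps(a, b: seq[byte]): bool =
  # true when any bit is set in both operands
  for i in 0 ..< a.len:
    if (a[i] and b[i]) != 0: return true

func isSubsetOf(a, b: seq[byte]): bool =
  # no bit of `a` may fall outside `b`
  for i in 0 ..< a.len:
    if (a[i] and not b[i]) != 0: return false
  true

let x = @[0b0101'u8]
var y = @[0b0001'u8]
assert y.isSubsetOf(x) and not x.isSubsetOf(y)
y.combine(x)
assert y == x and y.overlaps(x)
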
Binary file not shown.

@@ -46,7 +46,7 @@

 import
   sequtils,
-  hashes, eth/rlp,
+  stew/objects, hashes, eth/rlp, nimcrypto/utils,
   blscurve, json_serialization,
   digest

@@ -56,20 +56,102 @@ export
 export blscurve.init, blscurve.getBytes, blscurve.combine, blscurve.`$`, blscurve.`==`

 type
-  ValidatorPubKey* = blscurve.VerKey
+  BlsValueType* = enum
+    Real
+    OpaqueBlob
+
+  BlsValue*[T] = object
+    # TODO This is a temporary type needed until we sort out the
+    # issues with invalid BLS values appearing in the SSZ test suites.
+    case kind*: BlsValueType
+    of Real:
+      blsValue*: T
+    of OpaqueBlob:
+      when T is blscurve.Signature:
+        blob*: array[96, byte]
+      else:
+        blob*: array[48, byte]
+
+  ValidatorPubKey* = BlsValue[blscurve.VerKey]
+  # ValidatorPubKey* = blscurve.VerKey
+
+  # ValidatorPubKey* = array[48, byte]
+  # The use of byte arrays proved to be a dead end pretty quickly.
+  # Plenty of code needs to be modified for a successful build and
+  # the changes will negatively affect the performance.
+
+  # ValidatorPrivKey* = BlsValue[blscurve.SigKey]
   ValidatorPrivKey* = blscurve.SigKey
-  ValidatorSig* = blscurve.Signature

+  ValidatorSig* = BlsValue[blscurve.Signature]
+
+  BlsCurveType* = VerKey|SigKey|Signature
   ValidatorPKI* = ValidatorPrivKey|ValidatorPubKey|ValidatorSig

-func shortLog*(x: ValidatorPKI): string =
+proc init*[T](BLS: type BlsValue[T], val: auto): BLS =
+  result.kind = BlsValueType.Real
+  result.blsValue = init(T, val)
+
+func `$`*(x: BlsValue): string =
+  if x.kind == Real:
+    $x.blsValue
+  else:
+    "r:" & toHex(x.blob)
+
+func `==`*(a, b: BlsValue): bool =
+  if a.kind != b.kind: return false
+  if a.kind == Real:
+    return a.blsValue == b.blsValue
+  else:
+    return a.blob == b.blob
+
+func getBytes*(x: BlsValue): auto =
+  if x.kind == Real:
+    getBytes x.blsValue
+  else:
+    x.blob
+
+func shortLog*(x: BlsValue): string =
   ($x)[0..7]

-template hash*(k: ValidatorPubKey|ValidatorPrivKey): Hash =
-  hash(k.getBytes())
+func shortLog*(x: BlsCurveType): string =
+  ($x)[0..7]

-func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey = pk.getKey()
+proc hash*(x: BlsValue): Hash {.inline.} =
+  if x.kind == Real:
+    hash x.blsValue.getBytes()
+  else:
+    hash x.blob

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_aggregate_pubkeys
+template hash*(x: BlsCurveType): Hash =
+  hash(getBytes(x))
+
+template `==`*[T](a: BlsValue[T], b: T): bool =
+  a.blsValue == b
+
+template `==`*[T](a: T, b: BlsValue[T]): bool =
+  a == b.blsValue
+
+func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey =
+  when ValidatorPubKey is BlsValue:
+    ValidatorPubKey(kind: Real, blsValue: pk.getKey())
+  elif ValidatorPubKey is array:
+    pk.getKey.getBytes
+  else:
+    pk.getKey
+
+proc combine*[T](a: openarray[BlsValue[T]]): T =
+  doAssert a.len > 0 and a[0].kind == Real
+  result = a[0].blsValue
+  for i in 1 ..< a.len:
+    doAssert a[i].kind == Real
+    result.combine a[i].blsValue
+
+proc combine*[T](x: var BlsValue[T], other: BlsValue[T]) =
+  doAssert x.kind == Real and other.kind == Real
+  x.blsValue.combine(other.blsValue)
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_aggregate_pubkeys
 func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
   var empty = true
   for key in keys:
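BlsValue defers parsing: the SSZ test suites contain byte blobs that are not valid curve points, so a value is held either as a decoded key/signature or as an opaque blob. The same tagged-union pattern in a self-contained sketch (DummyKey and MaybeReal are illustrative stand-ins for blscurve.VerKey and BlsValue):

type
  DummyKey = object
    v: int
  MaybeReal[T] = object
    case real: bool
    of true: value: T       # a decoded, usable value
    of false: blob: array[48, byte]  # raw bytes kept only for round-tripping

func fromBytes(R: type MaybeReal[DummyKey], bytes: array[48, byte],
               testing: bool): R =
  if testing:
    R(real: false, blob: bytes)                       # keep the blob as-is
  else:
    R(real: true, value: DummyKey(v: int(bytes[0])))  # "parse" the key

var raw: array[48, byte]
raw[0] = 7
let k = MaybeReal[DummyKey].fromBytes(raw, testing = false)
assert k.real and k.value.v == 7
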
|
@ -79,14 +161,18 @@ func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
|
|||
else:
|
||||
result.combine(key)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify
|
||||
func bls_verify*(
|
||||
pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
|
||||
domain: uint64): bool =
|
||||
# name from spec!
|
||||
sig.verify(msg, domain, pubkey)
|
||||
when ValidatorPubKey is BlsValue:
|
||||
doAssert sig.kind == Real and pubkey.kind == Real
|
||||
sig.blsValue.verify(msg, domain, pubkey.blsValue)
|
||||
else:
|
||||
sig.verify(msg, domain, pubkey)
|
||||
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify_multiple
|
||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify_multiple
|
||||
func bls_verify_multiple*(
|
||||
pubkeys: seq[ValidatorPubKey], message_hashes: openArray[Eth2Digest],
|
||||
sig: ValidatorSig, domain: uint64): bool =
|
||||
|
@@ -98,49 +184,94 @@ func bls_verify_multiple*(
     let (pubkey, message_hash) = pubkey_message_hash
     # TODO spec doesn't say to handle this specially, but it's silly to
    # validate without any actual public keys.
-    if pubkey != ValidatorPubKey() and
-       not sig.verify(message_hash.data, domain, pubkey):
+    if not pubkey.bls_verify(message_hash.data, sig, domain):
      return false

   true

-func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
-               domain: uint64): ValidatorSig =
-  # name from spec!
-  key.sign(domain, msg)
+when ValidatorPrivKey is BlsValue:
+  func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
+                 domain: uint64): ValidatorSig =
+    # name from spec!
+    if key.kind == Real:
+      ValidatorSig(kind: Real, blsValue: key.blsValue.sign(domain, msg))
+    else:
+      ValidatorSig(kind: OpaqueBlob)
+else:
+  func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
+                 domain: uint64): ValidatorSig =
+    # name from spec!
+    ValidatorSig(kind: Real, blsValue: key.sign(domain, msg))
+
+proc fromBytes*[T](R: type BlsValue[T], bytes: openarray[byte]): R =
+  when defined(ssz_testing):
+    result = R(kind: OpaqueBlob, blob: toArray(result.blob.len, bytes))
+  else:
+    result = R(kind: Real, blsValue: init(T, bytes))
+
+proc initFromBytes*[T](val: var BlsValue[T], bytes: openarray[byte]) =
+  val = fromBytes(BlsValue[T], bytes)
+
+proc initFromBytes*(val: var BlsCurveType, bytes: openarray[byte]) =
+  val = init(type(val), bytes)

 proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPubKey) {.inline.} =
-  value = VerKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorSig) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorSig) {.inline.} =
-  value = Signature.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorPrivKey) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey) {.inline.} =
-  value = SigKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

-proc newPrivKey*(): ValidatorPrivKey = SigKey.random()
+when ValidatorPrivKey is BlsValue:
+  proc newPrivKey*(): ValidatorPrivKey =
+    ValidatorPrivKey(kind: Real, blsValue: SigKey.random())
+else:
+  proc newPrivKey*(): ValidatorPrivKey =
+    SigKey.random()

 # RLP serialization (TODO: remove if no longer necessary)
-proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
-  writer.append value.getBytes()
+when ValidatorPubKey is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
+  proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
+    writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorPubKey): T {.inline.} =
-  result = ValidatorPubKey.init(rlp.toBytes.toOpenArray)
-  rlp.skipElem()
+  result = fromBytes(T, rlp.toBytes)

-proc append*(writer: var RlpWriter, value: ValidatorSig) =
-  writer.append value.getBytes()
+when ValidatorSig is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorSig) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
+  proc append*(writer: var RlpWriter, value: ValidatorSig) =
+    writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorSig): T {.inline.} =
-  result = ValidatorSig.init(rlp.toBytes.toOpenArray)
-  rlp.skipElem()
+  let bytes = fromBytes(T, rlp.toBytes)

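Most of the procs above are duplicated under `when ValidatorPrivKey is BlsValue` so the module keeps compiling if the type aliases are flipped back to the raw blscurve types; the branch not taken is never instantiated. The pattern in a reduced, self-contained sketch (Plain and Wrapped are illustrative stand-ins):

type
  Plain = distinct int
  Wrapped = object
    inner: int

template payload(x: Plain): int = int(x)
template payload(x: Wrapped): int = x.inner

proc describe[T](x: T): string =
  # the branch not taken is never compiled, so each type only needs
  # the operations its own branch uses
  when T is Wrapped:
    "wrapped:" & $x.payload
  else:
    "plain:" & $x.payload

assert describe(Wrapped(inner: 1)) == "wrapped:1"
assert describe(Plain(2)) == "plain:2"
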
@@ -18,9 +18,9 @@
 # types / composition

 import
-  hashes, math, json,
-  chronicles, eth/[common, rlp],
-  ./bitfield, ./crypto, ./digest
+  macros, hashes, math, json, strutils,
+  stew/[byteutils, bitseqs], chronicles, eth/[common, rlp],
+  ../ssz/types, ./crypto, ./digest

 # TODO Data types:
 # Presently, we're reusing the data types from the serialization (uint64) in the
@@ -41,7 +41,7 @@ import

 # Constant presets
 # https://github.com/ethereum/eth2.0-specs/tree/v0.6.3/configs/constant_presets/
-const const_preset*{.strdefine.} = "mainnet"
+const const_preset* {.strdefine.} = "minimal"

 when const_preset == "mainnet":
   import ./presets/mainnet
@@ -63,16 +63,21 @@ const
   GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
   ## compute_epoch_of_slot(GENESIS_SLOT)

+  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
+
   # Not part of spec. Still useful, pending removing usage if appropriate.
   ZERO_HASH* = Eth2Digest()

+template maxSize*(n: int) {.pragma.}
+
 type
   ValidatorIndex* = range[0'u32 .. 0xFFFFFF'u32] # TODO: wrap-around

   Shard* = uint64
   Gwei* = uint64
   Domain* = uint64

+  BitList*[maxLen: static int] = distinct BitSeq
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#proposerslashing
   ProposerSlashing* = object
     proposer_index*: uint64 ##\
@@ -91,11 +96,13 @@ type
     attestation_2*: IndexedAttestation ## \
     ## Second attestation

+  CustodyBitIndices* = List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#indexedattestation
   IndexedAttestation* = object
-    # These probably should be seq[ValidatorIndex], but that throws RLP errors
-    custody_bit_0_indices*: seq[uint64]
-    custody_bit_1_indices*: seq[uint64]
+    custody_bit_0_indices*: CustodyBitIndices
+    custody_bit_1_indices*: CustodyBitIndices

     data*: AttestationData ## \
     ## Attestation data
@@ -103,15 +110,17 @@ type
     signature*: ValidatorSig ## \
     ## Aggregate signature

+  CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#attestation
   Attestation* = object
-    aggregation_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits ##\
     ## Attester aggregation bitfield

     data*: AttestationData ##\
     ## Attestation data

-    custody_bits*: BitField ##\
+    custody_bits*: CommitteeValidatorsBits ##\
     ## Custody bitfield

     signature*: ValidatorSig ##\
@@ -143,7 +152,7 @@ type

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#deposit
   Deposit* = object
-    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] ##\
+    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
     ## Merkle path to deposit data list root

     data*: DepositData
@@ -159,9 +168,6 @@ type
     amount*: uint64 ##\
     ## Amount in Gwei

-    # TODO remove, not in spec
-    dummy*: uint64
-
     signature*: ValidatorSig ##\
     ## Container self-signature

@@ -280,7 +286,7 @@ type

     # Shuffling
     start_shard*: Shard
-    randao_mixes*: array[LATEST_RANDAO_MIXES_LENGTH, Eth2Digest]
+    randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]

     active_index_roots*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] ##\
     ## Active index digests for light clients
@@ -348,10 +354,10 @@ type

   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#pendingattestation
   PendingAttestation* = object
-    aggregation_bits*: BitField ## Attester participation bitfield
-    data*: AttestationData ## Attestation data
-    inclusion_delay*: uint64 ## Inclusion delay
-    proposer_index*: ValidatorIndex ## Proposer index
+    aggregation_bits*: CommitteeValidatorsBits ## Attester participation bitfield
+    data*: AttestationData ## Attestation data
+    inclusion_delay*: uint64 ## Inclusion delay
+    proposer_index*: uint64 ## Proposer index

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#historicalbatch
   HistoricalBatch* = object
@@ -382,6 +388,65 @@ type
     data*: BeaconState
     root*: Eth2Digest # hash_tree_root (not signing_root!)

+template foreachSpecType*(op: untyped) =
+  ## These are all spec types that will appear in network messages
+  ## and persistent consensus data. This helper template is useful
+  ## for populating RTTI tables that concern them.
+  op Attestation
+  op AttestationData
+  op AttestationDataAndCustodyBit
+  op AttesterSlashing
+  op BeaconBlock
+  op BeaconBlockBody
+  op BeaconBlockHeader
+  op BeaconState
+  op Crosslink
+  op Deposit
+  op DepositData
+  op Eth1Data
+  op Fork
+  op HistoricalBatch
+  op IndexedAttestation
+  op PendingAttestation
+  op ProposerSlashing
+  op Transfer
+  op Validator
+  op VoluntaryExit
+
+macro fieldMaxLen*(x: typed): untyped =
+  # TODO This macro is a temporary solution for the lack of a
+  # more proper way to specify the max length of the List[T; N]
+  # objects in the spec.
+  # May be replaced with `getCustomPragma` once we upgrade to
+  # Nim 0.20.2 or with a distinct List type, which would require
+  # more substantial refactorings in the spec code.
+  if x.kind != nnkDotExpr:
+    return newLit(0)
+
+  let size = case $x[1]
+             of "pubkeys",
+                "compact_validators",
+                "custody_bit_0_indices",
+                "custody_bit_1_indices",
+                "aggregation_bits",
+                "custody_bits": int64(MAX_VALIDATORS_PER_COMMITTEE)
+             of "proposer_slashings": MAX_PROPOSER_SLASHINGS
+             of "attester_slashings": MAX_ATTESTER_SLASHINGS
+             of "attestations": MAX_ATTESTATIONS
+             of "deposits": MAX_DEPOSITS
+             of "voluntary_exits": MAX_VOLUNTARY_EXITS
+             of "transfers": MAX_TRANSFERS
+             of "historical_roots": HISTORICAL_ROOTS_LIMIT
+             of "eth1_data_votes": SLOTS_PER_ETH1_VOTING_PERIOD
+             of "validators": VALIDATOR_REGISTRY_LIMIT
+             of "balances": VALIDATOR_REGISTRY_LIMIT
+             of "previous_epoch_attestations",
+                "current_epoch_attestations": MAX_ATTESTATIONS *
+                                              SLOTS_PER_EPOCH
+             else: 0
+
+  newLit size
+
 func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
   ($state.validators[validatorIdx].pubkey)[0..7]

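fieldMaxLen works by inspecting the AST of a dot expression at compile time: anything that is not a dot expression yields 0, otherwise the field's name selects the spec limit. A reduced sketch of the same macro shape (maxLenOf, Body, and the limits are illustrative):

import macros

macro maxLenOf(x: typed): untyped =
  # only `container.field` expressions carry a limit
  if x.kind != nnkDotExpr:
    return newLit(0)
  let size = case $x[1]
             of "attestations": 128
             of "deposits": 16
             else: 0
  newLit size

type Body = object
  attestations: seq[int]
  deposits: seq[int]

var b: Body
assert maxLenOf(b.attestations) == 128
assert maxLenOf(b.deposits) == 16
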
@@ -439,6 +504,51 @@ proc `%`*(i: uint64): JsonNode =
 ethTimeUnit Slot
 ethTimeUnit Epoch

+Json.useCustomSerialization(BeaconState.justification_bits):
+  read:
+    let s = reader.readValue(string)
+    if s.len != 4: raise newException(ValueError, "unexpected number of bytes")
+    s.parseHexInt.uint8
+
+  write:
+    writer.writeValue "0x" & value.toHex
+
+Json.useCustomSerialization(BitSeq):
+  read:
+    BitSeq reader.readValue(string).hexToSeqByte
+
+  write:
+    writer.writeValue "0x" & value.bytes.toHex
+
+template readValue*(reader: var JsonReader, value: var BitList) =
+  type T = type(value)
+  value = T readValue(reader, BitSeq)
+
+template writeValue*(writer: var JsonWriter, value: BitList) =
+  writeValue(writer, BitSeq value)
+
+template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
+template len*(x: BitList): auto = len(BitSeq(x))
+template bytes*(x: BitList): auto = bytes(BitSeq(x))
+template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
+template `[]=`*(x: BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
+template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
+template raiseBit*(x: BitList, idx: int) = raiseBit(BitSeq(x), idx)
+template lowerBit*(x: BitList, idx: int) = lowerBit(BitSeq(x), idx)
+template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
+template combine*(a, b: BitList) = combine(BitSeq(a), BitSeq(b))
+template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
+
+when useListType:
+  template len*[T; N](x: List[T, N]): auto = len(seq[T](x))
+  template `[]`*[T; N](x: List[T, N], idx: auto): auto = seq[T](x)[idx]
+  template `[]=`*[T; N](x: List[T, N], idx: auto, val: bool) = seq[T](x)[idx] = val
+  template `==`*[T; N](a, b: List[T, N]): bool = seq[T](a) == seq[T](b)
+  template asSeq*[T; N](x: List[T, N]): auto = seq[T](x)
+  template `&`*[T; N](a, b: List[T, N]): List[T, N] = seq[T](a) & seq[T](b)
+else:
+  template asSeq*[T; N](x: List[T, N]): auto = x
+
 func humaneSlotNum*(s: Slot): uint64 =
   s - GENESIS_SLOT

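BitList stays a distinct BitSeq and borrows each operation through a one-line template, so the length cap lives in the type while the stew implementation does the work. The same pattern in miniature (Flags and Raw are stand-ins for BitList and BitSeq):

type
  Raw = seq[bool]
  Flags[maxLen: static int] = distinct Raw

# each operation is forwarded to the base type by converting the distinct value
template init(T: type Flags, n: int): auto = T(newSeq[bool](n))
template len(x: Flags): int = len(Raw(x))
template `[]`(x: Flags, i: int): bool = Raw(x)[i]
template `[]=`(x: Flags, i: int, v: bool) = Raw(x)[i] = v

var f = Flags[8].init(8)   # the static maxLen documents the spec limit
f[2] = true
assert f.len == 8 and f[2]
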
Binary file not shown.

@@ -44,17 +44,16 @@ func eth2hash*(v: openArray[byte]): Eth2Digest {.inline.} =
   var ctx: sha256
   ctx.init()
   ctx.update(v)
-  result = ctx.finish()
+  ctx.finish()

 template withEth2Hash*(body: untyped): Eth2Digest =
   ## This little helper will init the hash function and return the sliced
   ## hash:
   ## let hashOfData = withHash: h.update(data)
-  var h {.inject.}: sha256
+  var h {.inject.}: sha256
   h.init()
   body
-  var res = h.finish()
-  res
+  h.finish()

 func hash*(x: Eth2Digest): Hash =
   ## Hash for digests for Nim hash tables
@@ -63,3 +62,4 @@ func hash*(x: Eth2Digest): Hash =
   # We just slice the first 4 or 8 bytes of the block hash
   # depending of if we are on a 32 or 64-bit platform
   result = cast[ptr Hash](unsafeAddr x)[]
+

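withEth2Hash injects `h`, runs the body, and now returns h.finish() directly instead of copying through a temporary. A usage sketch mirroring the template's contract as shown above (withSha256 is an illustrative local name):

import nimcrypto

template withSha256(body: untyped): MDigest[256] =
  var h {.inject.}: sha256   # `h` is visible to the caller's body
  h.init()
  body
  h.finish()                 # the finished digest is the template's value

let digest = withSha256:
  h.update "hello"

echo digest
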
@@ -82,7 +82,6 @@ const

   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters
@@ -139,8 +138,10 @@ const
   # ---------------------------------------------------------------
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#state-list-lengths
-  LATEST_RANDAO_MIXES_LENGTH* = 8192
-  EPOCHS_PER_HISTORICAL_VECTOR* = 8192 # 2'u64^13, epochs
-  EPOCHS_PER_SLASHINGS_VECTOR* = 8192 # epochs
+  EPOCHS_PER_HISTORICAL_VECTOR* = 65536
+  EPOCHS_PER_SLASHINGS_VECTOR* = 8192
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------

@@ -65,7 +65,6 @@ const
   # Unchanged
   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters
@@ -88,7 +87,7 @@ const

   # Changed
   SLOTS_PER_ETH1_VOTING_PERIOD* = 16
-  SLOTS_PER_HISTORICAL_ROOT* = 128 # 64 doesn't work with GENESIS_SLOT == 0?
+  SLOTS_PER_HISTORICAL_ROOT* = 64 # doesn't work with GENESIS_SLOT == 0?

   # Unchanged
   MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8
@@ -104,6 +103,8 @@ const
   LATEST_RANDAO_MIXES_LENGTH* = 64
   EPOCHS_PER_HISTORICAL_VECTOR* = 64
   EPOCHS_PER_SLASHINGS_VECTOR* = 64
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------

@@ -35,7 +35,7 @@
 import # TODO - cleanup imports
   algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#block-header
 proc processBlockHeader(

@@ -34,9 +34,9 @@

 import # TODO - cleanup imports
   algorithm, math, options, sequtils, tables,
-  chronicles, json_serialization/std/sets,
+  stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_total_active_balance
 func get_total_active_balance(state: BeaconState): Gwei =
@@ -198,13 +198,6 @@ proc process_justification_and_finalization(
     old_previous_justified_checkpoint = state.previous_justified_checkpoint
     old_current_justified_checkpoint = state.current_justified_checkpoint

-  ## Bitvector[4] <-> uint8 mapping:
-  ## state.justification_bits[0] is (state.justification_bits shr 0) and 1
-  ## state.justification_bits[1] is (state.justification_bits shr 1) and 1
-  ## state.justification_bits[2] is (state.justification_bits shr 2) and 1
-  ## state.justification_bits[3] is (state.justification_bits shr 3) and 1
-  ## https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/simple-serialize.md#bitvectorn
-
   # Process justifications
   state.previous_justified_checkpoint = state.current_justified_checkpoint

@@ -247,8 +240,7 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: previous_epoch,
                  root: get_block_root(state, previous_epoch))

-    # Spec: state.justification_bits[1] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 1)
+    state.justification_bits.raiseBit 1

   let matching_target_attestations_current =
     get_matching_target_attestations(state, current_epoch) # Current epoch
@@ -258,34 +250,33 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: current_epoch,
                  root: get_block_root(state, current_epoch))

-    # Spec: state.justification_bits[0] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 0)
+    state.justification_bits.raiseBit 0

   # Process finalizations
   let bitfield = state.justification_bits

   ## The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th
   ## as source
-  if (bitfield shr 1) mod 8 == 0b111 and
-      old_previous_justified_checkpoint.epoch + 3 == current_epoch:
+  if (bitfield and 0b1110) == 0b1110 and
+      old_previous_justified_checkpoint.epoch + 3 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as
   ## source
-  if (bitfield shr 1) mod 4 == 0b11 and
-      old_previous_justified_checkpoint.epoch + 2 == current_epoch:
+  if (bitfield and 0b110) == 0b110 and
+      old_previous_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as
   ## source
-  if (bitfield shr 0) mod 8 == 0b111 and
-      old_current_justified_checkpoint.epoch + 2 == current_epoch:
+  if (bitfield and 0b111) == 0b111 and
+      old_current_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

   ## The 1st/2nd most recent epochs are justified, the 1st using the 2nd as
   ## source
-  if (bitfield shr 0) mod 4 == 0b11 and
-      old_current_justified_checkpoint.epoch + 1 == current_epoch:
+  if (bitfield and 0b11) == 0b11 and
+      old_current_justified_checkpoint.epoch + 1 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#crosslinks
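The rewritten finalization checks read the same windows of the Bitvector[4]: for example, "bits 1..3 all set" was (bitfield shr 1) mod 8 == 0b111 and becomes the mask test (bitfield and 0b1110) == 0b1110. A quick exhaustive check of that equivalence over every nibble:

for bits in 0'u8 .. 15'u8:
  let shiftForm = (bits shr 1) mod 8 == 0b111   # old formulation
  let maskForm = (bits and 0b1110) == 0b1110    # new formulation
  assert shiftForm == maskForm
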
@@ -384,7 +375,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):

     let proposer_reward =
       (get_base_reward(state, index) div PROPOSER_REWARD_QUOTIENT).Gwei
-    rewards[attestation.proposer_index] += proposer_reward
+    rewards[attestation.proposer_index.int] += proposer_reward
     let max_attester_reward = get_base_reward(state, index) - proposer_reward
     rewards[index] +=
       (max_attester_reward *

@@ -9,435 +9,569 @@
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

 import
-  endians, typetraits, options, algorithm, math,
-  faststreams/input_stream, serialization, eth/common, nimcrypto/sha2,
-  ./spec/[bitfield, crypto, datatypes, digest]
+  endians, stew/shims/macros, options, algorithm, math,
+  stew/[bitops2, bitseqs, objects, varints], stew/ranges/ptr_arith, stint,
+  faststreams/input_stream, serialization, serialization/testing/tracing,
+  nimcrypto/sha2, blscurve, eth/common,
+  ./spec/[crypto, datatypes, digest],
+  ./ssz/[types, bytes_reader]

 # ################### Helper functions ###################################

 export
-  serialization
+  serialization, types, bytes_reader

+when defined(serialization_tracing):
+  import
+    typetraits, stew/ranges/ptr_arith
+
+const
+  bytesPerChunk = 32
+  bitsPerChunk = bytesPerChunk * 8
+  maxChunkTreeDepth = 25
+  defaultMaxObjectSize = 1 * 1024 * 1024
+
 type
   SszReader* = object
     stream: ByteStreamVar
+    maxObjectSize: int

   SszWriter* = object
     stream: OutputStreamVar

-  SszError* = object of SerializationError
-  CorruptedDataError* = object of SszError
+  BasicType = char|bool|SomeUnsignedInt|StUint

-  RecordWritingMemo = object
-    initialStreamPos: int
-    sizePrefixCursor: DelayedWriteCursor
+  SszChunksMerkelizer = ref object of RootObj
+    combinedChunks: array[maxChunkTreeDepth, Eth2Digest]
+    totalChunks: uint
+    limit: uint64
+
+  Chunk = array[bytesPerChunk, byte]
+
+  TypeWithMaxLen[T; maxLen: static int64] = distinct T
+
+  SizePrefixed*[T] = distinct T
+  SszMaxSizeExceeded* = object of SerializationError
+
+  VarSizedWriterCtx = object
+    fixedParts: WriteCursor
+    offset: int
+
+  FixedSizedWriterCtx = object

 serializationFormat SSZ,
                     Reader = SszReader,
                     Writer = SszWriter,
                     PreferedOutput = seq[byte]

-proc init*(T: type SszReader, stream: ByteStreamVar): T =
-  result.stream = stream
+proc init*(T: type SszReader,
+           stream: ByteStreamVar,
+           maxObjectSize = defaultMaxObjectSize): T =
+  T(stream: stream, maxObjectSize: maxObjectSize)

 proc mount*(F: type SSZ, stream: ByteStreamVar, T: type): T =
   mixin readValue
   var reader = init(SszReader, stream)
   reader.readValue(T)

-func toSSZType(x: Slot|Epoch): auto = x.uint64
-func toSSZType(x: auto): auto = x
+method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe.} =
+  # TODO: implement proper error string
+  "Serialisation error while processing " & filename

-# toBytesSSZ convert simple fixed-length types to their SSZ wire representation
-func toBytesSSZ(x: SomeInteger): array[sizeof(x), byte] =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
+when false:
+  # TODO: Nim can't handle yet this simpler definition. File an issue.
+  template valueOf[T; N](x: TypeWithMaxLen[T, N]): auto = T(x)
+else:
+  proc unwrapImpl[T; N](x: ptr TypeWithMaxLen[T, N]): ptr T =
+    cast[ptr T](x)

-  when x.sizeof == 8: littleEndian64(result.addr, x.unsafeAddr)
-  elif x.sizeof == 4: littleEndian32(result.addr, x.unsafeAddr)
-  elif x.sizeof == 2: littleEndian16(result.addr, x.unsafeAddr)
-  elif x.sizeof == 1: copyMem(result.addr, x.unsafeAddr, sizeof(result))
-  else: {.fatal: "Unsupported type serialization: " & $(type(x)).name.}
+  template valueOf(x: TypeWithMaxLen): auto =
+    let xaddr = unsafeAddr x
+    unwrapImpl(xaddr)[]

-func toBytesSSZ(x: ValidatorIndex): array[3, byte] =
-  ## Integers are all encoded as little endian and not padded
-  let v = x.uint32
-  result[0] = byte(v and 0xff)
-  result[1] = byte((v shr 8) and 0xff)
-  result[2] = byte((v shr 16) and 0xff)
+template toSszType*(x: auto): auto =
+  mixin toSszType

-func toBytesSSZ(x: bool): array[1, byte] =
-  [if x: 1'u8 else: 0'u8]
+  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
+  elif x is Eth2Digest: x.data
+  elif x is BlsValue|BlsCurveType: getBytes(x)
+  elif x is BitSeq|BitList: bytes(x)
+  elif x is TypeWithMaxLen: toSszType valueOf(x)
+  elif useListType and x is List: seq[x.T](x)
+  else: x

-func toBytesSSZ(x: EthAddress): array[sizeof(x), byte] = x
-func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
+func writeFixedSized(c: var WriteCursor, x: auto) =
+  mixin toSszType

-# TODO these two are still being debated:
-# https://github.com/ethereum/eth2.0-specs/issues/308#issuecomment-447026815
-func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()
+  when x is byte:
+    c.append x
+  elif x is bool|char:
+    c.append byte(ord(x))
+  elif x is SomeUnsignedInt:
+    when system.cpuEndian == bigEndian:
+      ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
+      ## All integers are serialized as **little endian**.
+      var bytes: array[sizeof(x), byte]
+      when x.sizeof == 8: littleEndian64(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 4: littleEndian32(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 2: littleEndian16(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 1: copyMem(addr bytes[0], x.unsafeAddr, sizeof(x))
+      else: unsupported x.type
+      c.append bytes
+    else:
+      let valueAddr = unsafeAddr x
+      trs "APPENDING INT ", x, " = ", makeOpenArray(cast[ptr byte](valueAddr), sizeof(x))
+      c.appendMemCopy x
+  elif x is StUint:
+    c.appendMemCopy x # TODO: Is this always correct?
+  elif x is array|string|seq|openarray:
+    when x[0] is byte:
+      trs "APPENDING FIXED SIZE BYTES", x
+      c.append x
+    else:
+      for elem in x:
+        trs "WRITING FIXED SIZE ARRAY ELEMENENT"
+        c.writeFixedSized toSszType(elem)
+  elif x is tuple|object:
+    enumInstanceSerializedFields(x, fieldName, field):
+      trs "WRITING FIXED SIZE FIELD", fieldName
+      c.writeFixedSized toSszType(field)
+  else:
+    unsupported x.type

-type
-  BasicType =
-    # Types that serialize down to a fixed-length array - most importantly,
-    # these values don't carry a length prefix in the final encoding. toBytesSSZ
-    # provides the actual nim-type-to-bytes conversion.
-    # TODO think about this for a bit - depends where the serialization of
-    # validator keys ends up going..
-    # TODO can't put ranges like ValidatorIndex in here:
-    # https://github.com/nim-lang/Nim/issues/10027
-    SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
-    bool | Slot | Epoch
+template writeFixedSized(s: OutputStreamVar, x: auto) =
+  writeFixedSized(s.cursor, x)

-func sszLen(v: BasicType): int = toBytesSSZ(v.toSSZType()).len
-func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len
+template supports*(_: type SSZ, T: type): bool =
+  mixin toSszType
+  anonConst compiles(fixedPortionSize toSszType(default(T)))

-func sszLen(v: object | tuple): int =
-  result = 4 # Length
-  for field in v.fields:
-    result += sszLen(type field)
-
-func sszLen(v: seq | array): int =
-  result = 4 # Length
-  for i in v:
-    result += sszLen(i)
-
-func sszLen(v: BitField): int =
-  sszLen(v.bits)
-
-# fromBytesSSZ copies the wire representation to a Nim variable,
-# assuming there's enough data in the buffer
-func fromBytesSSZ(T: type SomeInteger, data: openarray[byte]): T =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
-  ## TODO: Assumes data points to a sufficiently large buffer
-  doAssert data.len == sizeof(result)
-  # TODO: any better way to get a suitably aligned buffer in nim???
-  # see also: https://github.com/nim-lang/Nim/issues/9206
-  var tmp: uint64
-  var alignedBuf = cast[ptr byte](tmp.addr)
-  copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)
-
-  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
-  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
-  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
-  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
-  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
-
-func fromBytesSSZ(T: type bool, data: openarray[byte]): T =
-  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
-  # definition for now, but maybe this should be a parse error instead?
-  fromBytesSSZ(uint8, data) != 0
-
-func fromBytesSSZ(T: type ValidatorIndex, data: openarray[byte]): T =
-  ## Integers are all encoded as littleendian and not padded
-  doAssert data.len == 3
-  var tmp: uint32
-  tmp = tmp or uint32(data[0])
-  tmp = tmp or uint32(data[1]) shl 8
-  tmp = tmp or uint32(data[2]) shl 16
-  result = tmp.ValidatorIndex
-
-func fromBytesSSZ(T: type EthAddress, data: openarray[byte]): T =
-  doAssert data.len == sizeof(result)
-  copyMem(result.addr, unsafeAddr data[0], sizeof(result))
-
-func fromBytesSSZ(T: type Eth2Digest, data: openarray[byte]): T =
-  doAssert data.len == sizeof(result.data)
-  copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
-
-proc init*(T: type SszWriter, stream: OutputStreamVar): T =
+func init*(T: type SszWriter, stream: OutputStreamVar): T =
   result.stream = stream

-proc writeValue*(w: var SszWriter, obj: auto)
-
 # This is an alternative lower-level API useful for RPC
 # frameworks that can simulate the serialization of an
 # object without constructing an actual instance:
-proc beginRecord*(w: var SszWriter, T: type): RecordWritingMemo =
-  result.initialStreamPos = w.stream.pos
-  result.sizePrefixCursor = w.stream.delayFixedSizeWrite sizeof(uint32)
-
-template writeField*(w: var SszWriter, name: string, value: auto) =
-  w.writeValue(value)
-
-proc endRecord*(w: var SszWriter, memo: RecordWritingMemo) =
-  let finalSize = uint32(w.stream.pos - memo.initialStreamPos - 4)
-  memo.sizePrefixCursor.endWrite(finalSize.toBytesSSZ)
-
-proc writeValue*(w: var SszWriter, obj: auto) =
-  # We are not using overloads here, because this leads to
-  # slightly better error messages when the user provides
-  # additional overloads for `writeValue`.
-  mixin writeValue
-
-  when obj is ValidatorIndex|BasicType:
-    w.stream.append obj.toSSZType().toBytesSSZ
-  elif obj is byte|char:
-    w.stream.append obj
-  elif obj is enum:
-    w.stream.append uint64(obj).toBytesSSZ
+template enumerateSubFields(holder, fieldVar, body: untyped) =
+  when holder is array|string|seq|openarray:
+    for fieldVar in holder: body
   else:
-    let memo = w.beginRecord(obj.type)
-    when obj is seq|array|openarray|string:
-      # If you get an error here that looks like:
-      # type mismatch: got <type range 0..8191(uint64)>
-      # you just used an unsigned int for an array index thinking you'd get
-      # away with it (surprise, surprise: you can't, uints are crippled!)
-      # https://github.com/nim-lang/Nim/issues/9984
-      for elem in obj:
-        w.writeValue elem
-    elif obj is BitField:
-      for elem in obj.bits:
-        w.writeValue elem
-    else:
-      obj.serializeFields(fieldName, field):
-        # for research/serialized_sizes, remove when appropriate
-        when defined(debugFieldSizes) and obj is (BeaconState|BeaconBlock):
-          let start = w.stream.pos
-          w.writeValue field.toSSZType
-          debugEcho fieldName, ": ", w.stream.pos - start
-        else:
-          w.writeValue field.toSSZType
-    w.endRecord(memo)
+    enumInstanceSerializedFields(holder, _, fieldVar): body

-proc readValue*(r: var SszReader, result: var auto) =
-  # We are not using overloads here, because this leads to
-  # slightly better error messages when the user provides
-  # additional overloads for `readValue`.
-  type T = result.type
-  mixin readValue
+func writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}

-  template checkEof(n: int) =
-    if not r.stream[].ensureBytes(n):
-      raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
-
-  when result is ValidatorIndex|BasicType:
-    let bytesToRead = result.sszLen;
-    checkEof bytesToRead
-
-    when result is ValidatorPubKey|ValidatorSig:
-      if not result.init(r.stream.readBytes(bytesToRead)):
-        raise newException(CorruptedDataError, "Failed to load a BLS key or signature")
-    else:
-      result = T.fromBytesSSZ(r.stream.readBytes(bytesToRead))
-
-  elif result is enum:
-    # TODO what to do with out-of-range values?? rejecting means breaking
-    # forwards compatibility..
-    result = cast[T](r.readValue(uint64))
-
-  elif result is string:
-    {.error: "The SSZ format doesn't support the string type yet".}
+func beginRecord*(w: var SszWriter, TT: type): auto =
+  type T = TT
+  when isFixedSize(T):
+    FixedSizedWriterCtx()
   else:
-    let totalLen = int r.readValue(uint32)
-    checkEof totalLen
+    const offset = when T is array: len(T) * offsetSize
+                   else: fixedPortionSize(T)
+    VarSizedWriterCtx(offset: offset,
+                      fixedParts: w.stream.delayFixedSizeWrite(offset))

-    let endPos = r.stream[].pos + totalLen
-    when T is seq:
-      type ElemType = type(result[0])
-      # Items are of homogenous type, but not necessarily homogenous length,
-      # cannot pre-allocate item list generically
-      while r.stream[].pos < endPos:
-        result.add r.readValue(ElemType)
-
-    elif T is BitField:
-      type ElemType = type(result.bits[0])
-      while r.stream[].pos < endPos:
-        result.bits.add r.readValue(ElemType)
-
-    elif T is array:
-      type ElemType = type(result[0])
-      var i = 0
-      while r.stream[].pos < endPos:
-        if i > result.len:
-          raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of an array")
-        result[i] = r.readValue(ElemType)
-        i += 1
+template writeField*(w: var SszWriter,
+                     ctx: var auto,
+                     fieldName: string,
+                     field: auto) =
+  mixin toSszType
+  when ctx is FixedSizedWriterCtx:
+    writeFixedSized(w, toSszType(field))
+  else:
+    type FieldType = type toSszType(field)

+    when isFixedSize(FieldType):
+      ctx.fixedParts.writeFixedSized toSszType(field)
+    else:
-      result.deserializeFields(fieldName, field):
-        # TODO This hardcoding's ugly; generalize & abstract.
-        when field is Slot:
-          field = r.readValue(uint64).Slot
-        elif field is Epoch:
-          field = r.readValue(uint64).Epoch
-        else:
-          field = r.readValue(field.type)
+      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
+      ctx.fixedParts.writeFixedSized uint32(ctx.offset)
+      let initPos = w.stream.pos
+      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
+      when FieldType is BitSeq:
+        trs "BIT SEQ ", field.bytes
+      writeVarSizeType(w, toSszType(field))
+      ctx.offset += w.stream.pos - initPos

-    if r.stream[].pos != endPos:
-      raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of the deserialized object")
+template endRecord*(w: var SszWriter, ctx: var auto) =
+  when ctx is VarSizedWriterCtx:
+    finalize ctx.fixedParts

-# ################### Hashing ###################################
+func writeVarSizeType(w: var SszWriter, value: auto) =
+  trs "STARTING VAR SIZE TYPE"
+  mixin toSszType
+  type T = type toSszType(value)

-# Sample hash_tree_root implementation based on:
-# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/specs/simple-serialize.md
-# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/test_libs/pyspec/eth2spec/utils/minimal_ssz.py
-# TODO Probably wrong - the spec is pretty bare-bones and no test vectors yet
+  when T is seq|string|openarray:
+    type E = ElemType(T)
+    when isFixedSize(E):
+      trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
+      for elem in value:
+        w.stream.writeFixedSized toSszType(elem)
+      trs "DONE"
+    else:
+      trs "WRITING LIST WITH VAR SIZE ELEMENTS"
+      var offset = value.len * offsetSize
+      var cursor = w.stream.delayFixedSizeWrite offset
+      for elem in value:
+        cursor.writeFixedSized uint32(offset)
+        let initPos = w.stream.pos
+        w.writeVarSizeType toSszType(elem)
+        offset += w.stream.pos - initPos
+      finalize cursor
+      trs "DONE"
+
+  elif T is object|tuple|array:
+    trs "WRITING OBJECT OR ARRAY"
+    var ctx = beginRecord(w, T)
+    enumerateSubFields(value, field):
+      writeField w, ctx, astToStr(field), field
+    endRecord w, ctx
+
+func writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
+  mixin toSszType
+  type T = type toSszType(x)
+
+  when isFixedSize(T):
+    w.stream.writeFixedSized toSszType(x)
+  elif T is array|seq|openarray|string|object|tuple:
+    w.writeVarSizeType toSszType(x)
+  else:
+    unsupported type(x)
+
+func writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
+  var cursor = w.stream.delayVarSizeWrite(10)
+  let initPos = w.stream.pos
+  w.writeValue T(x)
+  cursor.appendVarint uint64(w.stream.pos - initPos)
+  finalize cursor
+
+template checkEof(n: int) =
+  if not r.stream[].ensureBytes(n):
+    raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
+
+template fromSszBytes*(T: type BlsValue, bytes: openarray[byte]): auto =
+  fromBytes(T, bytes)
+
+template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
+                             bytes: openarray[byte]): auto =
+  mixin fromSszBytes
+  fromSszBytes(T, bytes)
+
+proc fromSszBytes*(T: type BlsCurveType, bytes: openarray[byte]): auto =
+  init(T, bytes)
+
+proc readValue*(r: var SszReader, val: var auto) =
+  val = readSszValue(r.stream.readBytes(r.stream.endPos), val.type)
+
+proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
+  let length = r.stream.readVarint(uint64)
+  if length > r.maxObjectSize:
+    raise newException(SszMaxSizeExceeded,
+                       "Maximum SSZ object size exceeded: " & $length)
+  val = readSszValue(r.stream.readBytes(length), T)

 const
-  BYTES_PER_CHUNK = 32
+  zeroChunk = default array[32, byte]

# ################### Hashing helpers ###################################
|
||||
func hash(a, b: openArray[byte]): Eth2Digest =
|
||||
result = withEth2Hash:
|
||||
trs "MERGING BRANCHES "
|
||||
trs a
|
||||
trs b
|
||||
|
||||
# TODO varargs openarray, anyone?
|
||||
template withHash(body: untyped): array[32, byte] =
|
||||
let tmp = withEth2Hash: body
|
||||
toBytesSSZ tmp
|
||||
h.update a
|
||||
h.update b
|
||||
trs "HASH RESULT ", result
|
||||
|
||||
func hash(a, b: openArray[byte]): array[32, byte] =
|
||||
withHash:
|
||||
h.update(a)
|
||||
h.update(b)
|
||||
func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
|
||||
result = withEth2Hash:
|
||||
trs "MERGING BRANCHES OPEN ARRAY"
|
||||
trs existing.data
|
||||
trs newData
|
||||
|
||||
type
|
||||
Chunk = array[BYTES_PER_CHUNK, byte]
|
||||
h.update existing.data
|
||||
h.update newData
|
||||
|
||||
# TODO: er, how is this _actually_ done?
|
||||
# Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
|
||||
func empty(T: type): T = discard
|
||||
const emptyChunk = empty(Chunk)
|
||||
let paddingBytes = bytesPerChunk - newData.len
|
||||
if paddingBytes > 0:
|
||||
trs "USING ", paddingBytes, " PADDING BYTES"
|
||||
h.update zeroChunk[0 ..< paddingBytes]
|
||||
trs "HASH RESULT ", result
|
||||
|
||||
func mix_in_length(root: Chunk, length: int): Chunk =
|
||||
template mergeBranches(a, b: Eth2Digest): Eth2Digest =
|
||||
hash(a.data, b.data)
|
||||
|
||||
func computeZeroHashes: array[100, Eth2Digest] =
|
||||
result[0] = Eth2Digest(data: zeroChunk)
|
||||
for i in 1 .. result.high:
|
||||
result[i] = mergeBranches(result[i - 1], result[i - 1])
|
||||
|
||||
let zeroHashes = computeZeroHashes()
|
||||
|
||||
func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
|
||||
# TODO this is a work-around for the somewhat broken side
|
||||
# effects analysis of Nim - reading from global let variables
|
||||
# is considered a side-effect.
|
||||
# Nim 0.19 doesnt have the `{.noSideEffect.}:` override, so
|
||||
# we should revisit this in Nim 0.20.2.
|
||||
{.emit: "`result` = `zeroHashes`[`idx`];".}
|
||||
|
||||
func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
|
||||
doAssert data.len > 0 and data.len <= bytesPerChunk
|
||||
|
||||
if not getBitLE(merkelizer.totalChunks, 0):
|
||||
let chunkStartAddr = addr merkelizer.combinedChunks[0].data[0]
|
||||
copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
|
||||
zeroMem(chunkStartAddr.shift(data.len), bytesPerChunk - data.len)
|
||||
trs "WROTE BASE CHUNK ", merkelizer.combinedChunks[0]
|
||||
else:
|
||||
var hash = mergeBranches(merkelizer.combinedChunks[0], data)
|
||||
|
||||
for i in 1 .. high(merkelizer.combinedChunks):
|
||||
trs "ITERATING"
|
||||
if getBitLE(merkelizer.totalChunks, i):
|
||||
trs "CALLING MERGE BRANCHES"
|
||||
hash = mergeBranches(merkelizer.combinedChunks[i], hash)
|
||||
else:
|
||||
trs "WRITING FRESH CHUNK AT ", i, " = ", hash
|
||||
merkelizer.combinedChunks[i] = hash
|
||||
break
|
||||
|
||||
inc merkelizer.totalChunks
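
# A minimal, self-contained sketch of the binary-counter scheme used by
# `addChunk` above (illustrative only, not part of this commit): bit i of
# `count` says whether a pending subtree root of height i is parked in
# `combined[i]`; adding a leaf carries through occupied slots exactly like
# a binary increment. String concatenation stands in for the real hash.
when isMainModule:
  type ToyMerkelizer = object
    combined: array[8, string]
    count: uint64

  proc toyHash(a, b: string): string = "H(" & a & "," & b & ")"

  proc addLeaf(m: var ToyMerkelizer, leaf: string) =
    var carry = leaf
    var i = 0
    while (m.count shr i and 1) == 1:
      carry = toyHash(m.combined[i], carry)  # merge with parked subtree
      inc i
    m.combined[i] = carry                    # park at first free height
    inc m.count

  var m: ToyMerkelizer
  for leaf in ["a", "b", "c"]:
    m.addLeaf leaf
  doAssert m.combined[1] == "H(a,b)"  # the only complete height-1 subtree
  doAssert m.combined[0] == "c"       # odd leaf waits at height 0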

func getFinalHash*(merkelizer: SszChunksMerkelizer): Eth2Digest =
  let limit = merkelizer.limit

  if merkelizer.totalChunks == 0:
    let limitHeight = if limit != 0: bitWidth(limit - 1) else: 0
    return getZeroHashWithoutSideEffect(limitHeight)

  let
    bottomHashIdx = firstOne(merkelizer.totalChunks) - 1
    submittedChunksHeight = bitWidth(merkelizer.totalChunks - 1)
    topHashIdx = if limit <= 1: submittedChunksHeight
                 else: max(submittedChunksHeight, bitWidth(limit - 1))

  trs "BOTTOM HASH ", bottomHashIdx
  trs "SUBMITTED HEIGHT ", submittedChunksHeight
  trs "LIMIT ", limit

  if bottomHashIdx != submittedChunksHeight:
    # Our tree is not finished. We must complete the work in progress
    # branches and then extend the tree to the right height.
    result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
                           getZeroHashWithoutSideEffect(bottomHashIdx))

    for i in bottomHashIdx + 1 ..< topHashIdx:
      if getBitLE(merkelizer.totalChunks, i):
        result = mergeBranches(merkelizer.combinedChunks[i], result)
        trs "COMBINED"
      else:
        result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
        trs "COMBINED WITH ZERO"

  elif bottomHashIdx == topHashIdx:
    # We have a perfect tree (chunks == 2**n) at just the right height!
    result = merkelizer.combinedChunks[bottomHashIdx]
  else:
    # We have a perfect tree of user chunks, but we have more work to
    # do - we must extend it to reach the desired height
    result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
                           getZeroHashWithoutSideEffect(bottomHashIdx))

    for i in bottomHashIdx + 1 ..< topHashIdx:
      result = mergeBranches(result, getZeroHashWithoutSideEffect(i))

let HashingStreamVTable = OutputStreamVTable(
  writePage: proc (s: OutputStreamVar, data: openarray[byte])
                  {.nimcall, gcsafe, raises: [IOError, Defect].} =
    trs "ADDING STREAM CHUNK ", data
    SszChunksMerkelizer(s.outputDevice).addChunk(data)
  ,
  flush: proc (s: OutputStreamVar) {.nimcall, gcsafe.} =
    discard
)

func getVtableAddresWithoutSideEffect: ptr OutputStreamVTable =
  # TODO this is a work-around for the somewhat broken side
  # effects analysis of Nim - reading from global let variables
  # is considered a side-effect.
  # Nim 0.19 doesn't have the `{.noSideEffect.}:` override, so
  # we should revisit this in Nim 0.20.2.
  {.emit: "`result` = &`HashingStreamVTable`;".}

func newSszHashingStream(merkelizer: SszChunksMerkelizer): ref OutputStream =
  new result
  result.initWithSinglePage(pageSize = bytesPerChunk,
                            maxWriteSize = bytesPerChunk,
                            minWriteSize = bytesPerChunk)
  result.outputDevice = merkelizer
  result.vtable = getVtableAddresWithoutSideEffect()

func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
  var dataLen: array[32, byte]
  var lstLen = uint64(length)
  littleEndian64(dataLen[32-8].addr, lstLen.addr)
  littleEndian64(addr dataLen[0], addr lstLen)
  hash(root.data, dataLen)

  hash(root, dataLen)
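
# Illustration of the length chunk hashed in by mixInLength above
# (a sketch, assuming the new little-endian layout): the length occupies
# the first 8 bytes of a zeroed 32-byte chunk, binding the list length
# into the final root.
when isMainModule:
  import endians
  proc lengthChunk(length: int): array[32, byte] =
    var v = uint64(length)
    littleEndian64(addr result[0], addr v)
  doAssert lengthChunk(3)[0] == 3'u8
  doAssert lengthChunk(3)[8] == 0'u8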

func merkelizeSerializedChunks(merkelizer: SszChunksMerkelizer,
                               obj: auto): Eth2Digest =
  var hashingStream = newSszHashingStream merkelizer
  hashingStream.writeFixedSized obj
  hashingStream.flush
  merkelizer.getFinalHash

template padEmptyChunks(chunks: int) =
  for i in chunks..<nextPowerOfTwo(chunks):
    yield emptyChunk
func merkelizeSerializedChunks(obj: auto): Eth2Digest =
  merkelizeSerializedChunks(SszChunksMerkelizer(), obj)

iterator packAndPad(values: seq|array): Chunk =
  ## Produce a stream of chunks that are packed and padded such that they number
  ## a power of two
func hashTreeRoot*(x: auto): Eth2Digest {.gcsafe.}

  when sizeof(values[0].toSSZType().toBytesSSZ()) == sizeof(Chunk):
    # When chunks and value lengths coincide, do the simple thing
    for v in values:
      yield v.toSSZType().toBytesSSZ()
    padEmptyChunks(values.len)
template merkelizeFields(body: untyped): Eth2Digest {.dirty.} =
  var merkelizer {.inject.} = SszChunksMerkelizer()

  else:
    var
      chunks: int
      tmp: Chunk
      tmpPos: int # how many bytes of tmp we've filled with ssz values
  template addField(field) =
    let hash = hashTreeRoot(field)
    trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
    addChunk(merkelizer, hash.data)
    trs "CHUNK ADDED"

    for v in values:
      var
        vssz = toBytesSSZ(v.toSSZType)
        vPos = 0 # how many bytes of vssz that we've consumed

      while vPos < vssz.len:
        # there are still bytes of vssz left to consume - looping happens when
        # vssz.len > sizeof(Chunk)

        let left = min(tmp.len - tmpPos, vssz.len - vPos)
        copyMem(addr tmp[tmpPos], addr vssz[vPos], left)
        vPos += left
        tmpPos += left

        if tmpPos == tmp.len:
          # When vssz.len < sizeof(Chunk), multiple values will fit in a chunk
          yield tmp
          tmpPos = 0
          chunks += 1

    if tmpPos > 0:
      # If vssz.len is not a multiple of Chunk, we might need to pad the last
      # chunk with zeroes and return it
      for i in tmpPos..<tmp.len:
        tmp[i] = 0'u8
      yield tmp
      tmpPos = 0
      chunks += 1

    padEmptyChunks(chunks)

iterator hash_tree_collection(value: array|seq): Chunk =
  mixin hash_tree_root
  var chunks = 0
  for v in value:
    yield hash_tree_root(v).data
    chunks += 1
  padEmptyChunks(chunks)

iterator hash_tree_fields(value: object): Chunk =
  mixin hash_tree_root
  var chunks = 0
  for v in value.fields:
    yield hash_tree_root(v).data
    chunks += 1
  padEmptyChunks(chunks)

template merkleize(chunker: untyped): Chunk =
  var
    # a depth of 32 here should give us capability to handle 2^32 chunks,
    # more than enough
    # TODO replace with SmallVector-like thing..
    stack: array[32, tuple[height: int, chunk: Chunk]]
    stackPos = 0

  for chunk in chunker:
    # Leaves start at height 0 - every time they move up, height is increased
    # allowing us to detect two chunks at the same height ready for
    # consolidation
    # See also: http://szydlo.com/logspacetime03.pdf
    stack[stackPos] = (0, chunk)
    inc stackPos

    # Consolidate items of the same height - this keeps stack size at log N
    while stackPos > 1 and stack[stackPos - 1].height == stack[stackPos - 2].height:
      # As tradition dictates - one feature, at least one nim bug:
      # https://github.com/nim-lang/Nim/issues/9684
      let tmp = hash(stack[stackPos - 2].chunk, stack[stackPos - 1].chunk)
      stack[stackPos - 2].height += 1
      stack[stackPos - 2].chunk = tmp
      stackPos -= 1

  doAssert stackPos == 1,
    "With power-of-two leaves, we should end up with a single root"

  stack[0].chunk
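
# A self-contained check of the consolidation order above (toy hash, not
# the Eth2 one): with four leaves, the stack pairs c0 with c1, then c2
# with c3, and finally the two height-1 results.
when isMainModule:
  proc h(a, b: string): string = "(" & a & b & ")"
  var stack: seq[tuple[height: int, chunk: string]]
  for c in ["c0", "c1", "c2", "c3"]:
    stack.add (0, c)
    while stack.len > 1 and stack[^1].height == stack[^2].height:
      stack[^2] = (stack[^2].height + 1, h(stack[^2].chunk, stack[^1].chunk))
      stack.setLen(stack.len - 1)
  doAssert stack.len == 1 and stack[0].chunk == "((c0c1)(c2c3))"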

template elementType[T, N](_: type array[N, T]): typedesc = T
template elementType[T](_: type seq[T]): typedesc = T

func hash_tree_root*[T](value: T): Eth2Digest =
  # Merkle tree
  Eth2Digest(data:
    when T is BasicType:
      merkleize(packAndPad([value]))
    elif T is array|seq:
      when T.elementType() is BasicType:
        mix_in_length(merkleize(packAndPad(value)), len(value))
      else:
        mix_in_length(merkleize(hash_tree_collection(value)), len(value))
    elif T is object:
      merkleize(hash_tree_fields(value))
template addField2(field) =
  const maxLen = fieldMaxLen(field)
  when maxLen > 0:
    type FieldType = type field
    addField TypeWithMaxLen[FieldType, maxLen](field)
  else:
    static: doAssert false, "Unexpected type: " & T.name
  )
    addField field

iterator hash_tree_most(v: object): Chunk =
  const numFields = (proc(): int =
    var o: type(v)
    var i = 0
    for _, _ in o.fieldPairs: inc i
    i)()
  body

  var i = 0
  for name, field in v.fieldPairs:
    if i == numFields - 1:
      break
    inc i
    yield hash_tree_root(field).data
  merkelizer.getFinalHash

# https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/simple-serialize.md#signed-roots
func signing_root*[T: object](x: T): Eth2Digest =
  # TODO write tests for this (check vs hash_tree_root)
func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest =
  trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkelizer.limit

  let root = merkleize(hash_tree_most(x))
  var
    totalBytes = x.bytes.len
    lastCorrectedByte = x.bytes[^1]

  if lastCorrectedByte == byte(1):
    if totalBytes == 1:
      # This is an empty bit list.
      # It should be hashed as a tree containing all zeros:
      let treeHeight = if merkelizer.limit == 0: 0
                       else: log2trunc(merkelizer.limit)
      return mergeBranches(getZeroHashWithoutSideEffect(treeHeight),
                           getZeroHashWithoutSideEffect(0)) # this is the mixed length

    totalBytes -= 1
    lastCorrectedByte = x.bytes[^2]
  else:
    let markerPos = log2trunc(lastCorrectedByte)
    lastCorrectedByte.lowerBit(markerPos)

  var
    bytesInLastChunk = totalBytes mod bytesPerChunk
    paddingBytes = bytesPerChunk - bytesInLastChunk
    fullChunks = totalBytes div bytesPerChunk

  if bytesInLastChunk == 0:
    fullChunks -= 1
    bytesInLastChunk = 32

  for i in 0 ..< fullChunks:
    let
      chunkStartPos = i * bytesPerChunk
      chunkEndPos = chunkStartPos + bytesPerChunk - 1

    merkelizer.addChunk x.bytes.toOpenArray(chunkStartPos, chunkEndPos)

  var
    lastChunk: array[bytesPerChunk, byte]
    chunkStartPos = fullChunks * bytesPerChunk

  for i in 0 .. bytesInLastChunk - 2:
    lastChunk[i] = x.bytes[chunkStartPos + i]

  lastChunk[bytesInLastChunk - 1] = lastCorrectedByte

  merkelizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
  let contentsHash = merkelizer.getFinalHash
  mixInLength contentsHash, x.len
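
# Sketch of the marker-bit handling above (illustrative, assuming the
# BitSeq encoding where a single sentinel bit follows the payload): before
# chunking, the highest set bit of the final byte must be cleared, or the
# final byte dropped entirely when it carries only the sentinel.
when isMainModule:
  proc stripMarker(bytes: seq[byte]): seq[byte] =
    result = bytes
    if result[^1] == 1'u8:
      result.setLen(result.len - 1)  # byte held only the sentinel
    else:
      var mask = 0x80'u8
      while (result[^1] and mask) == 0'u8:
        mask = mask shr 1
      result[^1] = result[^1] and not mask  # clear the sentinel bit
  doAssert stripMarker(@[0b1011'u8]) == @[0b0011'u8]
  doAssert stripMarker(@[0xFF'u8, 0x01'u8]) == @[0xFF'u8]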

func hashTreeRootImpl[T](x: T): Eth2Digest =
  when (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
    trs "FIXED TYPE; USE CHUNK STREAM"
    merkelizeSerializedChunks x
  elif T is string or (when T is (seq|openarray): ElemType(T) is BasicType else: false):
    trs "TYPE WITH LENGTH"
    mixInLength merkelizeSerializedChunks(x), x.len
  elif T is array|object|tuple:
    trs "MERKELIZING FIELDS"
    merkelizeFields:
      x.enumerateSubFields(f):
        const maxLen = fieldMaxLen(f)
        when maxLen > 0:
          type FieldType = type f
          addField TypeWithMaxLen[FieldType, maxLen](f)
        else:
          addField f
  elif T is seq:
    trs "SEQ WITH VAR SIZE"
    let hash = merkelizeFields(for e in x: addField e)
    mixInLength hash, x.len
  #elif isCaseObject(T):
  #  # TODO implement this
  else:
    unsupported T

func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
  when T is BitList:
    (maxLen + bitsPerChunk - 1) div bitsPerChunk
  elif T is seq:
    type E = ElemType(T)
    when E is BasicType:
      (maxLen * sizeof(E) + bytesPerChunk - 1) div bytesPerChunk
    else:
      maxLen
  else:
    unsupported T # This should never happen

func hashTreeRoot*(x: auto): Eth2Digest =
  trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
  mixin toSszType
  when x is TypeWithMaxLen:
    const maxLen = x.maxLen
    type T = type valueOf(x)
    const limit = maxChunksCount(T, maxLen)
    var merkelizer = SszChunksMerkelizer(limit: uint64(limit))

    when T is BitList:
      result = merkelizer.bitlistHashTreeRoot(BitSeq valueOf(x))
    elif T is seq:
      type E = ElemType(T)
      let contentsHash = when E is BasicType:
        merkelizeSerializedChunks(merkelizer, valueOf(x))
      else:
        for elem in valueOf(x):
          let elemHash = hashTreeRoot(elem)
          merkelizer.addChunk(elemHash.data)
        merkelizer.getFinalHash()
      result = mixInLength(contentsHash, valueOf(x).len)
    else:
      unsupported T # This should never happen
  else:
    result = hashTreeRootImpl toSszType(x)

  trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result

func lastFieldName(RecordType: type): string {.compileTime.} =
  enumAllSerializedFields(RecordType):
    result = fieldName

func hasSigningRoot*(T: type): bool {.compileTime.} =
  lastFieldName(T) == "signature"

func signingRoot*(obj: object): Eth2Digest =
  const lastField = lastFieldName(obj.type)
  merkelizeFields:
    obj.enumInstanceSerializedFields(fieldName, field):
      when fieldName != lastField:
        addField2 field

  Eth2Digest(data: root)

@@ -0,0 +1,142 @@
import
  endians, typetraits,
  stew/[objects, bitseqs], serialization/testing/tracing,
  ../spec/[digest, datatypes], ./types

template setLen[R, T](a: var array[R, T], length: int) =
  if length != a.len:
    raise newException(MalformedSszError, "SSZ input of insufficient size")

# fromSszBytes copies the wire representation to a Nim variable,
# assuming there's enough data in the buffer
func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
  ## All integers are serialized as **little endian**.
  ## TODO: Assumes data points to a sufficiently large buffer
  doAssert data.len == sizeof(result)
  # TODO: any better way to get a suitably aligned buffer in nim???
  # see also: https://github.com/nim-lang/Nim/issues/9206
  var tmp: uint64
  var alignedBuf = cast[ptr byte](tmp.addr)
  copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)

  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
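
# Quick check of the little-endian convention implemented above, exercising
# the fromSszBytes overloads defined in this module:
when isMainModule:
  doAssert fromSszBytes(uint16, [byte 0x04, 0x08]) == 0x0804'u16
  doAssert fromSszBytes(uint8, [byte 0xFF]) == 255'u8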

func fromSszBytes*(T: type bool, data: openarray[byte]): T =
  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
  # definition for now, but maybe this should be a parse error instead?
  fromSszBytes(uint8, data) != 0

func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T =
  doAssert data.len == sizeof(result.data)
  copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))

template fromSszBytes*(T: type Slot, bytes: openarray[byte]): Slot =
  Slot fromSszBytes(uint64, bytes)

template fromSszBytes*(T: type Epoch, bytes: openarray[byte]): Epoch =
  Epoch fromSszBytes(uint64, bytes)

template fromSszBytes*(T: type enum, bytes: openarray[byte]): auto =
  T fromSszBytes(uint64, bytes)

template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
  BitSeq @bytes

proc fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto =
  BitList[N] @bytes

proc readSszValue*(input: openarray[byte], T: type): T =
  mixin fromSszBytes, toSszType

  type T = type(result)

  template readOffset(n: int): int =
    int fromSszBytes(uint32, input[n ..< n + offsetSize])

  when useListType and result is List:
    type ElemType = type result[0]
    result = T readSszValue(input, seq[ElemType])
  elif result is string|seq|openarray|array:
    type ElemType = type result[0]
    when ElemType is byte|char:
      result.setLen input.len
      copyMem(addr result[0], unsafeAddr input[0], input.len)

    elif isFixedSize(ElemType):
      const elemSize = fixedPortionSize(ElemType)
      if input.len mod elemSize != 0:
        var ex = new SszSizeMismatchError
        ex.deserializedType = cstring typetraits.name(T)
        ex.actualSszSize = input.len
        ex.elementSize = elemSize
        raise ex
      result.setLen input.len div elemSize
      trs "READING LIST WITH LEN ", result.len
      for i in 0 ..< result.len:
        trs "TRYING TO READ LIST ELEM ", i
        let offset = i * elemSize
        result[i] = readSszValue(input[offset ..< offset+elemSize], ElemType)
      trs "LIST READING COMPLETE"

    else:
      if input.len == 0:
        # This is an empty list.
        # The default initialization of the return value is fine.
        return

      var offset = readOffset 0
      trs "GOT OFFSET ", offset
      let resultLen = offset div offsetSize
      trs "LEN ", resultLen
      result.setLen resultLen
      for i in 1 ..< resultLen:
        let nextOffset = readOffset(i * offsetSize)
        result[i - 1] = readSszValue(input[offset ..< nextOffset], ElemType)
        offset = nextOffset

      result[resultLen - 1] = readSszValue(input[offset ..< input.len], ElemType)
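      # Layout note for the branch above: a list of variable-size items is
      # N little-endian 32-bit offsets followed by the payloads, so the
      # first offset pins down N (resultLen = offset div offsetSize). For
      # example, the two items "ab" and "c" serialize as
      #   08 00 00 00  0a 00 00 00  61 62 63
      # and decoding reads offset 8, infers two items, and slices
      # input[8 ..< 10] and input[10 ..< 11].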

  elif result is object|tuple:
    enumInstanceSerializedFields(result, fieldName, field):
      const boundingOffsets = T.getFieldBoundingOffsets(fieldName)
      trs "BOUNDING OFFSET FOR FIELD ", fieldName, " = ", boundingOffsets

      type FieldType = type field
      type SszType = type toSszType(default(FieldType))

      when isFixedSize(SszType):
        const
          startOffset = boundingOffsets[0]
          endOffset = boundingOffsets[1]
        trs "FIXED FIELD ", startOffset, "-", endOffset
      else:
        let
          startOffset = readOffset(boundingOffsets[0])
          endOffset = if boundingOffsets[1] == -1: input.len
                      else: readOffset(boundingOffsets[1])
        trs "VAR FIELD ", startOffset, "-", endOffset

      # TODO The extra type escaping here is a work-around for a Nim issue:
      when type(FieldType) is type(SszType):
        trs "READING NATIVE ", fieldName, ": ", name(SszType)
        field = readSszValue(input[startOffset ..< endOffset], SszType)
        trs "READING COMPLETE ", fieldName
      elif useListType and FieldType is List:
        field = readSszValue(input[startOffset ..< endOffset], FieldType)
      else:
        trs "READING FOREIGN ", fieldName, ": ", name(SszType)
        field = fromSszBytes(FieldType, input[startOffset ..< endOffset])

  elif result is SomeInteger|bool:
    trs "READING BASIC TYPE ", type(result).name, " input=", input.len
    result = fromSszBytes(type(result), input)
    trs "RESULT WAS ", repr(result)

  else:
    unsupported T

@@ -0,0 +1,63 @@
import
  stew/objects, stew/ranges/ptr_arith,
  ./types, ./bytes_reader

type
  MemRange = object
    startAddr: ptr byte
    length: int

  SszNavigator*[T] = object
    m: MemRange

func sszMount*(data: openarray[byte], T: type): SszNavigator[T] =
  let startAddr = unsafeAddr data[0]
  SszNavigator[T](m: MemRange(startAddr: startAddr, length: data.len))

template checkBounds(m: MemRange, offset: int) =
  if offset > m.length:
    raise newException(MalformedSszError, "Malformed SSZ")

template toOpenArray(m: MemRange): auto =
  makeOpenArray(m.startAddr, m.length)

func navigateToField[T](n: SszNavigator[T],
                        fieldName: static string,
                        FieldType: type): SszNavigator[FieldType] =
  mixin toSszType
  type SszFieldType = type toSszType(default FieldType)

  const boundingOffsets = getFieldBoundingOffsets(T, fieldName)
  checkBounds(n.m, boundingOffsets[1])

  when isFixedSize(SszFieldType):
    SszNavigator[FieldType](m: MemRange(
      startAddr: shift(n.m.startAddr, boundingOffsets[0]),
      length: boundingOffsets[1] - boundingOffsets[0]))
  else:
    template readOffset(offset): int =
      int fromSszBytes(uint32, makeOpenArray(shift(n.m.startAddr, offset),
                                             sizeof(uint32)))
    let
      startOffset = readOffset boundingOffsets[0]
      endOffset = when boundingOffsets[1] == -1: n.m.length
                  else: readOffset boundingOffsets[1]

    if endOffset < startOffset or endOffset > n.m.length:
      raise newException(MalformedSszError, "Incorrect offset values")

    SszNavigator[FieldType](m: MemRange(
      startAddr: shift(n.m.startAddr, startOffset),
      length: endOffset - startOffset))

template `.`*[T](n: SszNavigator[T], field: untyped): auto =
  type RecType = T
  type FieldType = type(default(RecType).field)
  navigateToField(n, astToStr(field), FieldType)

func `[]`*[T](n: SszNavigator[T]): T =
  readSszValue(toOpenArray(n.m), T)

converter derefNavigator*[T](n: SszNavigator[T]): T =
  n[]
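
# Usage sketch for the navigator (names illustrative, assuming a record
# type with a `slot` field): mount raw SSZ bytes and read one field
# without decoding the whole object.
#   let bytes = SSZ.encode(someBlock)
#   let slot = sszMount(bytes, BeaconBlock).slot[]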

@@ -0,0 +1,238 @@
import
  tables,
  stew/shims/macros, stew/[objects, bitseqs],
  serialization/[object_serialization, errors]

const
  useListType* = false
  offsetSize* = 4

type
  BasicType* = char|bool|SomeUnsignedInt

  SszError* = object of SerializationError

  MalformedSszError* = object of SszError

  SszSizeMismatchError* = object of SszError
    deserializedType*: cstring
    actualSszSize*: int
    elementSize*: int

  SszChunksLimitExceeded* = object of SszError

  SszSchema* = ref object
    nodes*: seq[SszNode]

  SszTypeKind* = enum
    sszNull
    sszUInt
    sszBool
    sszList
    sszVector
    sszBitList
    sszBitVector
    sszRecord

  SszType* = ref object
    case kind*: SszTypeKind
    of sszUInt, sszBitVector:
      bits*: int
    of sszBool, sszNull, sszBitList:
      discard
    of sszVector:
      size*: int
      vectorElemType*: SszType
    of sszList:
      listElemType*: SszType
    of sszRecord:
      schema*: SszSchema

  SszNodeKind* = enum
    Field
    Union

  SszNode* = ref object
    name*: string
    typ*: SszType
    case kind: SszNodeKind
    of Union:
      variants*: seq[SszSchema]
    of Field:
      discard

when useListType:
  type List*[T; maxLen: static int] = distinct seq[T]
else:
  type List*[T; maxLen: static int] = seq[T]

macro unsupported*(T: typed): untyped =
  # TODO: {.fatal.} breaks compilation even in `compiles()` context,
  # so we use this macro instead. It's also much better at figuring
  # out the actual type that was used in the instantiation.
  # File both problems as issues.
  error "SSZ serialization of the type " & humaneTypeName(T) & " is not supported"

template ElemType*(T: type[array]): untyped =
  type(default(T)[low(T)])

template ElemType*[T](A: type[openarray[T]]): untyped =
  T

template ElemType*(T: type[seq|string|List]): untyped =
  type(default(T)[0])

func isFixedSize*(T0: type): bool {.compileTime.} =
  mixin toSszType, enumAllSerializedFields

  when T0 is openarray:
    return false
  else:
    type T = type toSszType(default T0)

    when T is BasicType:
      return true
    elif T is array:
      return isFixedSize(ElemType(T))
    elif T is object|tuple:
      enumAllSerializedFields(T):
        when not isFixedSize(FieldType):
          return false
      return true

func fixedPortionSize*(T0: type): int {.compileTime.} =
  mixin enumAllSerializedFields, toSszType
  type T = type toSszType(default T0)

  when T is BasicType: sizeof(T)
  elif T is array:
    const elementCount = high(T).ord - low(T).ord + 1
    type E = ElemType(T)
    when isFixedSize(E): elementCount * fixedPortionSize(E)
    else: elementCount * offsetSize
  elif T is seq|string|openarray: offsetSize
  elif T is object|tuple:
    var res = 0
    enumAllSerializedFields(T):
      when isFixedSize(FieldType):
        res += fixedPortionSize(FieldType)
      else:
        res += offsetSize
    res
  else:
    unsupported T0

func sszSchemaType*(T0: type): SszType {.compileTime.} =
  mixin toSszType, enumAllSerializedFields
  type T = type toSszType(default T0)

  when T is bool:
    SszType(kind: sszBool)
  elif T is uint8|char:
    SszType(kind: sszUInt, bits: 8)
  elif T is uint16:
    SszType(kind: sszUInt, bits: 16)
  elif T is uint32:
    SszType(kind: sszUInt, bits: 32)
  elif T is uint64:
    SszType(kind: sszUInt, bits: 64)
  elif T is seq|string:
    SszType(kind: sszList, listElemType: sszSchemaType(ElemType(T)))
  elif T is array:
    SszType(kind: sszVector, vectorElemType: sszSchemaType(ElemType(T)))
  elif T is BitArray:
    SszType(kind: sszBitVector, bits: T.bits)
  elif T is BitSeq:
    SszType(kind: sszBitList)
  elif T is object|tuple:
    var recordSchema = SszSchema()
    var caseBranches = initTable[string, SszSchema]()
    caseBranches[""] = recordSchema
    # TODO case objects are still not supported here.
    # `recordFields` has to be refactored to properly
    # report nested discriminator fields.
    enumAllSerializedFields(T):
      recordSchema.nodes.add SszNode(
        name: fieldName,
        typ: sszSchemaType(FieldType),
        kind: Field)
  else:
    unsupported T0

# TODO This should have been an iterator, but the VM can't compile the
# code due to "too many registers required".
proc fieldInfos*(RecordType: type): seq[tuple[name: string,
                                              offset: int,
                                              fixedSize: int,
                                              branchKey: string]] =
  mixin enumAllSerializedFields

  var
    offsetInBranch = {"": 0}.toTable
    nestedUnder = initTable[string, string]()

  enumAllSerializedFields(RecordType):
    const
      isFixed = isFixedSize(FieldType)
      fixedSize = when isFixed: fixedPortionSize(FieldType)
                  else: 0
      branchKey = when fieldCaseDisciminator.len == 0: ""
                  else: fieldCaseDisciminator & ":" & $fieldCaseBranches
      fieldSize = when isFixed: fixedSize
                  else: offsetSize

    nestedUnder[fieldName] = branchKey

    var fieldOffset: int
    offsetInBranch.withValue(branchKey, val):
      fieldOffset = val[]
      val[] += fieldSize
    do:
      let parentBranch = nestedUnder.getOrDefault(fieldCaseDisciminator, "")
      fieldOffset = offsetInBranch[parentBranch]
      offsetInBranch[branchKey] = fieldOffset + fieldSize

    result.add((fieldName, fieldOffset, fixedSize, branchKey))

func getFieldBoundingOffsetsImpl(RecordType: type,
                                 fieldName: static string):
     tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
  result = (-1, -1)
  var fieldBranchKey: string

  for f in fieldInfos(RecordType):
    if fieldName == f.name:
      result[0] = f.offset
      if f.fixedSize > 0:
        result[1] = result[0] + f.fixedSize
        return
      else:
        fieldBranchKey = f.branchKey

    elif result[0] != -1 and
         f.fixedSize == 0 and
         f.branchKey == fieldBranchKey:
      # We have found the next variable sized field
      result[1] = f.offset
      return

func getFieldBoundingOffsets*(RecordType: type,
                              fieldName: static string):
     tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
  ## Returns the start and end offsets of a field.
  ##
  ## For fixed-size fields, the start offset points to the first
  ## byte of the field and the end offset points to 1 byte past the
  ## end of the field.
  ##
  ## For variable-size fields, the returned offsets point to the
  ## statically known positions of the 32-bit offset values written
  ## within the SSZ object. You must read the 32-bit values stored
  ## at these locations in order to obtain the actual offsets.
  ##
  ## For variable-size fields, the end offset may be -1 when the
  ## designated field is the last variable sized field within the
  ## object. Then the SSZ object boundary known at run-time marks
  ## the end of the variable-size field.
  type T = RecordType
  anonConst getFieldBoundingOffsetsImpl(T, fieldName)
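
# Worked example for the doc comment above (hypothetical record, not part
# of this module): assuming
#   type Foo = object
#     a: uint64      # fixed-size: bytes 0..7 of the fixed portion
#     b: seq[byte]   # variable-size: its 32-bit offset lives at bytes 8..11
#     c: uint32      # fixed-size: bytes 12..15
# the bounding offsets come out as
#   getFieldBoundingOffsets(Foo, "a") == (0, 8)
#   getFieldBoundingOffsets(Foo, "b") == (8, -1)   # last variable-size field
#   getFieldBoundingOffsets(Foo, "c") == (12, 16)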

@@ -33,7 +33,7 @@
import
  algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
  ./extras, ./ssz, ./beacon_node_types,
  ./spec/[beaconstate, bitfield, crypto, datatypes, digest, helpers, validator],
  ./spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ./spec/[state_transition_block, state_transition_epoch]

# Canonical state transition functions

@@ -24,8 +24,12 @@ type
    node*: BeaconNode
    db*: BeaconChainDB

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

const
  MaxRootsToRequest = 512
  MaxRootsToRequest = 512'u64
  MaxHeadersToRequest = MaxRootsToRequest
  MaxAncestorBlocksResponse = 256

@@ -67,8 +71,11 @@ proc mergeBlockHeadersAndBodies(headers: openarray[BeaconBlockHeader], bodies: o
    res[^1].fromHeaderAndBody(headers[i], bodies[i])
  some(res)

proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}

proc getBeaconBlocks*(peer: Peer,
                      blockRoot: Eth2Digest,
                      slot: Slot,
                      maxBlocks, skipSlots: uint64,
                      backward: bool): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}

p2pProtocol BeaconSync(version = 1,
                       shortName = "bcs",

@@ -113,8 +120,8 @@ p2pProtocol BeaconSync(version = 1,
      var s = bestSlot + 1
      while s <= m.bestSlot:
        debug "Waiting for block headers", fromPeer = peer, remoteBestSlot = m.bestSlot, peer
        let headersLeft = int(m.bestSlot - s)
        let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, 0)
        let headersLeft = uint64(m.bestSlot - s)
        let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, false)
        if blocks.isSome:
          if blocks.get.len == 0:
            info "Got 0 blocks while syncing", peer

@@ -144,53 +151,40 @@ p2pProtocol BeaconSync(version = 1,

  proc goodbye(peer: Peer, reason: DisconnectionReason)

  requestResponse:
    proc getStatus(
        peer: Peer,
        sha: Eth2Digest,
        userAgent: string,
        timestamp: uint64) =

      # TODO: How should this be implemented?
      # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/rpc-interface.md#get-status
      await response.send(sha, userAgent, timestamp)

    proc status(peer: Peer, sha: Eth2Digest, userAgent: string, timestamp: uint64)

  nextId 10

  requestResponse:
    proc getBeaconBlockRoots(
        peer: Peer,
        fromSlot: Slot,
        maxRoots: int) =
        maxRoots: uint64) =
      let maxRoots = min(MaxRootsToRequest, maxRoots)
      var s = fromSlot
      var roots = newSeqOfCap[(Eth2Digest, Slot)](maxRoots)
      var roots = newSeqOfCap[BlockRootSlot](maxRoots)
      let blockPool = peer.networkState.node.blockPool
      let maxSlot = blockPool.head.blck.slot
      while s <= maxSlot:
        for r in blockPool.blockRootsForSlot(s):
          roots.add((r, s))
          if roots.len == maxRoots: break
          roots.add BlockRootSlot(blockRoot: r, slot: s)
          if roots.len == maxRoots.int: break
        s += 1
      await response.send(roots)

    proc beaconBlockRoots(peer: Peer, roots: openarray[(Eth2Digest, Slot)])
    proc beaconBlockRoots(peer: Peer, roots: openarray[BlockRootSlot])

  requestResponse:
    proc getBeaconBlockHeaders(
        peer: Peer,
        blockRoot: Eth2Digest,
        slot: Slot,
        maxHeaders: int,
        skipSlots: int,
        backward: uint8) =
        maxHeaders: uint64,
        skipSlots: uint64,
        backward: bool) =
      let maxHeaders = min(MaxHeadersToRequest, maxHeaders)
      var headers: seq[BeaconBlockHeader]
      let db = peer.networkState.db

      if backward != 0:
      if backward:
        # TODO: implement skipSlots

        var blockRoot = blockRoot

@@ -205,7 +199,7 @@ p2pProtocol BeaconSync(version = 1,

        while not br.isNil:
          blockRefs.add(br)
          if blockRefs.len == maxHeaders:
          if blockRefs.len == maxHeaders.int:
            break
          br = br.parent

@@ -223,50 +217,13 @@ p2pProtocol BeaconSync(version = 1,
        while s <= maxSlot:
          for r in blockPool.blockRootsForSlot(s):
            headers.add(db.getBlock(r).get().toHeader)
            if headers.len == maxHeaders: break
            if headers.len == maxHeaders.int: break
          s += 1

      await response.send(headers)

    proc beaconBlockHeaders(peer: Peer, blockHeaders: openarray[BeaconBlockHeader])

  # TODO move this at the bottom, because it's not in the spec yet, but it will
  # consume a `method_id`
  requestResponse:
    proc getAncestorBlocks(
        peer: Peer,
        needed: openarray[FetchRecord]) =
      var resp = newSeqOfCap[BeaconBlock](needed.len)
      let db = peer.networkState.db
      var neededRoots = initSet[Eth2Digest]()
      for rec in needed: neededRoots.incl(rec.root)

      for rec in needed:
        if (var blck = db.getBlock(rec.root); blck.isSome()):
          # TODO validate historySlots
          let firstSlot = blck.get().slot - rec.historySlots

          for i in 0..<rec.historySlots.int:
            resp.add(blck.get())
            if resp.len >= MaxAncestorBlocksResponse:
              break

            if blck.get().parent_root in neededRoots:
              # Don't send duplicate blocks, if neededRoots has roots that are
              # in the same chain
              break

            if (blck = db.getBlock(blck.get().parent_root);
                blck.isNone() or blck.get().slot < firstSlot):
              break

          if resp.len >= MaxAncestorBlocksResponse:
            break

      await response.send(resp)

    proc ancestorBlocks(peer: Peer, blocks: openarray[BeaconBlock])

  requestResponse:
    proc getBeaconBlockBodies(
        peer: Peer,

@@ -285,7 +242,11 @@ p2pProtocol BeaconSync(version = 1,
        peer: Peer,
        blockBodies: openarray[BeaconBlockBody])

proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.async.} =
proc getBeaconBlocks*(peer: Peer,
                      blockRoot: Eth2Digest,
                      slot: Slot,
                      maxBlocks, skipSlots: uint64,
                      backward: bool): Future[Option[seq[BeaconBlock]]] {.async.} =
  ## Retrieve block headers and block bodies from the remote peer, merge them into blocks.
  assert(maxBlocks <= MaxHeadersToRequest)
  let headersResp = await peer.getBeaconBlockHeaders(blockRoot, slot, maxBlocks, skipSlots, backward)
nim.cfg
@@ -1,6 +1,8 @@
--threads:on
--opt:speed

# -d:"chronicles_sinks=json"

@if windows:
  # increase stack size
  --passL:"-Wl,--stack,8388608"

@@ -1,7 +1,7 @@
import
  confutils,
  ../beacon_chain/[extras, ssz],
  ../beacon_chain/spec/[beaconstate, bitfield, datatypes, digest, validator],
  ../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
  ../tests/testutil

proc stateSize(deposits: int, maxContent = false) =

@@ -23,7 +23,7 @@ proc stateSize(deposits: int, maxContent = false) =
  # validatorsPerCommittee =
  #   len(crosslink_committees[0].committee) # close enough..
  # for a in state.latest_attestations.mitems():
  #   a.aggregation_bits = BitField.init(validatorsPerCommittee)
  #   a.aggregation_bits = BitSeq.init(validatorsPerCommittee)
  echo "Validators: ", deposits, ", total: ", SSZ.encode(state).len

dispatch(stateSize)

@@ -1,16 +1,11 @@
import
  confutils, stats, times,
  json, strformat,
  strformat,
  options, sequtils, random, tables,
  ../tests/[testutil],
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ../beacon_chain/[attestation_pool, extras, ssz, state_transition, fork_choice]

proc `%`(v: uint64): JsonNode =
  if v > uint64(high(BiggestInt)): newJString($v) else: newJInt(BiggestInt(v))
proc `%`(v: Eth2Digest): JsonNode = newJString($v)
proc `%`(v: ValidatorSig|ValidatorPubKey): JsonNode = newJString($v)

type Timers = enum
  tBlock = "Process non-epoch slot with block"
  tEpoch = "Process epoch slot with block"

@@ -36,14 +31,11 @@ template withTimerRet(stats: var RunningStat, body: untyped): untyped =

  tmp

proc `%`*(x: Slot): JsonNode {.borrow.}
proc `%`*(x: Epoch): JsonNode {.borrow.}

proc writeJson*(prefix, slot, v: auto) =
  var f: File
  defer: close(f)
  discard open(f, fmt"{prefix:04}-{humaneSlotNum(slot):08}.json", fmWrite)
  write(f, pretty(%*(v)))
  let fileName = fmt"{prefix:04}-{humaneSlotNum(slot):08}.json"
  Json.saveFile(fileName, v, pretty = true)

cli do(slots = 448,
       validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum

@@ -9,7 +9,6 @@ import # Unit test
  ./test_attestation_pool,
  ./test_beacon_chain_db,
  ./test_beacon_node,
  ./test_bitfield,
  ./test_beaconstate,
  ./test_block_pool,
  ./test_helpers,

@@ -21,4 +20,5 @@ import # Unit test
import # Official fixtures
  ./official/test_fixture_shuffling,
  ./official/test_fixture_bls,
  ./official/test_fixture_ssz_uint
  ./official/test_fixture_ssz_uint,
  ./official/test_fixture_ssz_static

@@ -1 +1 @@
Subproject commit 470513eddfd7b4d1d45c908816b966c877c0d232
Subproject commit de468c07c2518cf1546c4cb615418738a2918577

@@ -1,4 +1,6 @@
import
  # Standard library
  os, strutils,
  # Status libs
  stew/byteutils,
  eth/common, serialization, json_serialization,

@@ -72,6 +74,10 @@ type
    handler*: string
    test_cases*: seq[T]

const
  FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
  JsonTestsDir* = FixturesDir / "json_tests"

# #######################
# Default init
proc default*(T: typedesc): T = discard

@@ -87,9 +93,6 @@ proc readValue*[N: static int](r: var JsonReader, a: var array[N, byte]) {.inlin
  # if so export that to nim-eth
  hexToByteArray(r.readValue(string), a)

proc readValue*(r: var JsonReader, a: var ValidatorIndex) {.inline.} =
  a = r.readValue(uint32)

proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =
  ## Custom deserializer for seq[byte]
  a = hexToSeqByte(r.readValue(string))
@@ -0,0 +1,220 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
#  * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
#  * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
  # Standard library
  os, strutils, strformat, tables, unittest, sequtils, typetraits,
  # Status libs
  stew/[byteutils, bitseqs], nimcrypto/hash,
  serialization/testing/tracing,
  json_serialization, json_serialization/lexer,
  # Beacon chain internals
  ../../beacon_chain/ssz,
  ../../beacon_chain/spec/[datatypes, validator, digest, crypto],
  # Test utilities
  ../testutil,
  ./fixtures_utils

const
  failFast = defined(debug) and false
  traceOnFailure = defined(debug)

type
  SpecObject[T] = ref object of RootObj
    obj: ref T

  SszStaticTest* = object
    obj: RootRef
    objType, objJsonRepr: string
    expectedBytes: seq[byte]
    expectedRootHash, expectedSigHash: Eth2Digest
    hasSigHash: bool
    line: int

  ReaderProc = proc(r: var JsonReader): RootRef {.cdecl, gcsafe.}
  TestingProc = proc(file: string, test: SszStaticTest) {.cdecl, gcsafe.}

  SpecTypeVtable = object
    reader: ReaderProc
    tester: TestingProc

let testsDir = JsonTestsDir / "ssz_static" / "core"
let minDevTestFile = getTempDir() / "minimal_ssz_test.json"

var specTypesRTTI = initTable[string, SpecTypeVtable]()

proc readerImpl[T](r: var JsonReader): RootRef {.cdecl, gcsafe.} =
  var res = SpecObject[T](obj: new T)
  res.obj[] = r.readValue(T)
  RootRef(res)

# TODO:
# Fun fact: With mainnet settings, the BeaconState object
# is too large to safely exist as a stack variable. The
# `testerImpl` procedure below will trigger a segmentation
# fault on its very first line because of it.
#
# To work-around this issue, this file uses ref objects
# to store the loaded test cases, but we must compare them
# by value:
template valuesAreEqual[T](a, b: ref T): bool =
  a[] == b[]

template valuesAreEqual[T](a, b: T): bool =
  a == b

template `$`(x: ref auto): string =
  $(x[])

proc readSszValueRef*(input: openarray[byte], T: type): ref T =
  new result
  result[] = readSszValue(input, T)

proc testerImpl[T](path: string, test: SszStaticTest) {.cdecl, gcsafe.} =
  doAssert test.obj != nil
  var obj = SpecObject[T](test.obj)

  template execTest(testOpName, testOp, expectedRes) =
    let ourRes = testOp
    let success = valuesAreEqual(ourRes, expectedRes)
    if not success and traceOnFailure:
      {.gcsafe.}:
        echo "====== ", testOpName, " failed ", path, ":", test.line
        echo " our result:"
        echo "   ", ourRes
        echo " expected result:"
        echo "   ", expectedRes
        when defined(serialization_tracing):
          tracingEnabled = true
          discard testOp
          tracingEnabled = false
        echo "======================================================"
        if failFast: quit 1

    # TODO BEWARE: Passing the boolean expression to `check` directly
    # will trigger a Nim compilation bomb. This is most likely caused
    # by a mis-behaving generics instantiations cache when a function
    # is explicitly instantiated to get its address.
    # There is a recursive instantiation loop of system's `$` operator.
    check success

  # let ob = SSZ.encode(obj.obj)

  when false:
    execTest "serialization",
             (let ourBytes = SSZ.encode(obj.obj); ourBytes),
             test.expectedBytes

  execTest "root hash check",
           hashTreeRoot(obj.obj),
           test.expectedRootHash

  when hasSigningRoot(T):
    doAssert test.hasSigHash
    execTest "sig hash check",
             signingRoot(obj.obj),
             test.expectedSigHash

  when true:
    execTest "roundtrip",
             readSszValueRef(test.expectedBytes, T),
             obj.obj

template addSpecTypeRTTI(T: type) =
  var reader = readerImpl[T]
  var tester = testerImpl[T]
  specTypesRTTI.add(T.name, SpecTypeVtable(reader: reader,
                                           tester: tester))
foreachSpecType(addSpecTypeRTTI)

proc runTest(path: string, test: SszStaticTest) =
  if test.objType != "Unsupported":
    specTypesRTTI[test.objType].tester(path, test)

proc advanceToClosingBrace(lexer: var JsonLexer, openedBraces = 1) =
  var closedBraces = 0
  while closedBraces < openedBraces:
    while lexer.tok notin {tkCurlyLe, tkCurlyRi}:
      lexer.next
    if lexer.tok == tkCurlyLe:
      dec closedBraces
    else:
      inc closedBraces
    lexer.next

proc readValue*(r: var JsonReader, result: var SszStaticTest) {.gcsafe.} =
  r.skipToken tkCurlyLe

  if r.lexer.tok != tkString:
    r.raiseUnexpectedToken(etString)

  var reader: ReaderProc
  let key = r.lexer.strVal
  {.gcsafe.}:
    if not specTypesRTTI.hasKey(key):
      result.objType = "Unsupported"
      r.lexer.advanceToClosingBrace
      return

    result.objType = key
    result.line = r.lexer.line
    reader = specTypesRTTI[key].reader

  r.lexer.next
  r.skipToken tkColon
  r.skipToken tkCurlyLe

  while r.lexer.tok == tkString:
    # TODO: I was hit by a very nasty Nim bug here.
    # If you use `let` on the next line, the variable will be
    # aliased to `r.lexer.strVar` instead of being copied.
    # This will create problems, because the value is modified
    # on the next line.
    var field = r.lexer.strVal
    r.lexer.next
    r.skipToken tkColon

    case field
    of "value":
      result.obj = reader(r)
    of "serialized":
      result.expectedBytes = hexToSeqByte r.readValue(string)
    of "root":
      result.expectedRootHash = Eth2Digest.fromHex r.readValue(string)
    of "signing_root":
      result.expectedSigHash = Eth2Digest.fromHex r.readValue(string)
      result.hasSigHash = true
    else:
      r.raiseUnexpectedField(field, type(result).name)

    if r.lexer.tok == tkComma:
      r.lexer.next()
    else:
      break

  r.skipToken tkCurlyRi
  r.skipToken tkCurlyRi

  when failFast:
    # This will produce faster failures in debug builds
    {.gcsafe.}: runTest result

proc executeSuite(path: string) =
  let sszSuite = path.parseTests SszStaticTest
  suite &"{path}: {sszSuite.title}":
    for sszTest in sszSuite.test_cases:
      test &"test case on line {sszTest.line}":
        runTest path, sszTest

if fileExists(minDevTestFile):
  executeSuite minDevTestFile

for kind, path in walkDir(testsDir):
  if kind notin {pcFile, pcLinkToFile}: continue
  if const_preset in path:
    executeSuite path
@@ -0,0 +1,2 @@
-d:"serialization_tracing"
-d:"ssz_testing"

@@ -25,8 +25,8 @@ type
    ssz*: seq[byte]
    tags*: seq[string]

const TestFolder = currentSourcePath.rsplit(DirSep, 1)[0]
const TestsPath = "fixtures" / "json_tests" / "ssz_generic" / "uint"
const
  TestsDir = JsonTestsDir / "ssz_generic" / "uint"

func to(val: string, T: typedesc): T =
  when T is StUint:

@@ -97,18 +97,18 @@ proc runSSZUintTest(inputTests: Tests[SSZUint]) =

suite "Official - SSZ unsigned integer tests" & preset():
  block: # "Integers right at or beyond the bounds of the allowed value range"
    let uintBounds = parseTests(TestFolder / TestsPath / "uint_bounds.json", SSZUint)
    let uintBounds = parseTests(TestsDir / "uint_bounds.json", SSZUint)
    test uintBounds.summary & preset():
      runSSZUintTest(uintBounds)

  block: # "Random integers chosen uniformly over the allowed value range"
    let uintRandom = parseTests(TestFolder / TestsPath / "uint_random.json", SSZUint)
    let uintRandom = parseTests(TestsDir / "uint_random.json", SSZUint)
    test uintRandom.summary & preset():
      runSSZUintTest(uintRandom)

  # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
  block: # "Serialized integers that are too short or too long"
    let uintWrongLength = parseTests(TestFolder / TestsPath / "uint_wrong_length.json", SSZUint)
    let uintWrongLength = parseTests(TestsDir / "uint_wrong_length.json", SSZUint)
    test "[Skipped] " & uintWrongLength.summary & preset():
      # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
      echo " [Skipped] Pending https://github.com/status-im/nim-beacon-chain/issues/280"

@@ -23,8 +23,9 @@ mkdir -p "$VALIDATORS_DIR"
cd "$GIT_ROOT"
mkdir -p $BUILD_OUTPUTS_DIR

# Run with "SHARD_COUNT=8 ./start.sh" to change these
DEFS="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024
# Run with "SHARD_COUNT=4 ./start.sh" to change these
DEFS="-d:chronicles_log_level=DEBUG "
DEFS+="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024
DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-8} " # Spec default: 64
DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-12} " # Spec default: 6

@@ -1,27 +0,0 @@
import
  unittest,
  ../beacon_chain/spec/[bitfield]

suite "BitField":
  test "roundtrips":
    var
      a = BitField.init(100)
      b = BitField.init(100)

    check:
      not a.get_bitfield_bit(0)

    a.set_bitfield_bit(1)

    check:
      not a.get_bitfield_bit(0)
      a.get_bitfield_bit(1)

    b.set_bitfield_bit(2)

    a.combine(b)

    check:
      not a.get_bitfield_bit(0)
      a.get_bitfield_bit(1)
      a.get_bitfield_bit(2)
@ -7,89 +7,77 @@

import
  unittest, sequtils, options,
  nimcrypto, eth/common, blscurve, serialization/testing/generic_suite,
  ../beacon_chain/ssz, ../beacon_chain/spec/[datatypes, digest]
  stint, nimcrypto, eth/common, blscurve, serialization/testing/generic_suite,
  ../beacon_chain/spec/[datatypes, digest],
  ../beacon_chain/ssz, ../beacon_chain/ssz/navigator

func filled[N: static[int], T](typ: type array[N, T], value: T): array[N, T] =
  for val in result.mitems:
    val = value
type
  SomeEnum = enum
    A, B, C

func filled(T: type MDigest, value: byte): T =
  for val in result.data.mitems:
    val = value
  Simple = object
    flag: bool
    # count: StUint[256]
    # ignored {.dontSerialize.}: string
    # data: array[256, bool]

suite "Simple serialization":
  # pending spec updates in
  #   - https://github.com/ethereum/eth2.0-specs
  type
    Foo = object
      f0: uint8
      f1: uint32
      f2: EthAddress
      f3: MDigest[256]
      f4: seq[byte]
      f5: ValidatorIndex
template reject(stmt) =
  assert(not compiles(stmt))

  let expected_deser = Foo(
    f0: 5,
    f1: 0'u32 - 3,
    f2: EthAddress.filled(byte 35),
    f3: MDigest[256].filled(byte 35),
    f4: @[byte 'c'.ord, 'o'.ord, 'w'.ord],
    f5: ValidatorIndex(79))
static:
  assert isFixedSize(bool) == true

  var expected_ser = @[
    byte 67, 0, 0, 0, # length
    5,
    0xFD, 0xFF, 0xFF, 0xFF,
  ]
  expected_ser &= EthAddress.filled(byte 35)
  expected_ser &= MDigest[256].filled(byte 35).data
  expected_ser &= [byte 3, 0, 0, 0, 'c'.ord, 'o'.ord, 'w'.ord]
  expected_ser &= [byte 79, 0, 0]
  assert fixedPortionSize(array[10, bool]) == 10
  assert fixedPortionSize(array[SomeEnum, uint64]) == 24
  assert fixedPortionSize(array[3..5, string]) == 12

  test "Object deserialization":
    let deser = SSZ.decode(expected_ser, Foo)
    check: expected_deser == deser
  assert fixedPortionSize(string) == 4
  assert fixedPortionSize(seq[bool]) == 4
  assert fixedPortionSize(seq[string]) == 4

  test "Object serialization":
    let ser = SSZ.encode(expected_deser)
    check: expected_ser == ser
  assert isFixedSize(array[20, bool]) == true
  assert isFixedSize(Simple) == true
  assert isFixedSize(string) == false
  assert isFixedSize(seq[bool]) == false
  assert isFixedSize(seq[string]) == false

  test "Not enough data":
    expect SerializationError:
      let x = SSZ.decode(expected_ser[0..^2], Foo)
  reject fixedPortionSize(int)

    expect SerializationError:
      let x = SSZ.decode(expected_ser[1..^1], Foo)
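The new static asserts capture the core SSZ sizing rule: fixed-size types occupy their full byte width in an object's fixed portion, while variable-size types (string, seq) contribute only a 4-byte offset there, with their payload appended afterwards. A hedged restatement of the asserted values (not part of the diff):

import ../beacon_chain/ssz

static:
  # Each of the 3 string elements contributes one 4-byte offset,
  # matching fixedPortionSize(array[3..5, string]) == 12 above.
  assert fixedPortionSize(array[3..5, string]) == 3 * 4
  # A bare variable-size type is just one offset in the fixed portion.
  assert fixedPortionSize(seq[bool]) == 4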
type
  ObjWithFields = object
    f0: uint8
    f1: uint32
    f2: EthAddress
    f3: MDigest[256]
    f4: seq[byte]
    f5: ValidatorIndex

  test "ValidatorIndex roundtrip":
    # https://github.com/nim-lang/Nim/issues/10027
    let v = 79.ValidatorIndex
    let ser = SSZ.encode(v)
    check:
      ser.len() == 3
      SSZ.decode(ser, v.type) == v
static:
  assert fixedPortionSize(ObjWithFields) == 1 + 4 + sizeof(EthAddress) + (256 div 8) + 4 + 8
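The fixed-portion arithmetic above breaks down as 1 (uint8) + 4 (uint32) + 20 (EthAddress) + 32 (MDigest[256] is 256 div 8 bytes) + 4 (offset of the variable-size seq[byte]) + 8 (ValidatorIndex), 69 bytes in total. Note the contrast with the removed roundtrip test, which expects a 3-byte ValidatorIndex; the 8-byte figure presumably reflects the spec's move to a uint64 index. A hedged restatement of the sum:

static:
  # 1 + 4 + 20 + 32 + 4 + 8 = 69 bytes of fixed portion
  assert 1 + 4 + 20 + (256 div 8) + 4 + 8 == 69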
SSZ.roundtripTest [1, 2, 3]
SSZ.roundtripTest @[1, 2, 3]
SSZ.roundtripTest SigKey.random().getKey()
SSZ.roundtripTest BeaconBlock(
  slot: 42.Slot, signature: sign(SigKey.random(), 0'u64, ""))
SSZ.roundtripTest BeaconState(slot: 42.Slot)
executeRoundTripTests SSZ

# suite "Tree hashing":
#   # TODO The test values are taken from an earlier version of SSZ and have
#   # nothing to do with upstream - needs verification and proper test suite
type
  Foo = object
    bar: Bar

#   test "Hash BeaconBlock":
#     let vr = BeaconBlock()
#     check:
#       $hash_tree_root(vr) ==
#         "8951C9C64ABA469EBA78F5D9F9A0666FB697B8C4D86901445777E4445D0B1543"
  Bar = object
    b: string
    baz: Baz

  Baz = object
    i: uint64

suite "SSZ Navigation":
  test "simple object fields":
    var foo = Foo(bar: Bar(b: "bar", baz: Baz(i: 10'u64)))
    let encoded = SSZ.encode(foo)

    check SSZ.decode(encoded, Foo) == foo

    let mountedFoo = sszMount(encoded, Foo)
    check mountedFoo.bar.b == "bar"

    let mountedBar = mountedFoo.bar
    check mountedBar.baz.i == 10'u64

#   test "Hash BeaconState":
#     let vr = BeaconState()
#     check:
#       $hash_tree_root(vr) ==
#         "66F9BF92A690F1FBD36488D98BE70DA6C84100EDF935BC6D0B30FF14A2976455"
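The navigator is the interesting new surface in this file: sszMount wraps an encoded byte blob and resolves field accesses against the serialized layout, so nested fields can apparently be read without decoding the whole object first. A hedged usage sketch, reusing the Foo/Bar/Baz types defined by the suite above:

import ../beacon_chain/ssz, ../beacon_chain/ssz/navigator

# Field access on the mounted view; only the bytes backing baz.i are read.
let encoded = SSZ.encode(Foo(bar: Bar(b: "bar", baz: Baz(i: 10'u64))))
echo sszMount(encoded, Foo).bar.baz.i # -> 10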
@ -10,18 +10,27 @@ import
chronicles, eth/trie/[db],
../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, state_transition,
  validator_pool, beacon_node_types],
../beacon_chain/spec/[beaconstate, bitfield, crypto, datatypes, digest,
../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
  helpers, validator]

func preset*(): string =
  " [Preset: " & const_preset & ']'

func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
  # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
  # lighthouse.
  # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
  var bytes = uint64(i + 1000).toBytesLE()
  copyMem(addr result.x[0], addr bytes[0], sizeof(bytes))
when ValidatorPrivKey is BlsValue:
  func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
    # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
    # lighthouse.
    # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
    result.kind = BlsValueType.Real
    var bytes = uint64(i + 1000).toBytesLE()
    copyMem(addr result.blsValue.x[0], addr bytes[0], sizeof(bytes))
else:
  func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
    # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
    # lighthouse.
    # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
    var bytes = uint64(i + 1000).toBytesLE()
    copyMem(addr result.x[0], addr bytes[0], sizeof(bytes))

func makeFakeHash*(i: int): Eth2Digest =
  var bytes = uint64(i).toBytesLE()
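The when ValidatorPrivKey is BlsValue branch is resolved at compile time, so only the variant matching the concrete key type is compiled; the discarded branch may freely reference fields (kind, blsValue) that do not exist on the alternative. A hedged toy illustration of the mechanism, with made-up names:

type Wrapped = object
  payload: int

when Wrapped is object:
  # This branch is compiled; the other is parsed but never semantically checked.
  proc describe(): string = "object variant"
else:
  proc describe(): string = "other variant"

doAssert describe() == "object variant"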
@ -170,9 +179,8 @@ proc makeAttestation*(

  doAssert sac_index != -1, "find_shard_committee should guarantee this"

  var
    aggregation_bits = BitField.init(committee.len)
  set_bitfield_bit(aggregation_bits, sac_index)
  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
  aggregation_bits.raiseBit sac_index

  let
    msg = hash_tree_root(
@ -192,7 +200,7 @@ proc makeAttestation*(

    data: data,
    aggregation_bits: aggregation_bits,
    signature: sig,
    custody_bits: BitField.init(committee.len)
    custody_bits: CommitteeValidatorsBits.init(committee.len)
  )

proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =
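Taken together, the makeAttestation hunks show the new bitlist pattern end to end: allocate an all-zero CommitteeValidatorsBits sized to the committee, raise exactly the attester's bit, and leave custody_bits untouched at all zero. A hedged condensation, with committeeLen/myIndex as illustrative names only:

var aggregation_bits = CommitteeValidatorsBits.init(committeeLen)
aggregation_bits.raiseBit myIndex # only this validator participates
let custody_bits = CommitteeValidatorsBits.init(committeeLen) # stays all-zero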