Implement the latest SSZ specification and integrate the official SSZ test suite
parent 7a4b7a6cfb
commit 398ea55801
@@ -1,7 +1,7 @@
 import
   deques, options, sequtils, tables,
-  chronicles,
-  ./spec/[beaconstate, bitfield, datatypes, crypto, digest, helpers, validator],
+  chronicles, stew/bitseqs,
+  ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
   ./extras, ./beacon_chain_db, ./ssz, ./block_pool,
   beacon_node_types

@@ -61,11 +61,11 @@ proc validate(
     finalizedEpoch = humaneEpochNum(state.finalized_checkpoint.epoch)
     return

-  if not allIt(attestation.custody_bits.bits, it == 0):
+  if not allIt(attestation.custody_bits.bytes, it == 0):
    notice "Invalid custody bitfield for phase 0"
    return false

-  if not anyIt(attestation.aggregation_bits.bits, it != 0):
+  if not anyIt(attestation.aggregation_bits.bytes, it != 0):
    notice "Empty aggregation bitfield"
    return false

@@ -211,8 +211,7 @@ proc add*(pool: var AttestationPool,
       # Attestations in the pool that are a subset of the new attestation
       # can now be removed per same logic as above
       a.validations.keepItIf(
-        if it.aggregation_bits.isSubsetOf(
-            validation.aggregation_bits):
+        if it.aggregation_bits.isSubsetOf(validation.aggregation_bits):
           debug "Removing subset attestation",
             existingParticipants = get_attesting_indices_seq(
               state, a.data, it.aggregation_bits),

@@ -314,10 +313,8 @@ proc getAttestationsForBlock*(
         # and naively add as much as possible in one go, by we could also
         # add the same attestation data twice, as long as there's at least
         # one new attestation in there
-        if not attestation.aggregation_bits.overlaps(
-            v.aggregation_bits):
-          attestation.aggregation_bits.combine(
-            v.aggregation_bits)
+        if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
+          attestation.aggregation_bits.combine(v.aggregation_bits)
           attestation.custody_bits.combine(v.custody_bits)
           attestation.signature.combine(v.aggregate_signature)
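Note: the isSubsetOf/overlaps/combine calls above are plain bitwise operations over the bitlist's backing bytes. A minimal sketch of their semantics on raw byte sequences of equal length (hypothetical standalone helpers, not the actual stew/bitseqs implementation):

    # a is a subset of b when every bit set in a is also set in b
    func isSubsetOf(a, b: seq[byte]): bool =
      for i in 0 ..< a.len:
        if (a[i] and not b[i]) != 0'u8: return false
      true

    # two bitfields overlap when they share at least one set bit
    func overlaps(a, b: seq[byte]): bool =
      for i in 0 ..< a.len:
        if (a[i] and b[i]) != 0'u8: return true

    # combining is a bitwise OR, mirroring signature aggregation
    func combine(tgt: var seq[byte], src: seq[byte]) =
      for i in 0 ..< tgt.len:
        tgt[i] = tgt[i] or src[i]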
@@ -1,9 +1,9 @@
 import
   net, sequtils, options, tables, osproc, random, strutils, times, strformat,
-  stew/shims/os, stew/objects,
+  stew/shims/os, stew/[objects, bitseqs],
   chronos, chronicles, confutils, serialization/errors,
   eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,
-  spec/[bitfield, datatypes, digest, crypto, beaconstate, helpers, validator],
+  spec/[datatypes, digest, crypto, beaconstate, helpers, validator],
   conf, time, state_transition, fork_choice, ssz, beacon_chain_db,
   validator_pool, extras, attestation_pool, block_pool, eth2_network,
   beacon_node_types, mainchain_monitor, trusted_state_snapshots, version,

@@ -309,15 +309,15 @@ proc sendAttestation(node: BeaconNode,
   let
     validatorSignature = await validator.signAttestation(attestationData)

-  var aggregationBitfield = BitField.init(committeeLen)
-  set_bitfield_bit(aggregationBitfield, indexInCommittee)
+  var aggregationBits = CommitteeValidatorsBits.init(committeeLen)
+  aggregationBits.raiseBit indexInCommittee

   var attestation = Attestation(
     data: attestationData,
     signature: validatorSignature,
-    aggregation_bits: aggregationBitfield,
+    aggregation_bits: aggregationBits,
     # Stub in phase0
-    custody_bits: BitField.init(committeeLen)
+    custody_bits: CommitteeValidatorsBits.init(committeeLen)
   )

   node.network.broadcast(topicAttestations, attestation)
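Note: CommitteeValidatorsBits is a bitlist sized to the committee, one bit per member; raiseBit marks this validator's position before broadcast. A rough usage sketch with made-up sizes:

    # hypothetical values: a 5-member committee, attester at position 2
    var bits = CommitteeValidatorsBits.init(5)  # all bits start low
    bits.raiseBit 2                             # mark our committee slot
    doAssert bits[2] and not bits[0]            # only position 2 is set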
@@ -1,7 +1,7 @@
 import
   sets, deques, tables,
-  eth/keys,
-  spec/[bitfield, datatypes, crypto, digest],
+  eth/keys, stew/bitseqs,
+  spec/[datatypes, crypto, digest],
   beacon_chain_db, conf, mainchain_monitor, eth2_network, time

 type

@@ -45,8 +45,8 @@ type
   #
   # #############################################
   Validation* = object
-    aggregation_bits*: BitField
-    custody_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits
+    custody_bits*: CommitteeValidatorsBits ##\
     ## Phase 1 - the handling of this field is probably broken..
     aggregate_signature*: ValidatorSig

@@ -54,7 +54,7 @@ type
   # Yeah, you can do any linear combination of signatures. but you have to
   # remember the linear combination of pubkeys that constructed
   # if you have two instances of a signature from pubkey p, then you need 2*p
-  # in the group pubkey because the attestation bitfield is only 1 bit per
+  # in the group pubkey because the attestation bitlist is only 1 bit per
   # pubkey right now, attestations do not support this it could be extended to
   # support N overlaps up to N times per pubkey if we had N bits per validator
   # instead of 1
@@ -12,7 +12,7 @@ type
   FetchAncestorsResponseHandler = proc (b: BeaconBlock) {.gcsafe.}

 proc fetchAncestorBlocksFromPeer(peer: Peer, rec: FetchRecord, responseHandler: FetchAncestorsResponseHandler) {.async.} =
-  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots.int, 0, 1)
+  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots, 0, true)
   if blocks.isSome:
     for b in blocks.get:
       responseHandler(b)
@@ -6,10 +6,10 @@
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

 import
-  algorithm, chronicles, collections/sets, math, options, sequtils,
+  tables, algorithm, sets, math, options, sequtils,
+  chronicles, stew/bitseqs,
   ../extras, ../ssz, ../beacon_node_types,
-  ./bitfield, ./crypto, ./datatypes, ./digest, ./helpers, ./validator,
-  tables
+  ./crypto, ./datatypes, ./digest, ./helpers, ./validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#verify_merkle_branch
 func verify_merkle_branch(leaf: Eth2Digest, proof: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =

@@ -357,8 +357,8 @@ func is_valid_indexed_attestation*(
   # Check if ``indexed_attestation`` has valid indices and signature.

   let
-    bit_0_indices = indexed_attestation.custody_bit_0_indices
-    bit_1_indices = indexed_attestation.custody_bit_1_indices
+    bit_0_indices = indexed_attestation.custody_bit_0_indices.asSeq
+    bit_1_indices = indexed_attestation.custody_bit_1_indices.asSeq

   # Verify no index has custody bit equal to 1 [to be removed in phase 1]
   if len(bit_1_indices) != 0:

@@ -370,7 +370,7 @@ func is_valid_indexed_attestation*(
     return false

   # Verify index sets are disjoint
-  if len(intersection(toSet(bit_0_indices), toSet(bit_1_indices))) != 0:
+  if len(intersection(bit_0_indices.toSet, bit_1_indices.toSet)) != 0:
     return false

   # Verify indices are sorted

@@ -405,11 +405,11 @@ func is_valid_indexed_attestation*(
 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_attesting_indices
 func get_attesting_indices*(state: BeaconState,
                             attestation_data: AttestationData,
-                            bitfield: BitField,
+                            bits: CommitteeValidatorsBits,
                             stateCache: var StateCache):
                             HashSet[ValidatorIndex] =
   ## Return the sorted attesting indices corresponding to ``attestation_data``
-  ## and ``bitfield``.
+  ## and ``bits``.
   ## The spec goes through a lot of hoops to sort things, and sometimes
   ## constructs sets from the results here. The basic idea is to always
   ## just keep it in a HashSet, which seems to suffice. If needed, it's

@@ -420,15 +420,15 @@ func get_attesting_indices*(state: BeaconState,
     state, attestation_data.target.epoch, attestation_data.crosslink.shard,
     stateCache)
   for i, index in committee:
-    if get_bitfield_bit(bitfield, i):
+    if bits[i]:
       result.incl index

-func get_attesting_indices_seq*(
-  state: BeaconState, attestation_data: AttestationData, bitfield: BitField):
-    seq[ValidatorIndex] =
+func get_attesting_indices_seq*(state: BeaconState,
+                                attestation_data: AttestationData,
+                                bits: CommitteeValidatorsBits): seq[ValidatorIndex] =
   var cache = get_empty_per_epoch_cache()
   toSeq(items(get_attesting_indices(
-    state, attestation_data, bitfield, cache)))
+    state, attestation_data, bits, cache)))

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_indexed_attestation
 func get_indexed_attestation(state: BeaconState, attestation: Attestation,

@@ -469,12 +469,12 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation,
   ## 0.6.3 highlights and explicates) except in that the spec,
   ## for no obvious reason, verifies it.
   IndexedAttestation(
-    custody_bit_0_indices: sorted(
+    custody_bit_0_indices: CustodyBitIndices sorted(
       mapIt(custody_bit_0_indices, it.uint64), system.cmp),
     # toSeq pointlessly constructs int-indexable copy so mapIt can infer type;
     # see above
-    custody_bit_1_indices:
-      sorted(mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
+    custody_bit_1_indices: CustodyBitIndices sorted(
+      mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
       system.cmp),
     data: attestation.data,
     signature: attestation.signature,
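Note: position i in the bitlist refers to committee[i], not to a global validator index; get_attesting_indices simply filters the committee through the bits. A toy illustration with made-up indices:

    # hypothetical committee; only position 1 attested
    let committee = @[ValidatorIndex(17), ValidatorIndex(42), ValidatorIndex(3)]
    var bits = CommitteeValidatorsBits.init(committee.len)
    bits.raiseBit 1
    for i, index in committee:
      if bits[i]:
        echo index  # prints only 42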
@@ -1,50 +0,0 @@
-import stew/byteutils, json_serialization, stew/bitops2
-
-type
-  BitField* = object
-    ## A simple bit field type that follows the semantics of the spec, with
-    ## regards to bit endian operations
-    # TODO stew contains utilities for with bitsets - could try to
-    # recycle that, but there are open questions about bit endianess there.
-    bits*: seq[byte]
-
-func ceil_div8(v: int): int = (v + 7) div 8
-
-func init*(T: type BitField, bits: int): BitField =
-  BitField(bits: newSeq[byte](ceil_div8(bits)))
-
-# TODO fix this for state tests..
-#proc readValue*(r: var JsonReader, a: var BitField) {.inline.} =
-#  a.bits = r.readValue(string).hexToSeqByte()
-
-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_bitfield_bit
-func get_bitfield_bit*(bitfield: BitField, i: int): bool =
-  # Extract the bit in ``bitfield`` at position ``i``.
-  doAssert 0 <= i div 8, "i: " & $i & " i div 8: " & $(i div 8)
-  doAssert i div 8 < bitfield.bits.len, "i: " & $i & " i div 8: " & $(i div 8)
-  ((bitfield.bits[i div 8] shr (i mod 8)) mod 2) > 0'u8
-
-# TODO spec candidatidates below, though they're used only indirectly there..
-func set_bitfield_bit*(bitfield: var BitField, i: int) =
-  bitfield.bits[i div 8] = bitfield.bits[i div 8] or 1'u8 shl (i mod 8)
-
-func combine*(tgt: var BitField, src: BitField) =
-  for i in 0 ..< tgt.bits.len:
-    tgt.bits[i] = tgt.bits[i] or src.bits[i]
-
-func overlaps*(a, b: BitField): bool =
-  for i in 0..<a.bits.len:
-    if (a.bits[i] and b.bits[i]) > 0'u8:
-      return true
-
-func countOnes*(a: BitField): int {.inline.} =
-  for v in a.bits: result += countOnes(v)
-
-func len*(a: BitField): int {.inline.} =
-  countOnes(a)
-
-func isSubsetOf*(a, b: BitField): bool =
-  for i in 0 ..< (len(a.bits) * 8):
-    if get_bitfield_bit(a, i) and not get_bitfield_bit(b, i):
-      return false
-  true
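Note: the deleted module's functionality moves to stew/bitseqs plus the BitList wrappers added to datatypes.nim later in this commit; roughly, the correspondence is:

    # old bitfield.nim            -> replacement
    # BitField.init(n)            -> CommitteeValidatorsBits.init(n)
    # get_bitfield_bit(bf, i)     -> bits[i]
    # set_bitfield_bit(bf, i)     -> bits.raiseBit i
    # bf.bits                     -> bits.bytes
    # combine / overlaps /        -> same names, forwarded to BitSeq
    #   isSubsetOf / countOnes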
Binary file not shown.
@@ -46,7 +46,7 @@

 import
   sequtils,
-  hashes, eth/rlp,
+  stew/objects, hashes, eth/rlp, nimcrypto/utils,
   blscurve, json_serialization,
   digest

@@ -56,20 +56,102 @@ export
 export blscurve.init, blscurve.getBytes, blscurve.combine, blscurve.`$`, blscurve.`==`

 type
-  ValidatorPubKey* = blscurve.VerKey
+  BlsValueType* = enum
+    Real
+    OpaqueBlob
+
+  BlsValue*[T] = object
+    # TODO This is a temporary type needed until we sort out the
+    # issues with invalid BLS values appearing in the SSZ test suites.
+    case kind*: BlsValueType
+    of Real:
+      blsValue*: T
+    of OpaqueBlob:
+      when T is blscurve.Signature:
+        blob*: array[96, byte]
+      else:
+        blob*: array[48, byte]
+
+  ValidatorPubKey* = BlsValue[blscurve.VerKey]
+  # ValidatorPubKey* = blscurve.VerKey
+
+  # ValidatorPubKey* = array[48, byte]
+  # The use of byte arrays proved to be a dead end pretty quickly.
+  # Plenty of code needs to be modified for a successful build and
+  # the changes will negatively affect the performance.
+
+  # ValidatorPrivKey* = BlsValue[blscurve.SigKey]
   ValidatorPrivKey* = blscurve.SigKey
-  ValidatorSig* = blscurve.Signature
+
+  ValidatorSig* = BlsValue[blscurve.Signature]
+
+  BlsCurveType* = VerKey|SigKey|Signature
   ValidatorPKI* = ValidatorPrivKey|ValidatorPubKey|ValidatorSig

-func shortLog*(x: ValidatorPKI): string =
+proc init*[T](BLS: type BlsValue[T], val: auto): BLS =
+  result.kind = BlsValueType.Real
+  result.blsValue = init(T, val)
+
+func `$`*(x: BlsValue): string =
+  if x.kind == Real:
+    $x.blsValue
+  else:
+    "r:" & toHex(x.blob)
+
+func `==`*(a, b: BlsValue): bool =
+  if a.kind != b.kind: return false
+  if a.kind == Real:
+    return a.blsValue == b.blsValue
+  else:
+    return a.blob == b.blob
+
+func getBytes*(x: BlsValue): auto =
+  if x.kind == Real:
+    getBytes x.blsValue
+  else:
+    x.blob
+
+func shortLog*(x: BlsValue): string =
   ($x)[0..7]

-template hash*(k: ValidatorPubKey|ValidatorPrivKey): Hash =
-  hash(k.getBytes())
+func shortLog*(x: BlsCurveType): string =
+  ($x)[0..7]

-func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey = pk.getKey()
+proc hash*(x: BlsValue): Hash {.inline.} =
+  if x.kind == Real:
+    hash x.blsValue.getBytes()
+  else:
+    hash x.blob

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_aggregate_pubkeys
+template hash*(x: BlsCurveType): Hash =
+  hash(getBytes(x))
+
+template `==`*[T](a: BlsValue[T], b: T): bool =
+  a.blsValue == b
+
+template `==`*[T](a: T, b: BlsValue[T]): bool =
+  a == b.blsValue
+
+func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey =
+  when ValidatorPubKey is BlsValue:
+    ValidatorPubKey(kind: Real, blsValue: pk.getKey())
+  elif ValidatorPubKey is array:
+    pk.getKey.getBytes
+  else:
+    pk.getKey
+
+proc combine*[T](a: openarray[BlsValue[T]]): T =
+  doAssert a.len > 0 and a[0].kind == Real
+  result = a[0].blsValue
+  for i in 1 ..< a.len:
+    doAssert a[i].kind == Real
+    result.combine a[i].blsValue
+
+proc combine*[T](x: var BlsValue[T], other: BlsValue[T]) =
+  doAssert x.kind == Real and other.kind == Real
+  x.blsValue.combine(other.blsValue)
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_aggregate_pubkeys
 func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
   var empty = true
   for key in keys:
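Note: BlsValue exists so the official SSZ test vectors, which contain byte strings that are not valid curve points, can still round-trip: OpaqueBlob just carries the raw bytes, while only Real values take part in actual signing and verification. A minimal behavioural sketch based on the definitions above:

    # an OpaqueBlob signature carries 96 raw bytes and nothing else
    var sig = ValidatorSig(kind: OpaqueBlob)   # blob defaults to zeroes
    doAssert sig.getBytes().len == 96
    doAssert $sig == "r:" & toHex(sig.blob)    # printed with an opaque marker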
@@ -79,14 +161,18 @@ func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
     else:
       result.combine(key)

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify
 func bls_verify*(
     pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
     domain: uint64): bool =
   # name from spec!
+  when ValidatorPubKey is BlsValue:
+    doAssert sig.kind == Real and pubkey.kind == Real
+    sig.blsValue.verify(msg, domain, pubkey.blsValue)
+  else:
     sig.verify(msg, domain, pubkey)

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify_multiple
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify_multiple
 func bls_verify_multiple*(
     pubkeys: seq[ValidatorPubKey], message_hashes: openArray[Eth2Digest],
     sig: ValidatorSig, domain: uint64): bool =
@@ -98,49 +184,94 @@ func bls_verify_multiple*(
     let (pubkey, message_hash) = pubkey_message_hash
     # TODO spec doesn't say to handle this specially, but it's silly to
     # validate without any actual public keys.
-    if pubkey != ValidatorPubKey() and
-       not sig.verify(message_hash.data, domain, pubkey):
+    if not pubkey.bls_verify(message_hash.data, sig, domain):
       return false

   true

+when ValidatorPrivKey is BlsValue:
   func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
                  domain: uint64): ValidatorSig =
     # name from spec!
-    key.sign(domain, msg)
+    if key.kind == Real:
+      ValidatorSig(kind: Real, blsValue: key.blsValue.sign(domain, msg))
+    else:
+      ValidatorSig(kind: OpaqueBlob)
+else:
+  func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
+                 domain: uint64): ValidatorSig =
+    # name from spec!
+    ValidatorSig(kind: Real, blsValue: key.sign(domain, msg))
+
+proc fromBytes*[T](R: type BlsValue[T], bytes: openarray[byte]): R =
+  when defined(ssz_testing):
+    result = R(kind: OpaqueBlob, blob: toArray(result.blob.len, bytes))
+  else:
+    result = R(kind: Real, blsValue: init(T, bytes))
+
+proc initFromBytes*[T](val: var BlsValue[T], bytes: openarray[byte]) =
+  val = fromBytes(BlsValue[T], bytes)
+
+proc initFromBytes*(val: var BlsCurveType, bytes: openarray[byte]) =
+  val = init(type(val), bytes)
+
 proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey) {.inline.} =
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
     writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPubKey) {.inline.} =
-  value = VerKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorSig) {.inline.} =
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
     writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorSig) {.inline.} =
-  value = Signature.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorPrivKey) {.inline.} =
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
     writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey) {.inline.} =
-  value = SigKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

-proc newPrivKey*(): ValidatorPrivKey = SigKey.random()
+when ValidatorPrivKey is BlsValue:
+  proc newPrivKey*(): ValidatorPrivKey =
+    ValidatorPrivKey(kind: Real, blsValue: SigKey.random())
+else:
+  proc newPrivKey*(): ValidatorPrivKey =
+    SigKey.random()

 # RLP serialization (TODO: remove if no longer necessary)
+when ValidatorPubKey is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
   proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
     writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorPubKey): T {.inline.} =
-  result = ValidatorPubKey.init(rlp.toBytes.toOpenArray)
-  rlp.skipElem()
+  result = fromBytes(T, rlp.toBytes)

+when ValidatorSig is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorSig) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
   proc append*(writer: var RlpWriter, value: ValidatorSig) =
     writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorSig): T {.inline.} =
-  result = ValidatorSig.init(rlp.toBytes.toOpenArray)
-  rlp.skipElem()
+  let bytes = fromBytes(T, rlp.toBytes)
@@ -18,9 +18,9 @@
 # types / composition

 import
-  hashes, math, json,
-  chronicles, eth/[common, rlp],
-  ./bitfield, ./crypto, ./digest
+  macros, hashes, math, json, strutils,
+  stew/[byteutils, bitseqs], chronicles, eth/[common, rlp],
+  ../ssz/types, ./crypto, ./digest

 # TODO Data types:
 # Presently, we're reusing the data types from the serialization (uint64) in the

@@ -41,7 +41,7 @@ import

 # Constant presets
 # https://github.com/ethereum/eth2.0-specs/tree/v0.6.3/configs/constant_presets/
-const const_preset*{.strdefine.} = "mainnet"
+const const_preset* {.strdefine.} = "minimal"

 when const_preset == "mainnet":
   import ./presets/mainnet

@@ -63,16 +63,21 @@ const
   GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
   ## compute_epoch_of_slot(GENESIS_SLOT)

+  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
+
   # Not part of spec. Still useful, pending removing usage if appropriate.
   ZERO_HASH* = Eth2Digest()

+template maxSize*(n: int) {.pragma.}
+
 type
   ValidatorIndex* = range[0'u32 .. 0xFFFFFF'u32] # TODO: wrap-around

   Shard* = uint64
   Gwei* = uint64
   Domain* = uint64

+  BitList*[maxLen: static int] = distinct BitSeq
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#proposerslashing
   ProposerSlashing* = object
     proposer_index*: uint64 ##\
@@ -91,11 +96,13 @@ type
     attestation_2*: IndexedAttestation ## \
     ## Second attestation

+  CustodyBitIndices* = List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#indexedattestation
   IndexedAttestation* = object
     # These probably should be seq[ValidatorIndex], but that throws RLP errors
-    custody_bit_0_indices*: seq[uint64]
-    custody_bit_1_indices*: seq[uint64]
+    custody_bit_0_indices*: CustodyBitIndices
+    custody_bit_1_indices*: CustodyBitIndices

     data*: AttestationData ## \
     ## Attestation data

@@ -103,15 +110,17 @@ type
     signature*: ValidatorSig ## \
     ## Aggregate signature

+  CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#attestation
   Attestation* = object
-    aggregation_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits ##\
     ## Attester aggregation bitfield

     data*: AttestationData ##\
     ## Attestation data

-    custody_bits*: BitField ##\
+    custody_bits*: CommitteeValidatorsBits ##\
     ## Custody bitfield

     signature*: ValidatorSig ##\

@@ -143,7 +152,7 @@ type

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#deposit
   Deposit* = object
-    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] ##\
+    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
     ## Merkle path to deposit data list root

     data*: DepositData

@@ -159,9 +168,6 @@ type
     amount*: uint64 ##\
     ## Amount in Gwei

-    # TODO remove, not in spec
-    dummy*: uint64
-
     signature*: ValidatorSig ##\
     ## Container self-signature

@@ -280,7 +286,7 @@ type

     # Shuffling
     start_shard*: Shard
-    randao_mixes*: array[LATEST_RANDAO_MIXES_LENGTH, Eth2Digest]
+    randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]

     active_index_roots*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] ##\
     ## Active index digests for light clients

@@ -348,10 +354,10 @@ type

   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#pendingattestation
   PendingAttestation* = object
-    aggregation_bits*: BitField ## Attester participation bitfield
+    aggregation_bits*: CommitteeValidatorsBits ## Attester participation bitfield
     data*: AttestationData ## Attestation data
     inclusion_delay*: uint64 ## Inclusion delay
-    proposer_index*: ValidatorIndex ## Proposer index
+    proposer_index*: uint64 ## Proposer index

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#historicalbatch
   HistoricalBatch* = object
@@ -382,6 +388,65 @@ type
     data*: BeaconState
     root*: Eth2Digest # hash_tree_root (not signing_root!)

+template foreachSpecType*(op: untyped) =
+  ## These are all spec types that will appear in network messages
+  ## and persistent consensus data. This helper template is useful
+  ## for populating RTTI tables that concern them.
+  op Attestation
+  op AttestationData
+  op AttestationDataAndCustodyBit
+  op AttesterSlashing
+  op BeaconBlock
+  op BeaconBlockBody
+  op BeaconBlockHeader
+  op BeaconState
+  op Crosslink
+  op Deposit
+  op DepositData
+  op Eth1Data
+  op Fork
+  op HistoricalBatch
+  op IndexedAttestation
+  op PendingAttestation
+  op ProposerSlashing
+  op Transfer
+  op Validator
+  op VoluntaryExit
+
+macro fieldMaxLen*(x: typed): untyped =
+  # TODO This macro is a temporary solution for the lack of a
+  # more proper way to specify the max length of the List[T; N]
+  # objects in the spec.
+  # May be replaced with `getCustomPragma` once we upgrade to
+  # Nim 0.20.2 or with a distinct List type, which would require
+  # more substantial refactorings in the spec code.
+  if x.kind != nnkDotExpr:
+    return newLit(0)
+
+  let size = case $x[1]
+             of "pubkeys",
+                "compact_validators",
+                "custody_bit_0_indices",
+                "custody_bit_1_indices",
+                "aggregation_bits",
+                "custody_bits": int64(MAX_VALIDATORS_PER_COMMITTEE)
+             of "proposer_slashings": MAX_PROPOSER_SLASHINGS
+             of "attester_slashings": MAX_ATTESTER_SLASHINGS
+             of "attestations": MAX_ATTESTATIONS
+             of "deposits": MAX_DEPOSITS
+             of "voluntary_exits": MAX_VOLUNTARY_EXITS
+             of "transfers": MAX_TRANSFERS
+             of "historical_roots": HISTORICAL_ROOTS_LIMIT
+             of "eth1_data_votes": SLOTS_PER_ETH1_VOTING_PERIOD
+             of "validators": VALIDATOR_REGISTRY_LIMIT
+             of "balances": VALIDATOR_REGISTRY_LIMIT
+             of "previous_epoch_attestations",
+                "current_epoch_attestations": MAX_ATTESTATIONS *
+                                              SLOTS_PER_EPOCH
+             else: 0
+
+  newLit size
+
 func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
   ($state.validators[validatorIdx].pubkey)[0..7]
@@ -439,6 +504,51 @@ proc `%`*(i: uint64): JsonNode =
 ethTimeUnit Slot
 ethTimeUnit Epoch

+Json.useCustomSerialization(BeaconState.justification_bits):
+  read:
+    let s = reader.readValue(string)
+    if s.len != 4: raise newException(ValueError, "unexpected number of bytes")
+    s.parseHexInt.uint8
+
+  write:
+    writer.writeValue "0x" & value.toHex
+
+Json.useCustomSerialization(BitSeq):
+  read:
+    BitSeq reader.readValue(string).hexToSeqByte
+
+  write:
+    writer.writeValue "0x" & value.bytes.toHex
+
+template readValue*(reader: var JsonReader, value: var BitList) =
+  type T = type(value)
+  value = T readValue(reader, BitSeq)
+
+template writeValue*(writer: var JsonWriter, value: BitList) =
+  writeValue(writer, BitSeq value)
+
+template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
+template len*(x: BitList): auto = len(BitSeq(x))
+template bytes*(x: BitList): auto = bytes(BitSeq(x))
+template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
+template `[]=`*(x: BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
+template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
+template raiseBit*(x: BitList, idx: int) = raiseBit(BitSeq(x), idx)
+template lowerBit*(x: BitList, idx: int) = lowerBit(BitSeq(x), idx)
+template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
+template combine*(a, b: BitList) = combine(BitSeq(a), BitSeq(b))
+template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
+
+when useListType:
+  template len*[T; N](x: List[T, N]): auto = len(seq[T](x))
+  template `[]`*[T; N](x: List[T, N], idx: auto): auto = seq[T](x)[idx]
+  template `[]=`*[T; N](x: List[T, N], idx: auto, val: bool) = seq[T](x)[idx] = val
+  template `==`*[T; N](a, b: List[T, N]): bool = seq[T](a) == seq[T](b)
+  template asSeq*[T; N](x: List[T, N]): auto = seq[T](x)
+  template `&`*[T; N](a, b: List[T, N]): List[T, N] = seq[T](a) & seq[T](b)
+else:
+  template asSeq*[T; N](x: List[T, N]): auto = x
+
 func humaneSlotNum*(s: Slot): uint64 =
   s - GENESIS_SLOT
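Note: BitList is a distinct BitSeq, and every operation above simply converts and forwards; together with CommitteeValidatorsBits = BitList[MAX_VALIDATORS_PER_COMMITTEE] this replaces the deleted BitField type end to end. A small sketch exercising the borrowed operations:

    var a = CommitteeValidatorsBits.init(8)
    var b = CommitteeValidatorsBits.init(8)
    a.raiseBit 1
    b.raiseBit 1
    b.raiseBit 5
    doAssert a.isSubsetOf(b) and not b.isSubsetOf(a)
    doAssert a.overlaps(b)
    a.combine(b)                 # a now has bits 1 and 5 set
    doAssert a[5] and a == b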
Binary file not shown.
@@ -44,7 +44,7 @@ func eth2hash*(v: openArray[byte]): Eth2Digest {.inline.} =
   var ctx: sha256
   ctx.init()
   ctx.update(v)
-  result = ctx.finish()
+  ctx.finish()

 template withEth2Hash*(body: untyped): Eth2Digest =
   ## This little helper will init the hash function and return the sliced

@@ -53,8 +53,7 @@ template withEth2Hash*(body: untyped): Eth2Digest =
   var h {.inject.}: sha256
   h.init()
   body
-  var res = h.finish()
-  res
+  h.finish()

 func hash*(x: Eth2Digest): Hash =
   ## Hash for digests for Nim hash tables

@@ -63,3 +62,4 @@ func hash*(x: Eth2Digest): Hash =
   # We just slice the first 4 or 8 bytes of the block hash
   # depending of if we are on a 32 or 64-bit platform
   result = cast[ptr Hash](unsafeAddr x)[]
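Note: both changes rely on Nim's expression-return: the last expression of a func or template body is its value, so the temporary `res`/`result` assignments were redundant. Usage is unchanged:

    # the injected `h` is updated in the body; the template yields the digest
    let digest = withEth2Hash:
      h.update [byte 1, 2, 3]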
@@ -82,7 +82,6 @@ const

   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters

@@ -139,8 +138,10 @@ const
   # ---------------------------------------------------------------
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#state-list-lengths
   LATEST_RANDAO_MIXES_LENGTH* = 8192
-  EPOCHS_PER_HISTORICAL_VECTOR* = 8192 # 2'u64^13, epochs
-  EPOCHS_PER_SLASHINGS_VECTOR* = 8192 # epochs
+  EPOCHS_PER_HISTORICAL_VECTOR* = 65536
+  EPOCHS_PER_SLASHINGS_VECTOR* = 8192
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------
@@ -65,7 +65,6 @@ const
   # Unchanged
   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters

@@ -88,7 +87,7 @@ const

   # Changed
   SLOTS_PER_ETH1_VOTING_PERIOD* = 16
-  SLOTS_PER_HISTORICAL_ROOT* = 128 # 64 doesn't work with GENESIS_SLOT == 0?
+  SLOTS_PER_HISTORICAL_ROOT* = 64 # doesn't work with GENESIS_SLOT == 0?

   # Unchanged
   MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8

@@ -104,6 +103,8 @@ const
   LATEST_RANDAO_MIXES_LENGTH* = 64
   EPOCHS_PER_HISTORICAL_VECTOR* = 64
   EPOCHS_PER_SLASHINGS_VECTOR* = 64
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------
@@ -35,7 +35,7 @@
 import # TODO - cleanup imports
   algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#block-header
 proc processBlockHeader(
@@ -34,9 +34,9 @@

 import # TODO - cleanup imports
   algorithm, math, options, sequtils, tables,
-  chronicles, json_serialization/std/sets,
+  stew/[bitseqs, bitops2], chronicles, json_serialization/std/sets,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_total_active_balance
 func get_total_active_balance(state: BeaconState): Gwei =

@@ -198,13 +198,6 @@ proc process_justification_and_finalization(
     old_previous_justified_checkpoint = state.previous_justified_checkpoint
     old_current_justified_checkpoint = state.current_justified_checkpoint

-  ## Bitvector[4] <-> uint8 mapping:
-  ## state.justification_bits[0] is (state.justification_bits shr 0) and 1
-  ## state.justification_bits[1] is (state.justification_bits shr 1) and 1
-  ## state.justification_bits[2] is (state.justification_bits shr 2) and 1
-  ## state.justification_bits[3] is (state.justification_bits shr 3) and 1
-  ## https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/simple-serialize.md#bitvectorn
-
   # Process justifications
   state.previous_justified_checkpoint = state.current_justified_checkpoint

@@ -247,8 +240,7 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: previous_epoch,
                  root: get_block_root(state, previous_epoch))

-    # Spec: state.justification_bits[1] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 1)
+    state.justification_bits.raiseBit 1

   let matching_target_attestations_current =
     get_matching_target_attestations(state, current_epoch) # Current epoch

@@ -258,33 +250,32 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: current_epoch,
                  root: get_block_root(state, current_epoch))

-    # Spec: state.justification_bits[0] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 0)
+    state.justification_bits.raiseBit 0

   # Process finalizations
   let bitfield = state.justification_bits

   ## The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th
   ## as source
-  if (bitfield shr 1) mod 8 == 0b111 and
+  if (bitfield and 0b1110) == 0b1110 and
      old_previous_justified_checkpoint.epoch + 3 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as
   ## source
-  if (bitfield shr 1) mod 4 == 0b11 and
+  if (bitfield and 0b110) == 0b110 and
      old_previous_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as
   ## source
-  if (bitfield shr 0) mod 8 == 0b111 and
+  if (bitfield and 0b111) == 0b111 and
      old_current_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

   ## The 1st/2nd most recent epochs are justified, the 1st using the 2nd as
   ## source
-  if (bitfield shr 0) mod 4 == 0b11 and
+  if (bitfield and 0b11) == 0b11 and
      old_current_justified_checkpoint.epoch + 1 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

@@ -384,7 +375,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):

       let proposer_reward =
         (get_base_reward(state, index) div PROPOSER_REWARD_QUOTIENT).Gwei
-      rewards[attestation.proposer_index] += proposer_reward
+      rewards[attestation.proposer_index.int] += proposer_reward
       let max_attester_reward = get_base_reward(state, index) - proposer_reward
       rewards[index] +=
         (max_attester_reward *
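Note: the finalization rewrite is a pure refactor: for the 4-bit justification field, `(bitfield shr k) mod 2^n == 2^n - 1` and the corresponding and-mask both test that bits k..k+n-1 are all set. A quick exhaustive check of the first case:

    # both formulations test that bits 1..3 are set
    for x in 0 .. 255:
      let bitfield = uint8(x)
      doAssert ((bitfield shr 1) mod 8 == 0b111) ==
               ((bitfield and 0b1110) == 0b1110)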
@@ -9,435 +9,569 @@
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

 import
-  endians, typetraits, options, algorithm, math,
-  faststreams/input_stream, serialization, eth/common, nimcrypto/sha2,
-  ./spec/[bitfield, crypto, datatypes, digest]
+  endians, stew/shims/macros, options, algorithm, math,
+  stew/[bitops2, bitseqs, objects, varints], stew/ranges/ptr_arith, stint,
+  faststreams/input_stream, serialization, serialization/testing/tracing,
+  nimcrypto/sha2, blscurve, eth/common,
+  ./spec/[crypto, datatypes, digest],
+  ./ssz/[types, bytes_reader]

 # ################### Helper functions ###################################

 export
-  serialization
+  serialization, types, bytes_reader
+
+when defined(serialization_tracing):
+  import
+    typetraits, stew/ranges/ptr_arith
+
+const
+  bytesPerChunk = 32
+  bitsPerChunk = bytesPerChunk * 8
+  maxChunkTreeDepth = 25
+  defaultMaxObjectSize = 1 * 1024 * 1024

 type
   SszReader* = object
     stream: ByteStreamVar
+    maxObjectSize: int

   SszWriter* = object
     stream: OutputStreamVar

-  SszError* = object of SerializationError
-  CorruptedDataError* = object of SszError
+  BasicType = char|bool|SomeUnsignedInt|StUint

-  RecordWritingMemo = object
-    initialStreamPos: int
-    sizePrefixCursor: DelayedWriteCursor
+  SszChunksMerkelizer = ref object of RootObj
+    combinedChunks: array[maxChunkTreeDepth, Eth2Digest]
+    totalChunks: uint
+    limit: uint64
+
+  Chunk = array[bytesPerChunk, byte]
+
+  TypeWithMaxLen[T; maxLen: static int64] = distinct T
+
+  SizePrefixed*[T] = distinct T
+  SszMaxSizeExceeded* = object of SerializationError
+
+  VarSizedWriterCtx = object
+    fixedParts: WriteCursor
+    offset: int
+
+  FixedSizedWriterCtx = object

 serializationFormat SSZ,
                     Reader = SszReader,
                     Writer = SszWriter,
                     PreferedOutput = seq[byte]

-proc init*(T: type SszReader, stream: ByteStreamVar): T =
-  result.stream = stream
+proc init*(T: type SszReader,
+           stream: ByteStreamVar,
+           maxObjectSize = defaultMaxObjectSize): T =
+  T(stream: stream, maxObjectSize: maxObjectSize)

 proc mount*(F: type SSZ, stream: ByteStreamVar, T: type): T =
   mixin readValue
   var reader = init(SszReader, stream)
   reader.readValue(T)

-func toSSZType(x: Slot|Epoch): auto = x.uint64
-func toSSZType(x: auto): auto = x
+method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe.} =
+  # TODO: implement proper error string
+  "Serialisation error while processing " & filename

-# toBytesSSZ convert simple fixed-length types to their SSZ wire representation
-func toBytesSSZ(x: SomeInteger): array[sizeof(x), byte] =
+when false:
+  # TODO: Nim can't handle yet this simpler definition. File an issue.
+  template valueOf[T; N](x: TypeWithMaxLen[T, N]): auto = T(x)
+else:
+  proc unwrapImpl[T; N](x: ptr TypeWithMaxLen[T, N]): ptr T =
+    cast[ptr T](x)
+
+  template valueOf(x: TypeWithMaxLen): auto =
+    let xaddr = unsafeAddr x
+    unwrapImpl(xaddr)[]
+
+template toSszType*(x: auto): auto =
+  mixin toSszType
+
+  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
+  elif x is Eth2Digest: x.data
+  elif x is BlsValue|BlsCurveType: getBytes(x)
+  elif x is BitSeq|BitList: bytes(x)
+  elif x is TypeWithMaxLen: toSszType valueOf(x)
+  elif useListType and x is List: seq[x.T](x)
+  else: x
+
+func writeFixedSized(c: var WriteCursor, x: auto) =
+  mixin toSszType
+
+  when x is byte:
+    c.append x
+  elif x is bool|char:
+    c.append byte(ord(x))
+  elif x is SomeUnsignedInt:
+    when system.cpuEndian == bigEndian:
   ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
   ## All integers are serialized as **little endian**.
+      var bytes: array[sizeof(x), byte]
+      when x.sizeof == 8: littleEndian64(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 4: littleEndian32(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 2: littleEndian16(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 1: copyMem(addr bytes[0], x.unsafeAddr, sizeof(x))
+      else: unsupported x.type
+      c.append bytes
+    else:
+      let valueAddr = unsafeAddr x
+      trs "APPENDING INT ", x, " = ", makeOpenArray(cast[ptr byte](valueAddr), sizeof(x))
+      c.appendMemCopy x
+  elif x is StUint:
+    c.appendMemCopy x # TODO: Is this always correct?
+  elif x is array|string|seq|openarray:
+    when x[0] is byte:
+      trs "APPENDING FIXED SIZE BYTES", x
+      c.append x
+    else:
+      for elem in x:
+        trs "WRITING FIXED SIZE ARRAY ELEMENENT"
+        c.writeFixedSized toSszType(elem)
+  elif x is tuple|object:
+    enumInstanceSerializedFields(x, fieldName, field):
+      trs "WRITING FIXED SIZE FIELD", fieldName
+      c.writeFixedSized toSszType(field)
+  else:
+    unsupported x.type

-  when x.sizeof == 8: littleEndian64(result.addr, x.unsafeAddr)
-  elif x.sizeof == 4: littleEndian32(result.addr, x.unsafeAddr)
-  elif x.sizeof == 2: littleEndian16(result.addr, x.unsafeAddr)
-  elif x.sizeof == 1: copyMem(result.addr, x.unsafeAddr, sizeof(result))
-  else: {.fatal: "Unsupported type serialization: " & $(type(x)).name.}
+template writeFixedSized(s: OutputStreamVar, x: auto) =
+  writeFixedSized(s.cursor, x)

-func toBytesSSZ(x: ValidatorIndex): array[3, byte] =
-  ## Integers are all encoded as little endian and not padded
-  let v = x.uint32
-  result[0] = byte(v and 0xff)
-  result[1] = byte((v shr 8) and 0xff)
-  result[2] = byte((v shr 16) and 0xff)
+template supports*(_: type SSZ, T: type): bool =
+  mixin toSszType
+  anonConst compiles(fixedPortionSize toSszType(default(T)))

-func toBytesSSZ(x: bool): array[1, byte] =
-  [if x: 1'u8 else: 0'u8]
+func init*(T: type SszWriter, stream: OutputStreamVar): T =

-func toBytesSSZ(x: EthAddress): array[sizeof(x), byte] = x
-func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
|
|
||||||
|
|
||||||
# TODO these two are still being debated:
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/issues/308#issuecomment-447026815
|
|
||||||
func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()
|
|
||||||
|
|
||||||
type
|
|
||||||
BasicType =
|
|
||||||
# Types that serialize down to a fixed-length array - most importantly,
|
|
||||||
# these values don't carry a length prefix in the final encoding. toBytesSSZ
|
|
||||||
# provides the actual nim-type-to-bytes conversion.
|
|
||||||
# TODO think about this for a bit - depends where the serialization of
|
|
||||||
# validator keys ends up going..
|
|
||||||
# TODO can't put ranges like ValidatorIndex in here:
|
|
||||||
# https://github.com/nim-lang/Nim/issues/10027
|
|
||||||
SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
|
|
||||||
bool | Slot | Epoch
|
|
||||||
|
|
||||||
func sszLen(v: BasicType): int = toBytesSSZ(v.toSSZType()).len
|
|
||||||
func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len
|
|
||||||
|
|
||||||
func sszLen(v: object | tuple): int =
|
|
||||||
result = 4 # Length
|
|
||||||
for field in v.fields:
|
|
||||||
result += sszLen(type field)
|
|
||||||
|
|
||||||
func sszLen(v: seq | array): int =
|
|
||||||
result = 4 # Length
|
|
||||||
for i in v:
|
|
||||||
result += sszLen(i)
|
|
||||||
|
|
||||||
func sszLen(v: BitField): int =
|
|
||||||
sszLen(v.bits)
|
|
||||||
|
|
||||||
# fromBytesSSZ copies the wire representation to a Nim variable,
|
|
||||||
# assuming there's enough data in the buffer
|
|
||||||
func fromBytesSSZ(T: type SomeInteger, data: openarray[byte]): T =
|
|
||||||
## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
|
|
||||||
## All integers are serialized as **little endian**.
|
|
||||||
## TODO: Assumes data points to a sufficiently large buffer
|
|
||||||
doAssert data.len == sizeof(result)
|
|
||||||
# TODO: any better way to get a suitably aligned buffer in nim???
|
|
||||||
# see also: https://github.com/nim-lang/Nim/issues/9206
|
|
||||||
var tmp: uint64
|
|
||||||
var alignedBuf = cast[ptr byte](tmp.addr)
|
|
||||||
copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)
|
|
||||||
|
|
||||||
when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
|
|
||||||
elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
|
|
||||||
elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
|
|
||||||
elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
|
|
||||||
else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
|
|
||||||
|
|
||||||
func fromBytesSSZ(T: type bool, data: openarray[byte]): T =
|
|
||||||
# TODO: spec doesn't say what to do if the value is >1 - we'll use the C
|
|
||||||
# definition for now, but maybe this should be a parse error instead?
|
|
||||||
fromBytesSSZ(uint8, data) != 0
|
|
||||||
|
|
||||||
func fromBytesSSZ(T: type ValidatorIndex, data: openarray[byte]): T =
|
|
||||||
## Integers are all encoded as littleendian and not padded
|
|
||||||
doAssert data.len == 3
|
|
||||||
var tmp: uint32
|
|
||||||
tmp = tmp or uint32(data[0])
|
|
||||||
tmp = tmp or uint32(data[1]) shl 8
|
|
||||||
tmp = tmp or uint32(data[2]) shl 16
|
|
||||||
result = tmp.ValidatorIndex
|
|
||||||
|
|
||||||
func fromBytesSSZ(T: type EthAddress, data: openarray[byte]): T =
|
|
||||||
doAssert data.len == sizeof(result)
|
|
||||||
copyMem(result.addr, unsafeAddr data[0], sizeof(result))
|
|
||||||
|
|
||||||
func fromBytesSSZ(T: type Eth2Digest, data: openarray[byte]): T =
|
|
||||||
doAssert data.len == sizeof(result.data)
|
|
||||||
copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
|
|
||||||
|
|
||||||
proc init*(T: type SszWriter, stream: OutputStreamVar): T =
|
|
||||||
result.stream = stream
|
result.stream = stream
|
||||||
|
|
||||||
proc writeValue*(w: var SszWriter, obj: auto)
|
template enumerateSubFields(holder, fieldVar, body: untyped) =
|
||||||
|
when holder is array|string|seq|openarray:
|
||||||
# This is an alternative lower-level API useful for RPC
|
for fieldVar in holder: body
|
||||||
# frameworks that can simulate the serialization of an
|
|
||||||
# object without constructing an actual instance:
|
|
||||||
proc beginRecord*(w: var SszWriter, T: type): RecordWritingMemo =
|
|
||||||
result.initialStreamPos = w.stream.pos
|
|
||||||
result.sizePrefixCursor = w.stream.delayFixedSizeWrite sizeof(uint32)
|
|
||||||
|
|
||||||
template writeField*(w: var SszWriter, name: string, value: auto) =
|
|
||||||
w.writeValue(value)
|
|
||||||
|
|
||||||
proc endRecord*(w: var SszWriter, memo: RecordWritingMemo) =
|
|
||||||
let finalSize = uint32(w.stream.pos - memo.initialStreamPos - 4)
|
|
||||||
memo.sizePrefixCursor.endWrite(finalSize.toBytesSSZ)
|
|
||||||
|
|
||||||
proc writeValue*(w: var SszWriter, obj: auto) =
|
|
||||||
# We are not using overloads here, because this leads to
|
|
||||||
# slightly better error messages when the user provides
|
|
||||||
# additional overloads for `writeValue`.
|
|
||||||
mixin writeValue
|
|
||||||
|
|
||||||
when obj is ValidatorIndex|BasicType:
|
|
||||||
w.stream.append obj.toSSZType().toBytesSSZ
|
|
||||||
elif obj is byte|char:
|
|
||||||
w.stream.append obj
|
|
||||||
elif obj is enum:
|
|
||||||
w.stream.append uint64(obj).toBytesSSZ
|
|
||||||
else:
|
else:
|
||||||
let memo = w.beginRecord(obj.type)
|
enumInstanceSerializedFields(holder, _, fieldVar): body
|
||||||
when obj is seq|array|openarray|string:
|
|
||||||
# If you get an error here that looks like:
|
|
||||||
# type mismatch: got <type range 0..8191(uint64)>
|
|
||||||
# you just used an unsigned int for an array index thinking you'd get
|
|
||||||
# away with it (surprise, surprise: you can't, uints are crippled!)
|
|
||||||
# https://github.com/nim-lang/Nim/issues/9984
|
|
||||||
for elem in obj:
|
|
||||||
w.writeValue elem
|
|
||||||
elif obj is BitField:
|
|
||||||
for elem in obj.bits:
|
|
||||||
w.writeValue elem
|
|
||||||
else:
|
|
||||||
obj.serializeFields(fieldName, field):
|
|
||||||
# for research/serialized_sizes, remove when appropriate
|
|
||||||
when defined(debugFieldSizes) and obj is (BeaconState|BeaconBlock):
|
|
||||||
let start = w.stream.pos
|
|
||||||
w.writeValue field.toSSZType
|
|
||||||
debugEcho fieldName, ": ", w.stream.pos - start
|
|
||||||
else:
|
|
||||||
w.writeValue field.toSSZType
|
|
||||||
w.endRecord(memo)
|
|
||||||
|
|
||||||
proc readValue*(r: var SszReader, result: var auto) =
|
func writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
|
||||||
# We are not using overloads here, because this leads to
|
|
||||||
# slightly better error messages when the user provides
|
func beginRecord*(w: var SszWriter, TT: type): auto =
|
||||||
# additional overloads for `readValue`.
|
type T = TT
|
||||||
type T = result.type
|
when isFixedSize(T):
|
||||||
mixin readValue
|
FixedSizedWriterCtx()
|
||||||
|
else:
|
||||||
|
const offset = when T is array: len(T) * offsetSize
|
||||||
|
else: fixedPortionSize(T)
|
||||||
|
VarSizedWriterCtx(offset: offset,
|
||||||
|
fixedParts: w.stream.delayFixedSizeWrite(offset))
|
||||||
|
|
||||||
|
template writeField*(w: var SszWriter,
|
||||||
|
ctx: var auto,
|
||||||
|
fieldName: string,
|
||||||
|
field: auto) =
|
||||||
|
mixin toSszType
|
||||||
|
when ctx is FixedSizedWriterCtx:
|
||||||
|
writeFixedSized(w, toSszType(field))
|
||||||
|
else:
|
||||||
|
type FieldType = type toSszType(field)
|
||||||
|
|
||||||
|
when isFixedSize(FieldType):
|
||||||
|
ctx.fixedParts.writeFixedSized toSszType(field)
|
||||||
|
else:
|
||||||
|
trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
|
||||||
|
ctx.fixedParts.writeFixedSized uint32(ctx.offset)
|
||||||
|
let initPos = w.stream.pos
|
||||||
|
trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
|
||||||
|
when FieldType is BitSeq:
|
||||||
|
trs "BIT SEQ ", field.bytes
|
||||||
|
writeVarSizeType(w, toSszType(field))
|
||||||
|
ctx.offset += w.stream.pos - initPos
|
||||||
|
|
||||||
|
template endRecord*(w: var SszWriter, ctx: var auto) =
|
||||||
|
when ctx is VarSizedWriterCtx:
|
||||||
|
finalize ctx.fixedParts
|
||||||
|
|
||||||
|
func writeVarSizeType(w: var SszWriter, value: auto) =
|
||||||
|
trs "STARTING VAR SIZE TYPE"
|
||||||
|
mixin toSszType
|
||||||
|
type T = type toSszType(value)
|
||||||
|
|
||||||
|
when T is seq|string|openarray:
|
||||||
|
type E = ElemType(T)
|
||||||
|
when isFixedSize(E):
|
||||||
|
trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
|
||||||
|
for elem in value:
|
||||||
|
w.stream.writeFixedSized toSszType(elem)
|
||||||
|
trs "DONE"
|
||||||
|
else:
|
||||||
|
trs "WRITING LIST WITH VAR SIZE ELEMENTS"
|
||||||
|
var offset = value.len * offsetSize
|
||||||
|
var cursor = w.stream.delayFixedSizeWrite offset
|
||||||
|
for elem in value:
|
||||||
|
cursor.writeFixedSized uint32(offset)
|
||||||
|
let initPos = w.stream.pos
|
||||||
|
w.writeVarSizeType toSszType(elem)
|
||||||
|
offset += w.stream.pos - initPos
|
||||||
|
finalize cursor
|
||||||
|
trs "DONE"
|
||||||
|
|
||||||
|
elif T is object|tuple|array:
|
||||||
|
trs "WRITING OBJECT OR ARRAY"
|
||||||
|
var ctx = beginRecord(w, T)
|
||||||
|
enumerateSubFields(value, field):
|
||||||
|
writeField w, ctx, astToStr(field), field
|
||||||
|
endRecord w, ctx
|
||||||
|
|
||||||
|
func writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
|
||||||
|
mixin toSszType
|
||||||
|
type T = type toSszType(x)
|
||||||
|
|
||||||
|
when isFixedSize(T):
|
||||||
|
w.stream.writeFixedSized toSszType(x)
|
||||||
|
elif T is array|seq|openarray|string|object|tuple:
|
||||||
|
w.writeVarSizeType toSszType(x)
|
||||||
|
else:
|
||||||
|
unsupported type(x)
|
||||||
|
|
||||||
|
func writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
|
||||||
|
var cursor = w.stream.delayVarSizeWrite(10)
|
||||||
|
let initPos = w.stream.pos
|
||||||
|
w.writeValue T(x)
|
||||||
|
cursor.appendVarint uint64(w.stream.pos - initPos)
|
||||||
|
finalize cursor
|
||||||
|
|
||||||
template checkEof(n: int) =
|
template checkEof(n: int) =
|
||||||
if not r.stream[].ensureBytes(n):
|
if not r.stream[].ensureBytes(n):
|
||||||
raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
|
raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
|
||||||
|
|
||||||
when result is ValidatorIndex|BasicType:
|
template fromSszBytes*(T: type BlsValue, bytes: openarray[byte]): auto =
|
||||||
let bytesToRead = result.sszLen;
|
fromBytes(T, bytes)
|
||||||
checkEof bytesToRead
|
|
||||||
|
|
||||||
when result is ValidatorPubKey|ValidatorSig:
|
template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
|
||||||
if not result.init(r.stream.readBytes(bytesToRead)):
|
bytes: openarray[byte]): auto =
|
||||||
raise newException(CorruptedDataError, "Failed to load a BLS key or signature")
|
mixin fromSszBytes
|
||||||
else:
|
fromSszBytes(T, bytes)
|
||||||
result = T.fromBytesSSZ(r.stream.readBytes(bytesToRead))
|
|
||||||
|
|
||||||
elif result is enum:
|
proc fromSszBytes*(T: type BlsCurveType, bytes: openarray[byte]): auto =
|
||||||
# TODO what to do with out-of-range values?? rejecting means breaking
|
init(T, bytes)
|
||||||
# forwards compatibility..
|
|
||||||
result = cast[T](r.readValue(uint64))
|
|
||||||
|
|
||||||
elif result is string:
|
proc readValue*(r: var SszReader, val: var auto) =
|
||||||
{.error: "The SSZ format doesn't support the string type yet".}
|
val = readSszValue(r.stream.readBytes(r.stream.endPos), val.type)
|
||||||
else:
|
|
||||||
let totalLen = int r.readValue(uint32)
|
|
||||||
checkEof totalLen
|
|
||||||
|
|
||||||
let endPos = r.stream[].pos + totalLen
|
proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
|
||||||
when T is seq:
|
let length = r.stream.readVarint(uint64)
|
||||||
type ElemType = type(result[0])
|
if length > r.maxObjectSize:
|
||||||
# Items are of homogenous type, but not necessarily homogenous length,
|
raise newException(SszMaxSizeExceeded,
|
||||||
# cannot pre-allocate item list generically
|
"Maximum SSZ object size exceeded: " & $length)
|
||||||
while r.stream[].pos < endPos:
|
val = readSszValue(r.stream.readBytes(length), T)
|
||||||
result.add r.readValue(ElemType)
|
|
||||||
|
|
||||||
elif T is BitField:
|
|
||||||
type ElemType = type(result.bits[0])
|
|
||||||
while r.stream[].pos < endPos:
|
|
||||||
result.bits.add r.readValue(ElemType)
|
|
||||||
|
|
||||||
elif T is array:
|
|
||||||
type ElemType = type(result[0])
|
|
||||||
var i = 0
|
|
||||||
while r.stream[].pos < endPos:
|
|
||||||
if i > result.len:
|
|
||||||
raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of an array")
|
|
||||||
result[i] = r.readValue(ElemType)
|
|
||||||
i += 1
|
|
||||||
|
|
||||||
else:
|
|
||||||
result.deserializeFields(fieldName, field):
|
|
||||||
# TODO This hardcoding's ugly; generalize & abstract.
|
|
||||||
when field is Slot:
|
|
||||||
field = r.readValue(uint64).Slot
|
|
||||||
elif field is Epoch:
|
|
||||||
field = r.readValue(uint64).Epoch
|
|
||||||
else:
|
|
||||||
field = r.readValue(field.type)
|
|
||||||
|
|
||||||
if r.stream[].pos != endPos:
|
|
||||||
raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of the deserialized object")
|
|
||||||
|
|
||||||
# ################### Hashing ###################################
|
|
||||||
|
|
||||||
# Sample hash_tree_root implementation based on:
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/specs/simple-serialize.md
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/test_libs/pyspec/eth2spec/utils/minimal_ssz.py
|
|
||||||
# TODO Probably wrong - the spec is pretty bare-bones and no test vectors yet
|
|
||||||
|
|
||||||
const
|
const
|
||||||
BYTES_PER_CHUNK = 32
|
zeroChunk = default array[32, byte]
|
||||||
|
|
||||||
# ################### Hashing helpers ###################################
|
func hash(a, b: openArray[byte]): Eth2Digest =
|
||||||
|
result = withEth2Hash:
|
||||||
|
trs "MERGING BRANCHES "
|
||||||
|
trs a
|
||||||
|
trs b
|
||||||
|
|
||||||
# TODO varargs openarray, anyone?
|
h.update a
|
||||||
template withHash(body: untyped): array[32, byte] =
|
h.update b
|
||||||
let tmp = withEth2Hash: body
|
trs "HASH RESULT ", result
|
||||||
toBytesSSZ tmp
|
|
||||||
|
|
||||||
func hash(a, b: openArray[byte]): array[32, byte] =
|
func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
|
||||||
withHash:
|
result = withEth2Hash:
|
||||||
h.update(a)
|
trs "MERGING BRANCHES OPEN ARRAY"
|
||||||
h.update(b)
|
trs existing.data
|
||||||
|
trs newData
|
||||||
|
|
||||||
type
|
h.update existing.data
|
||||||
Chunk = array[BYTES_PER_CHUNK, byte]
|
h.update newData
|
||||||
|
|
||||||
# TODO: er, how is this _actually_ done?
|
let paddingBytes = bytesPerChunk - newData.len
|
||||||
# Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
|
if paddingBytes > 0:
|
||||||
func empty(T: type): T = discard
|
trs "USING ", paddingBytes, " PADDING BYTES"
|
||||||
const emptyChunk = empty(Chunk)
|
h.update zeroChunk[0 ..< paddingBytes]
|
||||||
|
trs "HASH RESULT ", result
|
||||||
|
|
||||||
func mix_in_length(root: Chunk, length: int): Chunk =
|
template mergeBranches(a, b: Eth2Digest): Eth2Digest =
|
||||||
var dataLen: array[32, byte]
|
hash(a.data, b.data)
|
||||||
var lstLen = uint64(length)
|
|
||||||
littleEndian64(dataLen[32-8].addr, lstLen.addr)
|
|
||||||
|
|
||||||
hash(root, dataLen)
|
func computeZeroHashes: array[100, Eth2Digest] =
|
||||||
|
result[0] = Eth2Digest(data: zeroChunk)
|
||||||
|
for i in 1 .. result.high:
|
||||||
|
result[i] = mergeBranches(result[i - 1], result[i - 1])
|
||||||
|
|
||||||
template padEmptyChunks(chunks: int) =
|
let zeroHashes = computeZeroHashes()
|
||||||
for i in chunks..<nextPowerOfTwo(chunks):
|
|
||||||
yield emptyChunk
|
|
||||||
|
|
||||||
iterator packAndPad(values: seq|array): Chunk =
|
func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
|
||||||
## Produce a stream of chunks that are packed and padded such that they number
|
# TODO this is a work-around for the somewhat broken side
|
||||||
## a power of two
|
# effects analysis of Nim - reading from global let variables
|
||||||
|
# is considered a side-effect.
|
||||||
|
# Nim 0.19 doesnt have the `{.noSideEffect.}:` override, so
|
||||||
|
# we should revisit this in Nim 0.20.2.
|
||||||
|
{.emit: "`result` = `zeroHashes`[`idx`];".}
|
||||||
|
|
||||||
when sizeof(values[0].toSSZType().toBytesSSZ()) == sizeof(Chunk):
|
func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
|
||||||
# When chunks and value lengths coincide, do the simple thing
|
doAssert data.len > 0 and data.len <= bytesPerChunk
|
||||||
for v in values:
|
|
||||||
yield v.toSSZType().toBytesSSZ()
|
|
||||||
padEmptyChunks(values.len)
|
|
||||||
|
|
||||||
|
if not getBitLE(merkelizer.totalChunks, 0):
|
||||||
|
let chunkStartAddr = addr merkelizer.combinedChunks[0].data[0]
|
||||||
|
copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
|
||||||
|
zeroMem(chunkStartAddr.shift(data.len), bytesPerChunk - data.len)
|
||||||
|
trs "WROTE BASE CHUNK ", merkelizer.combinedChunks[0]
|
||||||
else:
|
else:
|
||||||
var
|
var hash = mergeBranches(merkelizer.combinedChunks[0], data)
|
||||||
chunks: int
|
|
||||||
tmp: Chunk
|
|
||||||
tmpPos: int # how many bytes of tmp we've filled with ssz values
|
|
||||||
|
|
||||||
for v in values:
|
for i in 1 .. high(merkelizer.combinedChunks):
|
||||||
var
|
trs "ITERATING"
|
||||||
vssz = toBytesSSZ(v.toSSZType)
|
if getBitLE(merkelizer.totalChunks, i):
|
||||||
vPos = 0 # how many bytes of vssz that we've consumed
|
trs "CALLING MERGE BRANCHES"
|
||||||
|
hash = mergeBranches(merkelizer.combinedChunks[i], hash)
|
||||||
while vPos < vssz.len:
|
|
||||||
# there are still bytes of vssz left to consume - looping happens when
|
|
||||||
# vssz.len > sizeof(Chunk)
|
|
||||||
|
|
||||||
let left = min(tmp.len - tmpPos, vssz.len - vPos)
|
|
||||||
copyMem(addr tmp[tmpPos], addr vssz[vPos], left)
|
|
||||||
vPos += left
|
|
||||||
tmpPos += left
|
|
||||||
|
|
||||||
if tmpPos == tmp.len:
|
|
||||||
# When vssz.len < sizeof(Chunk), multiple values will fit in a chunk
|
|
||||||
yield tmp
|
|
||||||
tmpPos = 0
|
|
||||||
chunks += 1
|
|
||||||
|
|
||||||
if tmpPos > 0:
|
|
||||||
# If vssz.len is not a multiple of Chunk, we might need to pad the last
|
|
||||||
# chunk with zeroes and return it
|
|
||||||
for i in tmpPos..<tmp.len:
|
|
||||||
tmp[i] = 0'u8
|
|
||||||
yield tmp
|
|
||||||
tmpPos = 0
|
|
||||||
chunks += 1
|
|
||||||
|
|
||||||
padEmptyChunks(chunks)
|
|
||||||
|
|
||||||
iterator hash_tree_collection(value: array|seq): Chunk =
|
|
||||||
mixin hash_tree_root
|
|
||||||
var chunks = 0
|
|
||||||
for v in value:
|
|
||||||
yield hash_tree_root(v).data
|
|
||||||
chunks += 1
|
|
||||||
padEmptyChunks(chunks)
|
|
||||||
|
|
||||||
iterator hash_tree_fields(value: object): Chunk =
|
|
||||||
mixin hash_tree_root
|
|
||||||
var chunks = 0
|
|
||||||
for v in value.fields:
|
|
||||||
yield hash_tree_root(v).data
|
|
||||||
chunks += 1
|
|
||||||
padEmptyChunks(chunks)
|
|
||||||
|
|
||||||
template merkleize(chunker: untyped): Chunk =
|
|
||||||
var
|
|
||||||
# a depth of 32 here should give us capability to handle 2^32 chunks,
|
|
||||||
# more than enough
|
|
||||||
# TODO replace with SmallVector-like thing..
|
|
||||||
stack: array[32, tuple[height: int, chunk: Chunk]]
|
|
||||||
stackPos = 0
|
|
||||||
|
|
||||||
for chunk in chunker:
|
|
||||||
# Leaves start at height 0 - every time they move up, height is increased
|
|
||||||
# allowing us to detect two chunks at the same height ready for
|
|
||||||
# consolidation
|
|
||||||
# See also: http://szydlo.com/logspacetime03.pdf
|
|
||||||
stack[stackPos] = (0, chunk)
|
|
||||||
inc stackPos
|
|
||||||
|
|
||||||
# Consolidate items of the same height - this keeps stack size at log N
|
|
||||||
while stackPos > 1 and stack[stackPos - 1].height == stack[stackPos - 2].height:
|
|
||||||
# As tradition dictates - one feature, at least one nim bug:
|
|
||||||
# https://github.com/nim-lang/Nim/issues/9684
|
|
||||||
let tmp = hash(stack[stackPos - 2].chunk, stack[stackPos - 1].chunk)
|
|
||||||
stack[stackPos - 2].height += 1
|
|
||||||
stack[stackPos - 2].chunk = tmp
|
|
||||||
stackPos -= 1
|
|
||||||
|
|
||||||
doAssert stackPos == 1,
|
|
||||||
"With power-of-two leaves, we should end up with a single root"
|
|
||||||
|
|
||||||
stack[0].chunk
|
|
||||||
|
|
||||||
template elementType[T, N](_: type array[N, T]): typedesc = T
|
|
||||||
template elementType[T](_: type seq[T]): typedesc = T
|
|
||||||
|
|
||||||
func hash_tree_root*[T](value: T): Eth2Digest =
|
|
||||||
# Merkle tree
|
|
||||||
Eth2Digest(data:
|
|
||||||
when T is BasicType:
|
|
||||||
merkleize(packAndPad([value]))
|
|
||||||
elif T is array|seq:
|
|
||||||
when T.elementType() is BasicType:
|
|
||||||
mix_in_length(merkleize(packAndPad(value)), len(value))
|
|
||||||
else:
|
else:
|
||||||
mix_in_length(merkleize(hash_tree_collection(value)), len(value))
|
trs "WRITING FRESH CHUNK AT ", i, " = ", hash
|
||||||
elif T is object:
|
merkelizer.combinedChunks[i] = hash
|
||||||
merkleize(hash_tree_fields(value))
|
break
|
||||||
|
|
||||||
|
inc merkelizer.totalChunks
|
||||||
|
|
||||||
|
func getFinalHash*(merkelizer: SszChunksMerkelizer): Eth2Digest =
|
||||||
|
let limit = merkelizer.limit
|
||||||
|
|
||||||
|
if merkelizer.totalChunks == 0:
|
||||||
|
let limitHeight = if limit != 0: bitWidth(limit - 1) else: 0
|
||||||
|
return getZeroHashWithoutSideEffect(limitHeight)
|
||||||
|
|
||||||
|
let
|
||||||
|
bottomHashIdx = firstOne(merkelizer.totalChunks) - 1
|
||||||
|
submittedChunksHeight = bitWidth(merkelizer.totalChunks - 1)
|
||||||
|
topHashIdx = if limit <= 1: submittedChunksHeight
|
||||||
|
else: max(submittedChunksHeight, bitWidth(limit - 1))
|
||||||
|
|
||||||
|
trs "BOTTOM HASH ", bottomHashIdx
|
||||||
|
trs "SUBMITTED HEIGHT ", submittedChunksHeight
|
||||||
|
trs "LIMIT ", limit
|
||||||
|
|
||||||
|
if bottomHashIdx != submittedChunksHeight:
|
||||||
|
# Our tree is not finished. We must complete the work in progress
|
||||||
|
# branches and then extend the tree to the right height.
|
||||||
|
result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
|
||||||
|
getZeroHashWithoutSideEffect(bottomHashIdx))
|
||||||
|
|
||||||
|
for i in bottomHashIdx + 1 ..< topHashIdx:
|
||||||
|
if getBitLE(merkelizer.totalChunks, i):
|
||||||
|
result = mergeBranches(merkelizer.combinedChunks[i], result)
|
||||||
|
trs "COMBINED"
|
||||||
else:
|
else:
|
||||||
static: doAssert false, "Unexpected type: " & T.name
|
result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
|
||||||
|
trs "COMBINED WITH ZERO"
|
||||||
|
|
||||||
|
elif bottomHashIdx == topHashIdx:
|
||||||
|
# We have a perfect tree (chunks == 2**n) at just the right height!
|
||||||
|
result = merkelizer.combinedChunks[bottomHashIdx]
|
||||||
|
else:
|
||||||
|
# We have a perfect tree of user chunks, but we have more work to
|
||||||
|
# do - we must extend it to reach the desired height
|
||||||
|
result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
|
||||||
|
getZeroHashWithoutSideEffect(bottomHashIdx))
|
||||||
|
|
||||||
|
for i in bottomHashIdx + 1 ..< topHashIdx:
|
||||||
|
result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
|
||||||
|
|
||||||
|
let HashingStreamVTable = OutputStreamVTable(
|
||||||
|
writePage: proc (s: OutputStreamVar, data: openarray[byte])
|
||||||
|
{.nimcall, gcsafe, raises: [IOError, Defect].} =
|
||||||
|
trs "ADDING STREAM CHUNK ", data
|
||||||
|
SszChunksMerkelizer(s.outputDevice).addChunk(data)
|
||||||
|
,
|
||||||
|
flush: proc (s: OutputStreamVar) {.nimcall, gcsafe.} =
|
||||||
|
discard
|
||||||
)
|
)
|
||||||
|
|
||||||
iterator hash_tree_most(v: object): Chunk =
|
func getVtableAddresWithoutSideEffect: ptr OutputStreamVTable =
|
||||||
const numFields = (proc(): int =
|
# TODO this is a work-around for the somewhat broken side
|
||||||
var o: type(v)
|
# effects analysis of Nim - reading from global let variables
|
||||||
var i = 0
|
# is considered a side-effect.
|
||||||
for _, _ in o.fieldPairs: inc i
|
# Nim 0.19 doesnt have the `{.noSideEffect.}:` override, so
|
||||||
i)()
|
# we should revisit this in Nim 0.20.2.
|
||||||
|
{.emit: "`result` = &`HashingStreamVTable`;".}
|
||||||
|
|
||||||
var i = 0
|
func newSszHashingStream(merkelizer: SszChunksMerkelizer): ref OutputStream =
|
||||||
for name, field in v.fieldPairs:
|
new result
|
||||||
if i == numFields - 1:
|
result.initWithSinglePage(pageSize = bytesPerChunk,
|
||||||
break
|
maxWriteSize = bytesPerChunk,
|
||||||
inc i
|
minWriteSize = bytesPerChunk)
|
||||||
yield hash_tree_root(field).data
|
result.outputDevice = merkelizer
|
||||||
|
result.vtable = getVtableAddresWithoutSideEffect()
|
||||||
|
|
||||||
# https://github.com/ethereum/eth2.0-specs/blob/0.4.0/specs/simple-serialize.md#signed-roots
|
func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
|
||||||
func signing_root*[T: object](x: T): Eth2Digest =
|
var dataLen: array[32, byte]
|
||||||
# TODO write tests for this (check vs hash_tree_root)
|
var lstLen = uint64(length)
|
||||||
|
littleEndian64(addr dataLen[0], addr lstLen)
|
||||||
|
hash(root.data, dataLen)
|
||||||
|
|
||||||
let root = merkleize(hash_tree_most(x))
|
func merkelizeSerializedChunks(merkelizer: SszChunksMerkelizer,
|
||||||
|
obj: auto): Eth2Digest =
|
||||||
|
var hashingStream = newSszHashingStream merkelizer
|
||||||
|
hashingStream.writeFixedSized obj
|
||||||
|
hashingStream.flush
|
||||||
|
merkelizer.getFinalHash
|
||||||
|
|
||||||
|
func merkelizeSerializedChunks(obj: auto): Eth2Digest =
|
||||||
|
merkelizeSerializedChunks(SszChunksMerkelizer(), obj)
|
||||||
|
|
||||||
|
func hashTreeRoot*(x: auto): Eth2Digest {.gcsafe.}
|
||||||
|
|
||||||
|
template merkelizeFields(body: untyped): Eth2Digest {.dirty.} =
|
||||||
|
var merkelizer {.inject.} = SszChunksMerkelizer()
|
||||||
|
|
||||||
|
template addField(field) =
|
||||||
|
let hash = hashTreeRoot(field)
|
||||||
|
trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
|
||||||
|
addChunk(merkelizer, hash.data)
|
||||||
|
trs "CHUNK ADDED"
|
||||||
|
|
||||||
|
template addField2(field) =
|
||||||
|
const maxLen = fieldMaxLen(field)
|
||||||
|
when maxLen > 0:
|
||||||
|
type FieldType = type field
|
||||||
|
addField TypeWithMaxLen[FieldType, maxLen](field)
|
||||||
|
else:
|
||||||
|
addField field
|
||||||
|
|
||||||
|
body
|
||||||
|
|
||||||
|
merkelizer.getFinalHash
|
||||||
|
|
||||||
|
func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest =
|
||||||
|
trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkelizer.limit
|
||||||
|
|
||||||
|
var
|
||||||
|
totalBytes = x.bytes.len
|
||||||
|
lastCorrectedByte = x.bytes[^1]
|
||||||
|
|
||||||
|
if lastCorrectedByte == byte(1):
|
||||||
|
if totalBytes == 1:
|
||||||
|
# This is an empty bit list.
|
||||||
|
# It should be hashed as a tree containing all zeros:
|
||||||
|
let treeHeight = if merkelizer.limit == 0: 0
|
||||||
|
else: log2trunc(merkelizer.limit)
|
||||||
|
return mergeBranches(getZeroHashWithoutSideEffect(treeHeight),
|
||||||
|
getZeroHashWithoutSideEffect(0)) # this is the mixed length
|
||||||
|
|
||||||
|
totalBytes -= 1
|
||||||
|
lastCorrectedByte = x.bytes[^2]
|
||||||
|
else:
|
||||||
|
let markerPos = log2trunc(lastCorrectedByte)
|
||||||
|
lastCorrectedByte.lowerBit(markerPos)
|
||||||
|
|
||||||
|
var
|
||||||
|
bytesInLastChunk = totalBytes mod bytesPerChunk
|
||||||
|
paddingBytes = bytesPerChunk - bytesInLastChunk
|
||||||
|
fullChunks = totalBytes div bytesPerChunk
|
||||||
|
|
||||||
|
if bytesInLastChunk == 0:
|
||||||
|
fullChunks -= 1
|
||||||
|
bytesInLastChunk = 32
|
||||||
|
|
||||||
|
for i in 0 ..< fullChunks:
|
||||||
|
let
|
||||||
|
chunkStartPos = i * bytesPerChunk
|
||||||
|
chunkEndPos = chunkStartPos + bytesPerChunk - 1
|
||||||
|
|
||||||
|
merkelizer.addChunk x.bytes.toOpenArray(chunkEndPos, chunkEndPos)
|
||||||
|
|
||||||
|
var
|
||||||
|
lastChunk: array[bytesPerChunk, byte]
|
||||||
|
chunkStartPos = fullChunks * bytesPerChunk
|
||||||
|
|
||||||
|
for i in 0 .. bytesInLastChunk - 2:
|
||||||
|
lastChunk[i] = x.bytes[chunkStartPos + i]
|
||||||
|
|
||||||
|
lastChunk[bytesInLastChunk - 1] = lastCorrectedByte
|
||||||
|
|
||||||
|
merkelizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
|
||||||
|
let contentsHash = merkelizer.getFinalHash
|
||||||
|
mixInLength contentsHash, x.len
|
||||||
|
|
||||||
|
func hashTreeRootImpl[T](x: T): Eth2Digest =
|
||||||
|
when (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
|
||||||
|
trs "FIXED TYPE; USE CHUNK STREAM"
|
||||||
|
merkelizeSerializedChunks x
|
||||||
|
elif T is string or (when T is (seq|openarray): ElemType(T) is BasicType else: false):
|
||||||
|
trs "TYPE WITH LENGTH"
|
||||||
|
mixInLength merkelizeSerializedChunks(x), x.len
|
||||||
|
elif T is array|object|tuple:
|
||||||
|
trs "MERKELIZING FIELDS"
|
||||||
|
merkelizeFields:
|
||||||
|
x.enumerateSubFields(f):
|
||||||
|
const maxLen = fieldMaxLen(f)
|
||||||
|
when maxLen > 0:
|
||||||
|
type FieldType = type f
|
||||||
|
addField TypeWithMaxLen[FieldType, maxLen](f)
|
||||||
|
else:
|
||||||
|
addField f
|
||||||
|
elif T is seq:
|
||||||
|
trs "SEQ WITH VAR SIZE"
|
||||||
|
let hash = merkelizeFields(for e in x: addField e)
|
||||||
|
mixInLength hash, x.len
|
||||||
|
#elif isCaseObject(T):
|
||||||
|
# # TODO implement this
|
||||||
|
else:
|
||||||
|
unsupported T
|
||||||
|
|
||||||
|
func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
|
||||||
|
when T is BitList:
|
||||||
|
(maxLen + bitsPerChunk - 1) div bitsPerChunk
|
||||||
|
elif T is seq:
|
||||||
|
type E = ElemType(T)
|
||||||
|
when E is BasicType:
|
||||||
|
(maxLen * sizeof(E) + bytesPerChunk - 1) div bytesPerChunk
|
||||||
|
else:
|
||||||
|
maxLen
|
||||||
|
else:
|
||||||
|
unsupported T # This should never happen
|
||||||
|
|
||||||
|
func hashTreeRoot*(x: auto): Eth2Digest =
|
||||||
|
trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
|
||||||
|
mixin toSszType
|
||||||
|
when x is TypeWithMaxLen:
|
||||||
|
const maxLen = x.maxLen
|
||||||
|
type T = type valueOf(x)
|
||||||
|
const limit = maxChunksCount(T, maxLen)
|
||||||
|
var merkelizer = SszChunksMerkelizer(limit: uint64(limit))
|
||||||
|
|
||||||
|
when T is BitList:
|
||||||
|
result = merkelizer.bitlistHashTreeRoot(BitSeq valueOf(x))
|
||||||
|
elif T is seq:
|
||||||
|
type E = ElemType(T)
|
||||||
|
let contentsHash = when E is BasicType:
|
||||||
|
merkelizeSerializedChunks(merkelizer, valueOf(x))
|
||||||
|
else:
|
||||||
|
for elem in valueOf(x):
|
||||||
|
let elemHash = hashTreeRoot(elem)
|
||||||
|
merkelizer.addChunk(elemHash.data)
|
||||||
|
merkelizer.getFinalHash()
|
||||||
|
result = mixInLength(contentsHash, valueOf(x).len)
|
||||||
|
else:
|
||||||
|
unsupported T # This should never happen
|
||||||
|
else:
|
||||||
|
result = hashTreeRootImpl toSszType(x)
|
||||||
|
|
||||||
|
trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result
|
||||||
|
|
||||||
|
func lastFieldName(RecordType: type): string {.compileTime.} =
|
||||||
|
enumAllSerializedFields(RecordType):
|
||||||
|
result = fieldName
|
||||||
|
|
||||||
|
func hasSigningRoot*(T: type): bool {.compileTime.} =
|
||||||
|
lastFieldName(T) == "signature"
|
||||||
|
|
||||||
|
func signingRoot*(obj: object): Eth2Digest =
|
||||||
|
const lastField = lastFieldName(obj.type)
|
||||||
|
merkelizeFields:
|
||||||
|
obj.enumInstanceSerializedFields(fieldName, field):
|
||||||
|
when fieldName != lastField:
|
||||||
|
addField2 field
|
||||||
|
|
||||||
Eth2Digest(data: root)
|
|
||||||
|
|
|
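Note: the incremental merkleizer above keeps at most one pending digest per tree level in combinedChunks, so hashing n chunks needs only O(log n) memory. A minimal, hedged round-trip sketch in Nim (assumes the SSZ.encode/SSZ.decode helpers that nim-serialization generates from serializationFormat; Sample is a hypothetical fixed-size type, not part of this commit):

  import ./ssz

  type Sample = object
    slot: uint64
    flag: bool

  let original = Sample(slot: 42'u64, flag: true)
  let bytes = SSZ.encode(original)        # fixed-size object: fields are simply concatenated
  let decoded = SSZ.decode(bytes, Sample) # must round-trip to the same value
  doAssert decoded == original
  doAssert hashTreeRoot(decoded) == hashTreeRoot(original)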
@ -0,0 +1,142 @@
+import
+  endians, typetraits,
+  stew/[objects, bitseqs], serialization/testing/tracing,
+  ../spec/[digest, datatypes], ./types
+
+template setLen[R, T](a: var array[R, T], length: int) =
+  if length != a.len:
+    raise newException(MalformedSszError, "SSZ input of insufficient size")
+
+# fromSszBytes copies the wire representation to a Nim variable,
+# assuming there's enough data in the buffer
+func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
+  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
+  ## All integers are serialized as **little endian**.
+  ## TODO: Assumes data points to a sufficiently large buffer
+  doAssert data.len == sizeof(result)
+  # TODO: any better way to get a suitably aligned buffer in nim???
+  # see also: https://github.com/nim-lang/Nim/issues/9206
+  var tmp: uint64
+  var alignedBuf = cast[ptr byte](tmp.addr)
+  copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)
+
+  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
+  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
+  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
+  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
+  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
+
+func fromSszBytes*(T: type bool, data: openarray[byte]): T =
+  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
+  #       definition for now, but maybe this should be a parse error instead?
+  fromSszBytes(uint8, data) != 0
+
+func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T =
+  doAssert data.len == sizeof(result.data)
+  copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
+
+template fromSszBytes*(T: type Slot, bytes: openarray[byte]): Slot =
+  Slot fromSszBytes(uint64, bytes)
+
+template fromSszBytes*(T: type Epoch, bytes: openarray[byte]): Epoch =
+  Epoch fromSszBytes(uint64, bytes)
+
+template fromSszBytes*(T: type enum, bytes: openarray[byte]): auto =
+  T fromSszBytes(uint64, bytes)
+
+template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
+  BitSeq @bytes
+
+proc fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto =
+  BitList[N] @bytes
+
+proc readSszValue*(input: openarray[byte], T: type): T =
+  mixin fromSszBytes, toSszType
+
+  type T = type(result)
+
+  template readOffset(n: int): int =
+    int fromSszBytes(uint32, input[n ..< n + offsetSize])
+
+  when useListType and result is List:
+    type ElemType = type result[0]
+    result = T readSszValue(input, seq[ElemType])
+  elif result is string|seq|openarray|array:
+    type ElemType = type result[0]
+    when ElemType is byte|char:
+      result.setLen input.len
+      copyMem(addr result[0], unsafeAddr input[0], input.len)
+
+    elif isFixedSize(ElemType):
+      const elemSize = fixedPortionSize(ElemType)
+      if input.len mod elemSize != 0:
+        var ex = new SszSizeMismatchError
+        ex.deserializedType = cstring typetraits.name(T)
+        ex.actualSszSize = input.len
+        ex.elementSize = elemSize
+        raise ex
+      result.setLen input.len div elemSize
+      trs "READING LIST WITH LEN ", result.len
+      for i in 0 ..< result.len:
+        trs "TRYING TO READ LIST ELEM ", i
+        let offset = i * elemSize
+        result[i] = readSszValue(input[offset ..< offset+elemSize], ElemType)
+      trs "LIST READING COMPLETE"
+
+    else:
+      if input.len == 0:
+        # This is an empty list.
+        # The default initialization of the return value is fine.
+        return
+
+      var offset = readOffset 0
+      trs "GOT OFFSET ", offset
+      let resultLen = offset div offsetSize
+      trs "LEN ", resultLen
+      result.setLen resultLen
+      for i in 1 ..< resultLen:
+        let nextOffset = readOffset(i * offsetSize)
+        result[i - 1] = readSszValue(input[offset ..< nextOffset], ElemType)
+        offset = nextOffset
+
+      result[resultLen - 1] = readSszValue(input[offset ..< input.len], ElemType)
+
+  elif result is object|tuple:
+    enumInstanceSerializedFields(result, fieldName, field):
+      const boundingOffsets = T.getFieldBoundingOffsets(fieldName)
+      trs "BOUNDING OFFSET FOR FIELD ", fieldName, " = ", boundingOffsets
+
+      type FieldType = type field
+      type SszType = type toSszType(default(FieldType))
+
+      when isFixedSize(SszType):
+        const
+          startOffset = boundingOffsets[0]
+          endOffset = boundingOffsets[1]
+        trs "FIXED FIELD ", startOffset, "-", endOffset
+      else:
+        let
+          startOffset = readOffset(boundingOffsets[0])
+          endOffset = if boundingOffsets[1] == -1: input.len
+                      else: readOffset(boundingOffsets[1])
+        trs "VAR FIELD ", startOffset, "-", endOffset
+
+      # TODO The extra type escaping here is a work-around for a Nim issue:
+      when type(FieldType) is type(SszType):
+        trs "READING NATIVE ", fieldName, ": ", name(SszType)
+        field = readSszValue(input[startOffset ..< endOffset], SszType)
+        trs "READING COMPLETE ", fieldName
+      elif useListType and FieldType is List:
+        field = readSszValue(input[startOffset ..< endOffset], FieldType)
+      else:
+        trs "READING FOREIGN ", fieldName, ": ", name(SszType)
+        field = fromSszBytes(FieldType, input[startOffset ..< endOffset])
+
+  elif result is SomeInteger|bool:
+    trs "READING BASIC TYPE ", type(result).name, " input=", input.len
+    result = fromSszBytes(type(result), input)
+    trs "RESULT WAS ", repr(result)
+
+  else:
+    unsupported T
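Note: a quick sketch of the little-endian primitive decoding performed by fromSszBytes/readSszValue above (import path assumes this commit's repo layout; the values are chosen so the expected bytes are easy to verify by hand):

  import ./ssz/bytes_reader

  # 123456 = 0x0001E240, stored least-significant byte first
  let data = [byte 0x40, 0xE2, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00]
  doAssert readSszValue(data, uint64) == 123456'u64
  doAssert readSszValue([byte 1], bool) # any non-zero byte decodes as true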
@ -0,0 +1,63 @@
+import
+  stew/objects, stew/ranges/ptr_arith,
+  ./types, ./bytes_reader
+
+type
+  MemRange = object
+    startAddr: ptr byte
+    length: int
+
+  SszNavigator*[T] = object
+    m: MemRange
+
+func sszMount*(data: openarray[byte], T: type): SszNavigator[T] =
+  let startAddr = unsafeAddr data[0]
+  SszNavigator[T](m: MemRange(startAddr: startAddr, length: data.len))
+
+template checkBounds(m: MemRange, offset: int) =
+  if offset > m.length:
+    raise newException(MalformedSszError, "Malformed SSZ")
+
+template toOpenArray(m: MemRange): auto =
+  makeOpenArray(m.startAddr, m.length)
+
+func navigateToField[T](n: SszNavigator[T],
+                        fieldName: static string,
+                        FieldType: type): SszNavigator[FieldType] =
+  mixin toSszType
+  type SszFieldType = type toSszType(default FieldType)
+
+  const boundingOffsets = getFieldBoundingOffsets(T, fieldName)
+  checkBounds(n.m, boundingOffsets[1])
+
+  when isFixedSize(SszFieldType):
+    SszNavigator[FieldType](m: MemRange(
+      startAddr: shift(n.m.startAddr, boundingOffsets[0]),
+      length: boundingOffsets[1] - boundingOffsets[0]))
+  else:
+    template readOffset(offset): int =
+      int fromSszBytes(uint32, makeOpenArray(shift(n.m.startAddr, offset),
+                                             sizeof(uint32)))
+    let
+      startOffset = readOffset boundingOffsets[0]
+      endOffset = when boundingOffsets[1] == -1: n.m.length
+                  else: readOffset boundingOffsets[1]
+
+    if endOffset < startOffset or endOffset > n.m.length:
+      raise newException(MalformedSszError, "Incorrect offset values")
+
+    SszNavigator[FieldType](m: MemRange(
+      startAddr: shift(n.m.startAddr, startOffset),
+      length: endOffset - startOffset))
+
+template `.`*[T](n: SszNavigator[T], field: untyped): auto =
+  type RecType = T
+  type FieldType = type(default(RecType).field)
+  navigateToField(n, astToStr(field), FieldType)
+
+func `[]`*[T](n: SszNavigator[T]): T =
+  readSszValue(toOpenArray(n.m), T)
+
+converter derefNavigator*[T](n: SszNavigator[T]): T =
+  n[]
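Note: the navigator lets a single field be addressed inside an SSZ blob without deserializing the whole object - the `.` template resolves the field's byte range from the compile-time bounding offsets, and only the final `[]` dereference decodes bytes. A hedged usage sketch (assumes the SSZ.encode helper mentioned earlier and a spec BeaconBlock value blck already in scope):

  import ./ssz, ./ssz/navigator

  let bytes = SSZ.encode(blck)
  let nav = sszMount(bytes, BeaconBlock)
  let slot = nav.slot[]   # decodes just the bytes backing `slot`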
@ -0,0 +1,238 @@
+import
+  tables,
+  stew/shims/macros, stew/[objects, bitseqs],
+  serialization/[object_serialization, errors]
+
+const
+  useListType* = false
+  offsetSize* = 4
+
+type
+  BasicType* = char|bool|SomeUnsignedInt
+
+  SszError* = object of SerializationError
+
+  MalformedSszError* = object of SszError
+
+  SszSizeMismatchError* = object of SszError
+    deserializedType*: cstring
+    actualSszSize*: int
+    elementSize*: int
+
+  SszChunksLimitExceeded* = object of SszError
+
+  SszSchema* = ref object
+    nodes*: seq[SszNode]
+
+  SszTypeKind* = enum
+    sszNull
+    sszUInt
+    sszBool
+    sszList
+    sszVector
+    sszBitList
+    sszBitVector
+    sszRecord
+
+  SszType* = ref object
+    case kind*: SszTypeKind
+    of sszUInt, sszBitVector:
+      bits*: int
+    of sszBool, sszNull, sszBitList:
+      discard
+    of sszVector:
+      size*: int
+      vectorElemType*: SszType
+    of sszList:
+      listElemType*: SszType
+    of sszRecord:
+      schema*: SszSchema
+
+  SszNodeKind* = enum
+    Field
+    Union
+
+  SszNode* = ref object
+    name*: string
+    typ*: SszType
+    case kind: SszNodeKind
+    of Union:
+      variants*: seq[SszSchema]
+    of Field:
+      discard
+
+when useListType:
+  type List*[T; maxLen: static int] = distinct seq[T]
+else:
+  type List*[T; maxLen: static int] = seq[T]
+
+macro unsupported*(T: typed): untyped =
+  # TODO: {.fatal.} breaks compilation even in `compiles()` context,
+  # so we use this macro instead. It's also much better at figuring
+  # out the actual type that was used in the instantiation.
+  # File both problems as issues.
+  error "SSZ serialization of the type " & humaneTypeName(T) & " is not supported"
+
+template ElemType*(T: type[array]): untyped =
+  type(default(T)[low(T)])
+
+template ElemType*[T](A: type[openarray[T]]): untyped =
+  T
+
+template ElemType*(T: type[seq|string|List]): untyped =
+  type(default(T)[0])
+
+func isFixedSize*(T0: type): bool {.compileTime.} =
+  mixin toSszType, enumAllSerializedFields
+
+  when T0 is openarray:
+    return false
+  else:
+    type T = type toSszType(default T0)
+
+    when T is BasicType:
+      return true
+    elif T is array:
+      return isFixedSize(ElemType(T))
+    elif T is object|tuple:
+      enumAllSerializedFields(T):
+        when not isFixedSize(FieldType):
+          return false
+      return true
+
+func fixedPortionSize*(T0: type): int {.compileTime.} =
+  mixin enumAllSerializedFields, toSszType
+  type T = type toSszType(default T0)
+
+  when T is BasicType: sizeof(T)
+  elif T is array:
+    const elementCount = high(T).ord - low(T).ord + 1
+    type E = ElemType(T)
+    when isFixedSize(E): elementCount * fixedPortionSize(E)
+    else: elementCount * offsetSize
+  elif T is seq|string|openarray: offsetSize
+  elif T is object|tuple:
+    var res = 0
+    enumAllSerializedFields(T):
+      when isFixedSize(FieldType):
+        res += fixedPortionSize(FieldType)
+      else:
+        res += offsetSize
+    res
+  else:
+    unsupported T0
+
+func sszSchemaType*(T0: type): SszType {.compileTime.} =
+  mixin toSszType, enumAllSerializedFields
+  type T = type toSszType(default T0)
+
+  when T is bool:
+    SszType(kind: sszBool)
+  elif T is uint8|char:
+    SszType(kind: sszUInt, bits: 8)
+  elif T is uint16:
+    SszType(kind: sszUInt, bits: 16)
+  elif T is uint32:
+    SszType(kind: sszUInt, bits: 32)
+  elif T is uint64:
+    SszType(kind: sszUInt, bits: 64)
+  elif T is seq|string:
+    SszType(kind: sszList, listElemType: sszSchemaType(ElemType(T)))
+  elif T is array:
+    SszType(kind: sszVector, vectorElemType: sszSchemaType(ElemType(T)))
+  elif T is BitArray:
+    SszType(kind: sszBitVector, bits: T.bits)
+  elif T is BitSeq:
+    SszType(kind: sszBitList)
+  elif T is object|tuple:
+    var recordSchema = SszSchema()
+    var caseBranches = initTable[string, SszSchema]()
+    caseBranches[""] = recordSchema
+    # TODO case objects are still not supported here.
+    # `recordFields` has to be refactored to properly
+    # report nested discriminator fields.
+    enumAllSerializedFields(T):
+      recordSchema.nodes.add SszNode(
+        name: fieldName,
+        typ: sszSchemaType(FieldType),
+        kind: Field)
+  else:
+    unsupported T0
+
+# TODO This should have been an iterator, but the VM can't compile the
+# code due to "too many registers required".
+proc fieldInfos*(RecordType: type): seq[tuple[name: string,
+                                              offset: int,
+                                              fixedSize: int,
+                                              branchKey: string]] =
+  mixin enumAllSerializedFields
+
+  var
+    offsetInBranch = {"": 0}.toTable
+    nestedUnder = initTable[string, string]()
+
+  enumAllSerializedFields(RecordType):
+    const
+      isFixed = isFixedSize(FieldType)
+      fixedSize = when isFixed: fixedPortionSize(FieldType)
+                  else: 0
+      branchKey = when fieldCaseDisciminator.len == 0: ""
+                  else: fieldCaseDisciminator & ":" & $fieldCaseBranches
+      fieldSize = when isFixed: fixedSize
+                  else: offsetSize
+
+    nestedUnder[fieldName] = branchKey
+
+    var fieldOffset: int
+    offsetInBranch.withValue(branchKey, val):
+      fieldOffset = val[]
+      val[] += fieldSize
+    do:
+      let parentBranch = nestedUnder.getOrDefault(fieldCaseDisciminator, "")
+      fieldOffset = offsetInBranch[parentBranch]
+      offsetInBranch[branchKey] = fieldOffset + fieldSize
+
+    result.add((fieldName, fieldOffset, fixedSize, branchKey))
+
+func getFieldBoundingOffsetsImpl(RecordType: type,
+                                 fieldName: static string):
+     tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
+  result = (-1, -1)
+  var fieldBranchKey: string
+
+  for f in fieldInfos(RecordType):
+    if fieldName == f.name:
+      result[0] = f.offset
+      if f.fixedSize > 0:
+        result[1] = result[0] + f.fixedSize
+        return
+      else:
+        fieldBranchKey = f.branchKey
+
+    elif result[0] != -1 and
+         f.fixedSize == 0 and
+         f.branchKey == fieldBranchKey:
+      # We have found the next variable-sized field
+      result[1] = f.offset
+      return
+
+func getFieldBoundingOffsets*(RecordType: type,
+                              fieldName: static string):
+     tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
+  ## Returns the start and end offsets of a field.
+  ##
+  ## For fixed-size fields, the start offset points to the first
+  ## byte of the field and the end offset points to 1 byte past the
+  ## end of the field.
+  ##
+  ## For variable-size fields, the returned offsets point to the
+  ## statically known positions of the 32-bit offset values written
+  ## within the SSZ object. You must read the 32-bit values stored
+  ## at these locations in order to obtain the actual offsets.
+  ##
+  ## For variable-size fields, the end offset may be -1 when the
+  ## designated field is the last variable-sized field within the
+  ## object. Then the SSZ object boundary known at run-time marks
+  ## the end of the variable-size field.
+  type T = RecordType
+  anonConst getFieldBoundingOffsetsImpl(T, fieldName)
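Note: the compile-time layout queries above drive both the reader and the navigator. A small sketch of what they evaluate to (assumes the standard SSZ sizes: uint64 occupies 8 bytes, and every variable-size part costs one 32-bit offset in the fixed portion):

  import ./ssz/types

  static:
    doAssert isFixedSize(uint64)
    doAssert fixedPortionSize(uint64) == 8
    doAssert not isFixedSize(seq[byte])
    doAssert fixedPortionSize(seq[byte]) == offsetSize # 4-byte offset placeholder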
@ -33,7 +33,7 @@
 import
   algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
   ./extras, ./ssz, ./beacon_node_types,
-  ./spec/[beaconstate, bitfield, crypto, datatypes, digest, helpers, validator],
+  ./spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
   ./spec/[state_transition_block, state_transition_epoch]

 # Canonical state transition functions
@ -24,8 +24,12 @@ type
     node*: BeaconNode
     db*: BeaconChainDB

+  BlockRootSlot* = object
+    blockRoot: Eth2Digest
+    slot: Slot
+
 const
-  MaxRootsToRequest = 512
+  MaxRootsToRequest = 512'u64
   MaxHeadersToRequest = MaxRootsToRequest
   MaxAncestorBlocksResponse = 256
@ -67,8 +71,11 @@ proc mergeBlockHeadersAndBodies(headers: openarray[BeaconBlockHeader], bodies: o
     res[^1].fromHeaderAndBody(headers[i], bodies[i])
   some(res)

-proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}
+proc getBeaconBlocks*(peer: Peer,
+                      blockRoot: Eth2Digest,
+                      slot: Slot,
+                      maxBlocks, skipSlots: uint64,
+                      backward: bool): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}

 p2pProtocol BeaconSync(version = 1,
                        shortName = "bcs",
@ -113,8 +120,8 @@ p2pProtocol BeaconSync(version = 1,
       var s = bestSlot + 1
       while s <= m.bestSlot:
         debug "Waiting for block headers", fromPeer = peer, remoteBestSlot = m.bestSlot, peer
-        let headersLeft = int(m.bestSlot - s)
-        let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, 0)
+        let headersLeft = uint64(m.bestSlot - s)
+        let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, false)
         if blocks.isSome:
           if blocks.get.len == 0:
             info "Got 0 blocks while syncing", peer
@@ -144,53 +151,40 @@ p2pProtocol BeaconSync(version = 1,

     proc goodbye(peer: Peer, reason: DisconnectionReason)

-  requestResponse:
-    proc getStatus(
-           peer: Peer,
-           sha: Eth2Digest,
-           userAgent: string,
-           timestamp: uint64) =
-
-      # TODO: How should this be implemented?
-      # https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/rpc-interface.md#get-status
-      await response.send(sha, userAgent, timestamp)
-
-    proc status(peer: Peer, sha: Eth2Digest, userAgent: string, timestamp: uint64)
-
   nextId 10

   requestResponse:
     proc getBeaconBlockRoots(
            peer: Peer,
            fromSlot: Slot,
-           maxRoots: int) =
+           maxRoots: uint64) =
       let maxRoots = min(MaxRootsToRequest, maxRoots)
       var s = fromSlot
-      var roots = newSeqOfCap[(Eth2Digest, Slot)](maxRoots)
+      var roots = newSeqOfCap[BlockRootSlot](maxRoots)
       let blockPool = peer.networkState.node.blockPool
       let maxSlot = blockPool.head.blck.slot
       while s <= maxSlot:
         for r in blockPool.blockRootsForSlot(s):
-          roots.add((r, s))
-          if roots.len == maxRoots: break
+          roots.add BlockRootSlot(blockRoot: r, slot: s)
+          if roots.len == maxRoots.int: break
         s += 1
       await response.send(roots)

-    proc beaconBlockRoots(peer: Peer, roots: openarray[(Eth2Digest, Slot)])
+    proc beaconBlockRoots(peer: Peer, roots: openarray[BlockRootSlot])

   requestResponse:
     proc getBeaconBlockHeaders(
            peer: Peer,
            blockRoot: Eth2Digest,
            slot: Slot,
-           maxHeaders: int,
-           skipSlots: int,
-           backward: uint8) =
+           maxHeaders: uint64,
+           skipSlots: uint64,
+           backward: bool) =
       let maxHeaders = min(MaxHeadersToRequest, maxHeaders)
       var headers: seq[BeaconBlockHeader]
       let db = peer.networkState.db

-      if backward != 0:
+      if backward:
         # TODO: implement skipSlots

         var blockRoot = blockRoot

@@ -205,7 +199,7 @@ p2pProtocol BeaconSync(version = 1,

         while not br.isNil:
           blockRefs.add(br)
-          if blockRefs.len == maxHeaders:
+          if blockRefs.len == maxHeaders.int:
             break
           br = br.parent

@@ -223,50 +217,13 @@ p2pProtocol BeaconSync(version = 1,
       while s <= maxSlot:
         for r in blockPool.blockRootsForSlot(s):
           headers.add(db.getBlock(r).get().toHeader)
-          if headers.len == maxHeaders: break
+          if headers.len == maxHeaders.int: break
         s += 1

       await response.send(headers)

     proc beaconBlockHeaders(peer: Peer, blockHeaders: openarray[BeaconBlockHeader])

-  # TODO move this at the bottom, because it's not in the spec yet, but it will
-  # consume a `method_id`
-  requestResponse:
-    proc getAncestorBlocks(
-           peer: Peer,
-           needed: openarray[FetchRecord]) =
-      var resp = newSeqOfCap[BeaconBlock](needed.len)
-      let db = peer.networkState.db
-      var neededRoots = initSet[Eth2Digest]()
-      for rec in needed: neededRoots.incl(rec.root)
-
-      for rec in needed:
-        if (var blck = db.getBlock(rec.root); blck.isSome()):
-          # TODO validate historySlots
-          let firstSlot = blck.get().slot - rec.historySlots
-
-          for i in 0..<rec.historySlots.int:
-            resp.add(blck.get())
-            if resp.len >= MaxAncestorBlocksResponse:
-              break
-
-            if blck.get().parent_root in neededRoots:
-              # Don't send duplicate blocks, if neededRoots has roots that are
-              # in the same chain
-              break
-
-            if (blck = db.getBlock(blck.get().parent_root);
-                blck.isNone() or blck.get().slot < firstSlot):
-              break
-
-          if resp.len >= MaxAncestorBlocksResponse:
-            break
-
-      await response.send(resp)
-
-    proc ancestorBlocks(peer: Peer, blocks: openarray[BeaconBlock])
-
   requestResponse:
     proc getBeaconBlockBodies(
            peer: Peer,

@@ -285,7 +242,11 @@ p2pProtocol BeaconSync(version = 1,
            peer: Peer,
            blockBodies: openarray[BeaconBlockBody])

-proc getBeaconBlocks*(peer: Peer, blockRoot: Eth2Digest, slot: Slot, maxBlocks, skipSlots: int, backward: uint8): Future[Option[seq[BeaconBlock]]] {.async.} =
+proc getBeaconBlocks*(peer: Peer,
+                      blockRoot: Eth2Digest,
+                      slot: Slot,
+                      maxBlocks, skipSlots: uint64,
+                      backward: bool): Future[Option[seq[BeaconBlock]]] {.async.} =
   ## Retrieve block headers and block bodies from the remote peer, merge them into blocks.
   assert(maxBlocks <= MaxHeadersToRequest)
   let headersResp = await peer.getBeaconBlockHeaders(blockRoot, slot, maxBlocks, skipSlots, backward)

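For orientation, a hypothetical call site under the new signature (illustrative only; `somePeer` and `someRoot` are stand-ins, not names from this commit):

    # Counts are uint64 now, and the direction is an explicit bool rather
    # than a uint8 flag, so intent is readable at the call site:
    let blocks = await somePeer.getBeaconBlocks(
      someRoot, Slot(1), maxBlocks = 128'u64, skipSlots = 0'u64, backward = false)
    if blocks.isSome:
      echo "received ", blocks.get.len, " blocks"
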
nim.cfg
@@ -1,6 +1,8 @@
 --threads:on
 --opt:speed

+# -d:"chronicles_sinks=json"
+
 @if windows:
   # increase stack size
   --passL:"-Wl,--stack,8388608"

@@ -1,7 +1,7 @@
 import
   confutils,
   ../beacon_chain/[extras, ssz],
-  ../beacon_chain/spec/[beaconstate, bitfield, datatypes, digest, validator],
+  ../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
   ../tests/testutil

 proc stateSize(deposits: int, maxContent = false) =

@@ -23,7 +23,7 @@ proc stateSize(deposits: int, maxContent = false) =
     # validatorsPerCommittee =
     #   len(crosslink_committees[0].committee) # close enough..
     # for a in state.latest_attestations.mitems():
-    #   a.aggregation_bits = BitField.init(validatorsPerCommittee)
+    #   a.aggregation_bits = BitSeq.init(validatorsPerCommittee)
   echo "Validators: ", deposits, ", total: ", SSZ.encode(state).len

 dispatch(stateSize)

@@ -1,16 +1,11 @@
 import
   confutils, stats, times,
-  json, strformat,
+  strformat,
   options, sequtils, random, tables,
   ../tests/[testutil],
   ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
   ../beacon_chain/[attestation_pool, extras, ssz, state_transition, fork_choice]

-proc `%`(v: uint64): JsonNode =
-  if v > uint64(high(BiggestInt)): newJString($v) else: newJInt(BiggestInt(v))
-proc `%`(v: Eth2Digest): JsonNode = newJString($v)
-proc `%`(v: ValidatorSig|ValidatorPubKey): JsonNode = newJString($v)
-
 type Timers = enum
   tBlock = "Process non-epoch slot with block"
   tEpoch = "Process epoch slot with block"

@@ -36,14 +31,11 @@ template withTimerRet(stats: var RunningStat, body: untyped): untyped =

   tmp

-proc `%`*(x: Slot): JsonNode {.borrow.}
-proc `%`*(x: Epoch): JsonNode {.borrow.}
-
 proc writeJson*(prefix, slot, v: auto) =
   var f: File
   defer: close(f)
-  discard open(f, fmt"{prefix:04}-{humaneSlotNum(slot):08}.json", fmWrite)
-  write(f, pretty(%*(v)))
+  let fileName = fmt"{prefix:04}-{humaneSlotNum(slot):08}.json"
+  Json.saveFile(fileName, v, pretty = true)

 cli do(slots = 448,
        validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum

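The deleted `%` overloads were only needed because std/json knows nothing about `Slot`, `Epoch`, digests or keys; `Json.saveFile` from json_serialization reuses the same `writeValue`-based serializers as the rest of the codebase. A minimal standalone sketch (the `Demo` type is invented for illustration):

    import json_serialization

    type Demo = object
      slot: uint64
      name: string

    # One call replaces the open()/pretty(%*(v))/write() dance:
    Json.saveFile("demo.json", Demo(slot: 42, name: "x"), pretty = true)
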
@@ -9,7 +9,6 @@ import # Unit test
   ./test_attestation_pool,
   ./test_beacon_chain_db,
   ./test_beacon_node,
-  ./test_bitfield,
   ./test_beaconstate,
   ./test_block_pool,
   ./test_helpers,

@@ -21,4 +20,5 @@ import # Unit test
 import # Official fixtures
   ./official/test_fixture_shuffling,
   ./official/test_fixture_bls,
-  ./official/test_fixture_ssz_uint
+  ./official/test_fixture_ssz_uint,
+  ./official/test_fixture_ssz_static

@@ -1 +1 @@
-Subproject commit 470513eddfd7b4d1d45c908816b966c877c0d232
+Subproject commit de468c07c2518cf1546c4cb615418738a2918577

@@ -1,4 +1,6 @@
 import
+  # Standard library
+  os, strutils,
   # Status libs
   stew/byteutils,
   eth/common, serialization, json_serialization,

@@ -72,6 +74,10 @@ type
     handler*: string
     test_cases*: seq[T]

+const
+  FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
+  JsonTestsDir* = FixturesDir / "json_tests"
+
 # #######################
 # Default init
 proc default*(T: typedesc): T = discard

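With `FixturesDir`/`JsonTestsDir` exported, individual test files can derive fixture paths from a single root instead of re-spelling the directory layout; this is what the `ssz_generic` and `ssz_static` tests further down switch to. Illustrative:

    # Hypothetical path construction on top of the new constants
    # (the `/` operator comes from the os module):
    let uintTests   = JsonTestsDir / "ssz_generic" / "uint" / "uint_bounds.json"
    let staticTests = JsonTestsDir / "ssz_static" / "core"
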
@@ -87,9 +93,6 @@ proc readValue*[N: static int](r: var JsonReader, a: var array[N, byte]) {.inlin
   # if so export that to nim-eth
   hexToByteArray(r.readValue(string), a)

-proc readValue*(r: var JsonReader, a: var ValidatorIndex) {.inline.} =
-  a = r.readValue(uint32)
-
 proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =
   ## Custom deserializer for seq[byte]
   a = hexToSeqByte(r.readValue(string))

@@ -0,0 +1,220 @@
+# beacon_chain
+# Copyright (c) 2018 Status Research & Development GmbH
+# Licensed and distributed under either of
+#   * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
+#   * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
+# at your option. This file may not be copied, modified, or distributed except according to those terms.
+
+import
+  # Standard library
+  os, strutils, strformat, tables, unittest, sequtils, typetraits,
+  # Status libs
+  stew/[byteutils, bitseqs], nimcrypto/hash,
+  serialization/testing/tracing,
+  json_serialization, json_serialization/lexer,
+  # Beacon chain internals
+  ../../beacon_chain/ssz,
+  ../../beacon_chain/spec/[datatypes, validator, digest, crypto],
+  # Test utilities
+  ../testutil,
+  ./fixtures_utils
+
+const
+  failFast = defined(debug) and false
+  traceOnFailure = defined(debug)
+
+type
+  SpecObject[T] = ref object of RootObj
+    obj: ref T
+
+  SszStaticTest* = object
+    obj: RootRef
+    objType, objJsonRepr: string
+    expectedBytes: seq[byte]
+    expectedRootHash, expectedSigHash: Eth2Digest
+    hasSigHash: bool
+    line: int
+
+  ReaderProc = proc(r: var JsonReader): RootRef {.cdecl, gcsafe.}
+  TestingProc = proc(file: string, test: SszStaticTest) {.cdecl, gcsafe.}
+
+  SpecTypeVtable = object
+    reader: ReaderProc
+    tester: TestingProc
+
+let testsDir = JsonTestsDir / "ssz_static" / "core"
+let minDevTestFile = getTempDir() / "minimal_ssz_test.json"
+
+var specTypesRTTI = initTable[string, SpecTypeVtable]()
+
+proc readerImpl[T](r: var JsonReader): RootRef {.cdecl, gcsafe.} =
+  var res = SpecObject[T](obj: new T)
+  res.obj[] = r.readValue(T)
+  RootRef(res)
+
+# TODO:
+# Fun fact: With mainnet settings, the BeaconState object
+# is too large to safely exist as a stack variable. The
+# `testerImpl` procedure below will trigger a segmentation
+# fault on its very first line because of it.
+#
+# To work-around this issue, this file uses ref objects
+# to store the loaded test cases, but we must compare them
+# by value:
+template valuesAreEqual[T](a, b: ref T): bool =
+  a[] == b[]
+
+template valuesAreEqual[T](a, b: T): bool =
+  a == b
+
+template `$`(x: ref auto): string =
+  $(x[])
+
+proc readSszValueRef*(input: openarray[byte], T: type): ref T =
+  new result
+  result[] = readSszValue(input, T)
+
+proc testerImpl[T](path: string, test: SszStaticTest) {.cdecl, gcsafe.} =
+  doAssert test.obj != nil
+  var obj = SpecObject[T](test.obj)
+
+  template execTest(testOpName, testOp, expectedRes) =
+    let ourRes = testOp
+    let success = valuesAreEqual(ourRes, expectedRes)
+    if not success and traceOnFailure:
+      {.gcsafe.}:
+        echo "====== ", testOpName, " failed ", path, ":", test.line
+        echo " our result:"
+        echo " ", ourRes
+        echo " expected result:"
+        echo " ", expectedRes
+        when defined(serialization_tracing):
+          tracingEnabled = true
+          discard testOp
+          tracingEnabled = false
+        echo "======================================================"
+        if failFast: quit 1
+
+    # TODO BEWARE: Passing the boolean expression to `check` directly
+    # will trigger a Nim compilation bomb. This is most likely caused
+    # by a mis-behaving generics instantiations cache when a function
+    # is explicitly instantiated to get its address.
+    # There is a recursive instantiation loop of system's `$` operator.
+    check success
+
+  # let ob = SSZ.encode(obj.obj)
+
+  when false:
+    execTest "serialization",
+             (let ourBytes = SSZ.encode(obj.obj); ourBytes),
+             test.expectedBytes
+
+  execTest "root hash check",
+           hashTreeRoot(obj.obj),
+           test.expectedRootHash
+
+  when hasSigningRoot(T):
+    doAssert test.hasSigHash
+    execTest "sig hash check",
+             signingRoot(obj.obj),
+             test.expectedSigHash
+
+  when true:
+    execTest "roundtrip",
+             readSszValueRef(test.expectedBytes, T),
+             obj.obj
+
+template addSpecTypeRTTI(T: type) =
+  var reader = readerImpl[T]
+  var tester = testerImpl[T]
+  specTypesRTTI.add(T.name, SpecTypeVtable(reader: reader,
+                                           tester: tester))
+foreachSpecType(addSpecTypeRTTI)
+
+proc runTest(path: string, test: SszStaticTest) =
+  if test.objType != "Unsupported":
+    specTypesRTTI[test.objType].tester(path, test)
+
+proc advanceToClosingBrace(lexer: var JsonLexer, openedBraces = 1) =
+  var closedBraces = 0
+  while closedBraces < openedBraces:
+    while lexer.tok notin {tkCurlyLe, tkCurlyRi}:
+      lexer.next
+    if lexer.tok == tkCurlyLe:
+      dec closedBraces
+    else:
+      inc closedBraces
+    lexer.next
+
+proc readValue*(r: var JsonReader, result: var SszStaticTest) {.gcsafe.} =
+  r.skipToken tkCurlyLe
+
+  if r.lexer.tok != tkString:
+    r.raiseUnexpectedToken(etString)
+
+  var reader: ReaderProc
+  let key = r.lexer.strVal
+  {.gcsafe.}:
+    if not specTypesRTTI.hasKey(key):
+      result.objType = "Unsupported"
+      r.lexer.advanceToClosingBrace
+      return
+
+    result.objType = key
+    result.line = r.lexer.line
+    reader = specTypesRTTI[key].reader
+
+  r.lexer.next
+  r.skipToken tkColon
+  r.skipToken tkCurlyLe
+
+  while r.lexer.tok == tkString:
+    # TODO: I was hit by a very nasty Nim bug here.
+    # If you use `let` on the next line, the variable will be
+    # aliased to `r.lexer.strVar` instead of being copied.
+    # This will create problems, because the value is modified
+    # on the next line.
+    var field = r.lexer.strVal
+    r.lexer.next
+    r.skipToken tkColon
+
+    case field
+    of "value":
+      result.obj = reader(r)
+    of "serialized":
+      result.expectedBytes = hexToSeqByte r.readValue(string)
+    of "root":
+      result.expectedRootHash = Eth2Digest.fromHex r.readValue(string)
+    of "signing_root":
+      result.expectedSigHash = Eth2Digest.fromHex r.readValue(string)
+      result.hasSigHash = true
+    else:
+      r.raiseUnexpectedField(field, type(result).name)
+
+    if r.lexer.tok == tkComma:
+      r.lexer.next()
+    else:
+      break
+
+  r.skipToken tkCurlyRi
+  r.skipToken tkCurlyRi
+
+  when failFast:
+    # This will produce faster failures in debug builds
+    {.gcsafe.}: runTest result
+
+proc executeSuite(path: string) =
+  let sszSuite = path.parseTests SszStaticTest
+  suite &"{path}: {sszSuite.title}":
+    for sszTest in sszSuite.test_cases:
+      test &"test case on line {sszTest.line}":
+        runTest path, sszTest
+
+if fileExists(minDevTestFile):
+  executeSuite minDevTestFile
+
+for kind, path in walkDir(testsDir):
+  if kind notin {pcFile, pcLinkToFile}: continue
+  if const_preset in path:
+    executeSuite path

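The reader/tester table in the new file deserves a note: Nim generics exist only at compile time, so to dispatch on the type-name string found in the JSON fixture at runtime, `readerImpl[T]`/`testerImpl[T]` are instantiated once per spec type up front and stored as plain proc pointers. The same pattern in miniature (a standalone sketch with invented names, not code from this commit):

    import tables, typetraits

    type Vtable = object
      tester: proc(line: int) {.cdecl.}

    var registry = initTable[string, Vtable]()

    proc testerImpl[T](line: int) {.cdecl.} =
      echo "testing ", name(T), " case at line ", line

    template register(T: type) =
      registry[name(T)] = Vtable(tester: testerImpl[T])

    register(int)
    register(string)
    registry["string"].tester(42)  # string-keyed dispatch to testerImpl[string]
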
@@ -0,0 +1,2 @@
+-d:"serialization_tracing"
+-d:"ssz_testing"

@@ -25,8 +25,8 @@ type
     ssz*: seq[byte]
     tags*: seq[string]

-const TestFolder = currentSourcePath.rsplit(DirSep, 1)[0]
-const TestsPath = "fixtures" / "json_tests" / "ssz_generic" / "uint"
+const
+  TestsDir = JsonTestsDir / "ssz_generic" / "uint"

 func to(val: string, T: typedesc): T =
   when T is StUint:

@@ -97,18 +97,18 @@ proc runSSZUintTest(inputTests: Tests[SSZUint]) =

 suite "Official - SSZ unsigned integer tests" & preset():
   block: # "Integers right at or beyond the bounds of the allowed value range"
-    let uintBounds = parseTests(TestFolder / TestsPath / "uint_bounds.json", SSZUint)
+    let uintBounds = parseTests(TestsDir / "uint_bounds.json", SSZUint)
     test uintBounds.summary & preset():
       runSSZUintTest(uintBounds)

   block: # "Random integers chosen uniformly over the allowed value range"
-    let uintRandom = parseTests(TestFolder / TestsPath / "uint_random.json", SSZUint)
+    let uintRandom = parseTests(TestsDir / "uint_random.json", SSZUint)
     test uintRandom.summary & preset():
       runSSZUintTest(uintRandom)

   # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
   block: # "Serialized integers that are too short or too long"
-    let uintWrongLength = parseTests(TestFolder / TestsPath / "uint_wrong_length.json", SSZUint)
+    let uintWrongLength = parseTests(TestsDir / "uint_wrong_length.json", SSZUint)
     test "[Skipped] " & uintWrongLength.summary & preset():
       # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
       echo " [Skipped] Pending https://github.com/status-im/nim-beacon-chain/issues/280"

@@ -23,8 +23,8 @@ mkdir -p "$VALIDATORS_DIR"
 cd "$GIT_ROOT"
 mkdir -p $BUILD_OUTPUTS_DIR

-# Run with "SHARD_COUNT=8 ./start.sh" to change these
-DEFS="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024
+# Run with "SHARD_COUNT=4 ./start.sh" to change these
+DEFS="-d:chronicles_log_level=DEBUG "
+DEFS+="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024
 DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-8} " # Spec default: 64
 DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-12} " # Spec default: 6

@@ -1,27 +0,0 @@
-import
-  unittest,
-  ../beacon_chain/spec/[bitfield]
-
-suite "BitField":
-  test "roundtrips":
-    var
-      a = BitField.init(100)
-      b = BitField.init(100)
-
-    check:
-      not a.get_bitfield_bit(0)
-
-    a.set_bitfield_bit(1)
-
-    check:
-      not a.get_bitfield_bit(0)
-      a.get_bitfield_bit(1)
-
-    b.set_bitfield_bit(2)
-
-    a.combine(b)
-
-    check:
-      not a.get_bitfield_bit(0)
-      a.get_bitfield_bit(1)
-      a.get_bitfield_bit(2)

@@ -7,22 +7,44 @@

 import
   unittest, sequtils, options,
-  nimcrypto, eth/common, blscurve, serialization/testing/generic_suite,
-  ../beacon_chain/ssz, ../beacon_chain/spec/[datatypes, digest]
+  stint, nimcrypto, eth/common, blscurve, serialization/testing/generic_suite,
+  ../beacon_chain/spec/[datatypes, digest],
+  ../beacon_chain/ssz, ../beacon_chain/ssz/navigator

-func filled[N: static[int], T](typ: type array[N, T], value: T): array[N, T] =
-  for val in result.mitems:
-    val = value
-
-func filled(T: type MDigest, value: byte): T =
-  for val in result.data.mitems:
-    val = value
-
-suite "Simple serialization":
-  # pending spec updates in
-  # - https://github.com/ethereum/eth2.0-specs
 type
-  Foo = object
+  SomeEnum = enum
+    A, B, C
+
+  Simple = object
+    flag: bool
+    # count: StUint[256]
+    # ignored {.dontSerialize.}: string
+    # data: array[256, bool]
+
+template reject(stmt) =
+  assert(not compiles(stmt))
+
+static:
+  assert isFixedSize(bool) == true
+
+  assert fixedPortionSize(array[10, bool]) == 10
+  assert fixedPortionSize(array[SomeEnum, uint64]) == 24
+  assert fixedPortionSize(array[3..5, string]) == 12
+
+  assert fixedPortionSize(string) == 4
+  assert fixedPortionSize(seq[bool]) == 4
+  assert fixedPortionSize(seq[string]) == 4
+
+  assert isFixedSize(array[20, bool]) == true
+  assert isFixedSize(Simple) == true
+  assert isFixedSize(string) == false
+  assert isFixedSize(seq[bool]) == false
+  assert isFixedSize(seq[string]) == false
+
+reject fixedPortionSize(int)
+
+type
+  ObjWithFields = object
     f0: uint8
     f1: uint32
     f2: EthAddress

@@ -30,66 +52,32 @@ suite "Simple serialization":
     f4: seq[byte]
     f5: ValidatorIndex

-  let expected_deser = Foo(
-    f0: 5,
-    f1: 0'u32 - 3,
-    f2: EthAddress.filled(byte 35),
-    f3: MDigest[256].filled(byte 35),
-    f4: @[byte 'c'.ord, 'o'.ord, 'w'.ord],
-    f5: ValidatorIndex(79))
-
-  var expected_ser = @[
-    byte 67, 0, 0, 0, # length
-    5,
-    0xFD, 0xFF, 0xFF, 0xFF,
-  ]
-  expected_ser &= EthAddress.filled(byte 35)
-  expected_ser &= MDigest[256].filled(byte 35).data
-  expected_ser &= [byte 3, 0, 0, 0, 'c'.ord, 'o'.ord, 'w'.ord]
-  expected_ser &= [byte 79, 0, 0]
-
-  test "Object deserialization":
-    let deser = SSZ.decode(expected_ser, Foo)
-    check: expected_deser == deser
-
-  test "Object serialization":
-    let ser = SSZ.encode(expected_deser)
-    check: expected_ser == ser
-
-  test "Not enough data":
-    expect SerializationError:
-      let x = SSZ.decode(expected_ser[0..^2], Foo)
-
-    expect SerializationError:
-      let x = SSZ.decode(expected_ser[1..^1], Foo)
-
-  test "ValidatorIndex roundtrip":
-    # https://github.com/nim-lang/Nim/issues/10027
-    let v = 79.ValidatorIndex
-    let ser = SSZ.encode(v)
-    check:
-      ser.len() == 3
-      SSZ.decode(ser, v.type) == v
-
-  SSZ.roundtripTest [1, 2, 3]
-  SSZ.roundtripTest @[1, 2, 3]
-  SSZ.roundtripTest SigKey.random().getKey()
-  SSZ.roundtripTest BeaconBlock(
-    slot: 42.Slot, signature: sign(SigKey.random(), 0'u64, ""))
-  SSZ.roundtripTest BeaconState(slot: 42.Slot)
-
-# suite "Tree hashing":
-#   # TODO The test values are taken from an earlier version of SSZ and have
-#   # nothing to do with upstream - needs verification and proper test suite
-
-#   test "Hash BeaconBlock":
-#     let vr = BeaconBlock()
-#     check:
-#       $hash_tree_root(vr) ==
-#         "8951C9C64ABA469EBA78F5D9F9A0666FB697B8C4D86901445777E4445D0B1543"
-
-#   test "Hash BeaconState":
-#     let vr = BeaconState()
-#     check:
-#       $hash_tree_root(vr) ==
-#         "66F9BF92A690F1FBD36488D98BE70DA6C84100EDF935BC6D0B30FF14A2976455"
+static:
+  assert fixedPortionSize(ObjWithFields) == 1 + 4 + sizeof(EthAddress) + (256 div 8) + 4 + 8
+
+executeRoundTripTests SSZ
+
+type
+  Foo = object
+    bar: Bar
+
+  Bar = object
+    b: string
+    baz: Baz
+
+  Baz = object
+    i: uint64
+
+suite "SSZ Navigation":
+  test "simple object fields":
+    var foo = Foo(bar: Bar(b: "bar", baz: Baz(i: 10'u64)))
+    let encoded = SSZ.encode(foo)
+
+    check SSZ.decode(encoded, Foo) == foo
+
+    let mountedFoo = sszMount(encoded, Foo)
+    check mountedFoo.bar.b == "bar"
+
+    let mountedBar = mountedFoo.bar
+    check mountedBar.baz.i == 10'u64

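The `fixedPortionSize(ObjWithFields)` assert above encodes the new SSZ layout rule: fixed-size fields contribute their full size to the fixed portion, while every variable-size field contributes only a 4-byte offset (its data lives in the variable portion). Worked out field by field, with `f3` being the `MDigest[256]` visible in the deleted code:

    # f0: uint8          -> 1 byte
    # f1: uint32         -> 4 bytes
    # f2: EthAddress     -> sizeof(EthAddress) = 20 bytes
    # f3: MDigest[256]   -> 256 div 8 = 32 bytes
    # f4: seq[byte]      -> variable-size: a 4-byte offset in the fixed part
    # f5: ValidatorIndex -> 8 bytes
    # fixed portion: 1 + 4 + 20 + 32 + 4 + 8 = 69 bytes

The same rule explains `fixedPortionSize(string) == 4` and `fixedPortionSize(seq[bool]) == 4` earlier: a lone variable-size value is just one offset.
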
@@ -10,12 +10,21 @@ import
   chronicles, eth/trie/[db],
   ../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, state_transition,
                    validator_pool, beacon_node_types],
-  ../beacon_chain/spec/[beaconstate, bitfield, crypto, datatypes, digest,
+  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
                         helpers, validator]

 func preset*(): string =
   " [Preset: " & const_preset & ']'

+when ValidatorPrivKey is BlsValue:
+  func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
+    # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
+    # lighthouse.
+    # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
+    result.kind = BlsValueType.Real
+    var bytes = uint64(i + 1000).toBytesLE()
+    copyMem(addr result.blsValue.x[0], addr bytes[0], sizeof(bytes))
+else:
   func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
     # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
     # lighthouse.

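The `BlsValue` branch builds the fake key from the little-endian encoding of `i + 1000`. For reference, a standalone check of what `toBytesLE` produces (assuming it comes from stew/endians2):

    import stew/endians2

    # 1000 = 0x03E8, so the little-endian bytes lead with e8 03:
    doAssert toBytesLE(1000'u64) == [0xe8'u8, 0x03, 0, 0, 0, 0, 0, 0]
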
@@ -170,9 +179,8 @@ proc makeAttestation*(

   doAssert sac_index != -1, "find_shard_committee should guarantee this"

-  var
-    aggregation_bits = BitField.init(committee.len)
-  set_bitfield_bit(aggregation_bits, sac_index)
+  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
+  aggregation_bits.raiseBit sac_index

   let
     msg = hash_tree_root(

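`CommitteeValidatorsBits` is the bitseq-backed replacement for the deleted `BitField`: one bit per committee member, with `raiseBit` marking the single attester this helper simulates. In miniature, using only names that appear in this commit:

    # A 4-member committee in which only member 2 attests:
    var bits = CommitteeValidatorsBits.init(4)
    bits.raiseBit 2
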
@@ -192,7 +200,7 @@ proc makeAttestation*(
     data: data,
     aggregation_bits: aggregation_bits,
     signature: sig,
-    custody_bits: BitField.init(committee.len)
+    custody_bits: CommitteeValidatorsBits.init(committee.len)
   )

 proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =