Implement the latest SSZ specification and integrate the official SSZ test suite

Zahary Karadjov 2019-07-03 10:35:05 +03:00
parent 7a4b7a6cfb
commit 398ea55801
GPG Key ID: C8936F8A3073D609 (no known key found for this signature in database)
34 changed files with 1647 additions and 739 deletions

View File

@@ -1,7 +1,7 @@
 import
   deques, options, sequtils, tables,
-  chronicles,
-  ./spec/[beaconstate, bitfield, datatypes, crypto, digest, helpers, validator],
+  chronicles, stew/bitseqs,
+  ./spec/[beaconstate, datatypes, crypto, digest, helpers, validator],
   ./extras, ./beacon_chain_db, ./ssz, ./block_pool,
   beacon_node_types
@@ -61,11 +61,11 @@ proc validate(
       finalizedEpoch = humaneEpochNum(state.finalized_checkpoint.epoch)
     return

-  if not allIt(attestation.custody_bits.bits, it == 0):
+  if not allIt(attestation.custody_bits.bytes, it == 0):
     notice "Invalid custody bitfield for phase 0"
     return false

-  if not anyIt(attestation.aggregation_bits.bits, it != 0):
+  if not anyIt(attestation.aggregation_bits.bytes, it != 0):
     notice "Empty aggregation bitfield"
     return false
@@ -211,8 +211,7 @@ proc add*(pool: var AttestationPool,
       # Attestations in the pool that are a subset of the new attestation
       # can now be removed per same logic as above
       a.validations.keepItIf(
-        if it.aggregation_bits.isSubsetOf(
-            validation.aggregation_bits):
+        if it.aggregation_bits.isSubsetOf(validation.aggregation_bits):
           debug "Removing subset attestation",
             existingParticipants = get_attesting_indices_seq(
               state, a.data, it.aggregation_bits),
@@ -314,10 +313,8 @@ proc getAttestationsForBlock*(
           # and naively add as much as possible in one go, but we could also
           # add the same attestation data twice, as long as there's at least
           # one new attestation in there
-          if not attestation.aggregation_bits.overlaps(
-              v.aggregation_bits):
-            attestation.aggregation_bits.combine(
-              v.aggregation_bits)
+          if not attestation.aggregation_bits.overlaps(v.aggregation_bits):
+            attestation.aggregation_bits.combine(v.aggregation_bits)
             attestation.custody_bits.combine(v.custody_bits)
             attestation.signature.combine(v.aggregate_signature)

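The overlaps/combine/isSubsetOf calls above are plain byte-wise boolean algebra over the bitlist payload; the real implementations live in stew/bitseqs. A minimal sketch of the semantics, with hypothetical helper names (not the library API):

func overlapsExample(a, b: seq[byte]): bool =
  # true when any bit is raised in both inputs
  for i in 0 ..< min(a.len, b.len):
    if (a[i] and b[i]) != 0'u8:
      return true

func isSubsetOfExample(a, b: seq[byte]): bool =
  # every bit raised in `a` must also be raised in `b`
  for i in 0 ..< a.len:
    if (a[i] and not b[i]) != 0'u8:
      return false
  true

func combineExample(tgt: var seq[byte], src: seq[byte]) =
  # bitwise union, in place
  for i in 0 ..< tgt.len:
    tgt[i] = tgt[i] or src[i]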
View File

@@ -1,9 +1,9 @@
 import
   net, sequtils, options, tables, osproc, random, strutils, times, strformat,
-  stew/shims/os, stew/objects,
+  stew/shims/os, stew/[objects, bitseqs],
   chronos, chronicles, confutils, serialization/errors,
   eth/trie/db, eth/trie/backends/rocksdb_backend, eth/async_utils,
-  spec/[bitfield, datatypes, digest, crypto, beaconstate, helpers, validator],
+  spec/[datatypes, digest, crypto, beaconstate, helpers, validator],
   conf, time, state_transition, fork_choice, ssz, beacon_chain_db,
   validator_pool, extras, attestation_pool, block_pool, eth2_network,
   beacon_node_types, mainchain_monitor, trusted_state_snapshots, version,
@@ -309,15 +309,15 @@ proc sendAttestation(node: BeaconNode,
   let
     validatorSignature = await validator.signAttestation(attestationData)

-  var aggregationBitfield = BitField.init(committeeLen)
-  set_bitfield_bit(aggregationBitfield, indexInCommittee)
+  var aggregationBits = CommitteeValidatorsBits.init(committeeLen)
+  aggregationBits.raiseBit indexInCommittee

   var attestation = Attestation(
     data: attestationData,
     signature: validatorSignature,
-    aggregation_bits: aggregationBitfield,
+    aggregation_bits: aggregationBits,
     # Stub in phase0
-    custody_bits: BitField.init(committeeLen)
+    custody_bits: CommitteeValidatorsBits.init(committeeLen)
   )

   node.network.broadcast(topicAttestations, attestation)

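The pattern above builds a per-committee bitlist sized to the committee and raises only the local validator's bit. A hedged sketch using the CommitteeValidatorsBits API introduced in this commit (committee size and index are invented for the example):

var bits = CommitteeValidatorsBits.init(8)  # committee of 8, all bits low
bits.raiseBit 3                             # this validator sits at position 3
doAssert bits[3] and not bits[2]            # only the one bit is raised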
View File

@@ -1,7 +1,7 @@
 import
   sets, deques, tables,
-  eth/keys,
-  spec/[bitfield, datatypes, crypto, digest],
+  eth/keys, stew/bitseqs,
+  spec/[datatypes, crypto, digest],
   beacon_chain_db, conf, mainchain_monitor, eth2_network, time

 type
@@ -45,8 +45,8 @@ type
   #
   # #############################################
   Validation* = object
-    aggregation_bits*: BitField
-    custody_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits
+    custody_bits*: CommitteeValidatorsBits ##\
       ## Phase 1 - the handling of this field is probably broken..
     aggregate_signature*: ValidatorSig
@@ -54,7 +54,7 @@ type
   # Yeah, you can do any linear combination of signatures. but you have to
   # remember the linear combination of pubkeys that constructed
   # if you have two instances of a signature from pubkey p, then you need 2*p
-  # in the group pubkey because the attestation bitfield is only 1 bit per
+  # in the group pubkey because the attestation bitlist is only 1 bit per
   # pubkey right now, attestations do not support this it could be extended to
   # support N overlaps up to N times per pubkey if we had N bits per validator
   # instead of 1

View File

@@ -12,7 +12,7 @@ type
   FetchAncestorsResponseHandler = proc (b: BeaconBlock) {.gcsafe.}

 proc fetchAncestorBlocksFromPeer(peer: Peer, rec: FetchRecord, responseHandler: FetchAncestorsResponseHandler) {.async.} =
-  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots.int, 0, 1)
+  let blocks = await peer.getBeaconBlocks(rec.root, GENESIS_SLOT, rec.historySlots, 0, true)
   if blocks.isSome:
     for b in blocks.get:
       responseHandler(b)

View File

@@ -6,10 +6,10 @@
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

 import
-  algorithm, chronicles, collections/sets, math, options, sequtils, tables,
+  algorithm, sets, math, options, sequtils,
+  chronicles, stew/bitseqs,
   ../extras, ../ssz, ../beacon_node_types,
-  ./bitfield, ./crypto, ./datatypes, ./digest, ./helpers, ./validator,
-  tables
+  ./crypto, ./datatypes, ./digest, ./helpers, ./validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#verify_merkle_branch
 func verify_merkle_branch(leaf: Eth2Digest, proof: openarray[Eth2Digest], depth: uint64, index: uint64, root: Eth2Digest): bool =
@@ -357,8 +357,8 @@ func is_valid_indexed_attestation*(
   # Check if ``indexed_attestation`` has valid indices and signature.
   let
-    bit_0_indices = indexed_attestation.custody_bit_0_indices
-    bit_1_indices = indexed_attestation.custody_bit_1_indices
+    bit_0_indices = indexed_attestation.custody_bit_0_indices.asSeq
+    bit_1_indices = indexed_attestation.custody_bit_1_indices.asSeq

   # Verify no index has custody bit equal to 1 [to be removed in phase 1]
   if len(bit_1_indices) != 0:
@@ -370,7 +370,7 @@ func is_valid_indexed_attestation*(
     return false

   # Verify index sets are disjoint
-  if len(intersection(toSet(bit_0_indices), toSet(bit_1_indices))) != 0:
+  if len(intersection(bit_0_indices.toSet, bit_1_indices.toSet)) != 0:
     return false

   # Verify indices are sorted
@@ -405,11 +405,11 @@ func is_valid_indexed_attestation*(
 # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_attesting_indices
 func get_attesting_indices*(state: BeaconState,
                             attestation_data: AttestationData,
-                            bitfield: BitField,
+                            bits: CommitteeValidatorsBits,
                             stateCache: var StateCache):
                             HashSet[ValidatorIndex] =
   ## Return the sorted attesting indices corresponding to ``attestation_data``
-  ## and ``bitfield``.
+  ## and ``bits``.
   ## The spec goes through a lot of hoops to sort things, and sometimes
   ## constructs sets from the results here. The basic idea is to always
   ## just keep it in a HashSet, which seems to suffice. If needed, it's
@@ -420,15 +420,15 @@ func get_attesting_indices*(state: BeaconState,
     state, attestation_data.target.epoch, attestation_data.crosslink.shard,
     stateCache)
   for i, index in committee:
-    if get_bitfield_bit(bitfield, i):
+    if bits[i]:
       result.incl index

-func get_attesting_indices_seq*(
-    state: BeaconState, attestation_data: AttestationData, bitfield: BitField):
-    seq[ValidatorIndex] =
+func get_attesting_indices_seq*(state: BeaconState,
+                                attestation_data: AttestationData,
+                                bits: CommitteeValidatorsBits): seq[ValidatorIndex] =
   var cache = get_empty_per_epoch_cache()
   toSeq(items(get_attesting_indices(
-    state, attestation_data, bitfield, cache)))
+    state, attestation_data, bits, cache)))

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_indexed_attestation
 func get_indexed_attestation(state: BeaconState, attestation: Attestation,
@@ -469,13 +469,13 @@ func get_indexed_attestation(state: BeaconState, attestation: Attestation,
   ## 0.6.3 highlights and explicates) except in that the spec,
   ## for no obvious reason, verifies it.
   IndexedAttestation(
-    custody_bit_0_indices: sorted(
-      mapIt(custody_bit_0_indices, it.uint64), system.cmp),
+    custody_bit_0_indices: CustodyBitIndices sorted(
+      mapIt(custody_bit_0_indices, it.uint64), system.cmp),
     # toSeq pointlessly constructs int-indexable copy so mapIt can infer type;
     # see above
-    custody_bit_1_indices:
-      sorted(mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
-        system.cmp),
+    custody_bit_1_indices: CustodyBitIndices sorted(
+      mapIt(toSeq(items(custody_bit_1_indices)), it.uint64),
+      system.cmp),
     data: attestation.data,
     signature: attestation.signature,
   )

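Reading side of the same bitlist: get_attesting_indices keeps only the committee members whose bit is raised. A small sketch with invented committee contents:

let committee = [ValidatorIndex(14), ValidatorIndex(3), ValidatorIndex(27)]
var bits = CommitteeValidatorsBits.init(committee.len)
bits.raiseBit 0
bits.raiseBit 2
for i, index in committee:
  if bits[i]:
    echo index  # prints 14 and 27; validator 3 did not attest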
View File

@@ -1,50 +0,0 @@
-import stew/byteutils, json_serialization, stew/bitops2
-
-type
-  BitField* = object
-    ## A simple bit field type that follows the semantics of the spec, with
-    ## regards to bit endian operations
-    # TODO stew contains utilities for with bitsets - could try to
-    # recycle that, but there are open questions about bit endianess there.
-    bits*: seq[byte]
-
-func ceil_div8(v: int): int = (v + 7) div 8
-
-func init*(T: type BitField, bits: int): BitField =
-  BitField(bits: newSeq[byte](ceil_div8(bits)))
-
-# TODO fix this for state tests..
-#proc readValue*(r: var JsonReader, a: var BitField) {.inline.} =
-#  a.bits = r.readValue(string).hexToSeqByte()
-
-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#get_bitfield_bit
-func get_bitfield_bit*(bitfield: BitField, i: int): bool =
-  # Extract the bit in ``bitfield`` at position ``i``.
-  doAssert 0 <= i div 8, "i: " & $i & " i div 8: " & $(i div 8)
-  doAssert i div 8 < bitfield.bits.len, "i: " & $i & " i div 8: " & $(i div 8)
-  ((bitfield.bits[i div 8] shr (i mod 8)) mod 2) > 0'u8
-
-# TODO spec candidates below, though they're used only indirectly there..
-func set_bitfield_bit*(bitfield: var BitField, i: int) =
-  bitfield.bits[i div 8] = bitfield.bits[i div 8] or 1'u8 shl (i mod 8)
-
-func combine*(tgt: var BitField, src: BitField) =
-  for i in 0 ..< tgt.bits.len:
-    tgt.bits[i] = tgt.bits[i] or src.bits[i]
-
-func overlaps*(a, b: BitField): bool =
-  for i in 0..<a.bits.len:
-    if (a.bits[i] and b.bits[i]) > 0'u8:
-      return true
-
-func countOnes*(a: BitField): int {.inline.} =
-  for v in a.bits: result += countOnes(v)
-
-func len*(a: BitField): int {.inline.} =
-  countOnes(a)
-
-func isSubsetOf*(a, b: BitField): bool =
-  for i in 0 ..< (len(a.bits) * 8):
-    if get_bitfield_bit(a, i) and not get_bitfield_bit(b, i):
-      return false
-  true

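For reference, the deleted helpers index bits little-endian within each byte: bit i lives in byte i div 8 at in-byte position i mod 8, which matches the SSZ bit ordering. A worked check of that convention, using only the arithmetic from the deleted functions:

var bits = newSeq[byte]((10 + 7) div 8)                # 10 bits round up to 2 bytes
bits[9 div 8] = bits[9 div 8] or (1'u8 shl (9 mod 8))  # set bit 9
doAssert bits == @[0b0000_0000'u8, 0b0000_0010'u8]     # bit 9 = byte 1, position 1
doAssert ((bits[9 div 8] shr (9 mod 8)) mod 2) > 0'u8  # get bit 9 back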
BIN  beacon_chain/spec/crypto (executable file; binary not shown)

View File

@@ -46,7 +46,7 @@
 import
   sequtils,
-  hashes, eth/rlp, stew/objects,
+  hashes, eth/rlp, nimcrypto/utils,
   blscurve, json_serialization,
   digest
@@ -56,20 +56,102 @@ export
 export blscurve.init, blscurve.getBytes, blscurve.combine, blscurve.`$`, blscurve.`==`

 type
-  ValidatorPubKey* = blscurve.VerKey
+  BlsValueType* = enum
+    Real
+    OpaqueBlob
+
+  BlsValue*[T] = object
+    # TODO This is a temporary type needed until we sort out the
+    # issues with invalid BLS values appearing in the SSZ test suites.
+    case kind*: BlsValueType
+    of Real:
+      blsValue*: T
+    of OpaqueBlob:
+      when T is blscurve.Signature:
+        blob*: array[96, byte]
+      else:
+        blob*: array[48, byte]
+
+  ValidatorPubKey* = BlsValue[blscurve.VerKey]
+  # ValidatorPubKey* = blscurve.VerKey
+  # ValidatorPubKey* = array[48, byte]
+  # The use of byte arrays proved to be a dead end pretty quickly.
+  # Plenty of code needs to be modified for a successful build and
+  # the changes will negatively affect the performance.
+
+  # ValidatorPrivKey* = BlsValue[blscurve.SigKey]
   ValidatorPrivKey* = blscurve.SigKey
-  ValidatorSig* = blscurve.Signature
+
+  ValidatorSig* = BlsValue[blscurve.Signature]
+
+  BlsCurveType* = VerKey|SigKey|Signature
+
   ValidatorPKI* = ValidatorPrivKey|ValidatorPubKey|ValidatorSig
-func shortLog*(x: ValidatorPKI): string =
-  ($x)[0..7]
-
-template hash*(k: ValidatorPubKey|ValidatorPrivKey): Hash =
-  hash(k.getBytes())
-
-func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey = pk.getKey()
-
-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_aggregate_pubkeys
+proc init*[T](BLS: type BlsValue[T], val: auto): BLS =
+  result.kind = BlsValueType.Real
+  result.blsValue = init(T, val)
+
+func `$`*(x: BlsValue): string =
+  if x.kind == Real:
+    $x.blsValue
+  else:
+    "r:" & toHex(x.blob)
+
+func `==`*(a, b: BlsValue): bool =
+  if a.kind != b.kind: return false
+  if a.kind == Real:
+    return a.blsValue == b.blsValue
+  else:
+    return a.blob == b.blob
+
+func getBytes*(x: BlsValue): auto =
+  if x.kind == Real:
+    getBytes x.blsValue
+  else:
+    x.blob
+
+func shortLog*(x: BlsValue): string =
+  ($x)[0..7]
+
+func shortLog*(x: BlsCurveType): string =
+  ($x)[0..7]
+
+proc hash*(x: BlsValue): Hash {.inline.} =
+  if x.kind == Real:
+    hash x.blsValue.getBytes()
+  else:
+    hash x.blob
+
+template hash*(x: BlsCurveType): Hash =
+  hash(getBytes(x))
+
+template `==`*[T](a: BlsValue[T], b: T): bool =
+  a.blsValue == b
+
+template `==`*[T](a: T, b: BlsValue[T]): bool =
+  a == b.blsValue
+
+func pubKey*(pk: ValidatorPrivKey): ValidatorPubKey =
+  when ValidatorPubKey is BlsValue:
+    ValidatorPubKey(kind: Real, blsValue: pk.getKey())
+  elif ValidatorPubKey is array:
+    pk.getKey.getBytes
+  else:
+    pk.getKey
+
+proc combine*[T](a: openarray[BlsValue[T]]): T =
+  doAssert a.len > 0 and a[0].kind == Real
+  result = a[0].blsValue
+  for i in 1 ..< a.len:
+    doAssert a[i].kind == Real
+    result.combine a[i].blsValue
+
+proc combine*[T](x: var BlsValue[T], other: BlsValue[T]) =
+  doAssert x.kind == Real and other.kind == Real
+  x.blsValue.combine(other.blsValue)
+
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_aggregate_pubkeys
 func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
   var empty = true
   for key in keys:
@@ -79,14 +161,18 @@ func bls_aggregate_pubkeys*(keys: openArray[ValidatorPubKey]): ValidatorPubKey =
     else:
       result.combine(key)

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify
 func bls_verify*(
     pubkey: ValidatorPubKey, msg: openArray[byte], sig: ValidatorSig,
     domain: uint64): bool =
   # name from spec!
-  sig.verify(msg, domain, pubkey)
+  when ValidatorPubKey is BlsValue:
+    doAssert sig.kind == Real and pubkey.kind == Real
+    sig.blsValue.verify(msg, domain, pubkey.blsValue)
+  else:
+    sig.verify(msg, domain, pubkey)

-# https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/bls_signature.md#bls_verify_multiple
+# https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/bls_signature.md#bls_verify_multiple
 func bls_verify_multiple*(
     pubkeys: seq[ValidatorPubKey], message_hashes: openArray[Eth2Digest],
     sig: ValidatorSig, domain: uint64): bool =
@@ -98,49 +184,94 @@ func bls_verify_multiple*(
     let (pubkey, message_hash) = pubkey_message_hash
     # TODO spec doesn't say to handle this specially, but it's silly to
    # validate without any actual public keys.
-    if pubkey != ValidatorPubKey() and
-       not sig.verify(message_hash.data, domain, pubkey):
+    if not pubkey.bls_verify(message_hash.data, sig, domain):
      return false

  true

-func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
-               domain: uint64): ValidatorSig =
-  # name from spec!
-  key.sign(domain, msg)
+when ValidatorPrivKey is BlsValue:
+  func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
+                 domain: uint64): ValidatorSig =
+    # name from spec!
+    if key.kind == Real:
+      ValidatorSig(kind: Real, blsValue: key.blsValue.sign(domain, msg))
+    else:
+      ValidatorSig(kind: OpaqueBlob)
+else:
+  func bls_sign*(key: ValidatorPrivKey, msg: openarray[byte],
+                 domain: uint64): ValidatorSig =
+    # name from spec!
+    ValidatorSig(kind: Real, blsValue: key.sign(domain, msg))
+
+proc fromBytes*[T](R: type BlsValue[T], bytes: openarray[byte]): R =
+  when defined(ssz_testing):
+    result = R(kind: OpaqueBlob, blob: toArray(result.blob.len, bytes))
+  else:
+    result = R(kind: Real, blsValue: init(T, bytes))
+
+proc initFromBytes*[T](val: var BlsValue[T], bytes: openarray[byte]) =
+  val = fromBytes(BlsValue[T], bytes)
+
+proc initFromBytes*(val: var BlsCurveType, bytes: openarray[byte]) =
+  val = init(type(val), bytes)

 proc writeValue*(writer: var JsonWriter, value: ValidatorPubKey) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPubKey) {.inline.} =
-  value = VerKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorSig) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorSig) {.inline.} =
-  value = Signature.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

 proc writeValue*(writer: var JsonWriter, value: ValidatorPrivKey) {.inline.} =
-  writer.writeValue($value)
+  when value is BlsValue:
+    doAssert value.kind == Real
+    writer.writeValue($value.blsValue)
+  else:
+    writer.writeValue($value)

 proc readValue*(reader: var JsonReader, value: var ValidatorPrivKey) {.inline.} =
-  value = SigKey.init(reader.readValue(string))
+  value.initFromBytes(fromHex reader.readValue(string))

-proc newPrivKey*(): ValidatorPrivKey = SigKey.random()
+when ValidatorPrivKey is BlsValue:
+  proc newPrivKey*(): ValidatorPrivKey =
+    ValidatorPrivKey(kind: Real, blsValue: SigKey.random())
+else:
+  proc newPrivKey*(): ValidatorPrivKey =
+    SigKey.random()

 # RLP serialization (TODO: remove if no longer necessary)
-proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
-  writer.append value.getBytes()
+when ValidatorPubKey is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
+  proc append*(writer: var RlpWriter, value: ValidatorPubKey) =
+    writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorPubKey): T {.inline.} =
-  result = ValidatorPubKey.init(rlp.toBytes.toOpenArray)
+  result = fromBytes(T, rlp.toBytes)
+  rlp.skipElem()

-proc append*(writer: var RlpWriter, value: ValidatorSig) =
-  writer.append value.getBytes()
+when ValidatorSig is BlsValue:
+  proc append*(writer: var RlpWriter, value: ValidatorSig) =
+    writer.append if value.kind == Real: value.blsValue.getBytes()
+                  else: value.blob
+else:
+  proc append*(writer: var RlpWriter, value: ValidatorSig) =
+    writer.append value.getBytes()

 proc read*(rlp: var Rlp, T: type ValidatorSig): T {.inline.} =
-  result = ValidatorSig.init(rlp.toBytes.toOpenArray)
+  let bytes = fromBytes(T, rlp.toBytes)
+  rlp.skipElem()

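Why the BlsValue variant exists: under -d:ssz_testing the official test vectors include byte blobs that are not valid curve points, so fromBytes must preserve them opaquely instead of failing, while still round-tripping through getBytes for SSZ and hashing. A sketch under that assumption (values invented):

when defined(ssz_testing):
  let blob = newSeq[byte](96)          # all-zero bytes: not a valid signature point
  let sig = ValidatorSig.fromBytes(blob)
  doAssert sig.kind == OpaqueBlob      # kept as raw bytes, no curve check
  doAssert sig.getBytes.len == 96      # round-trips unchanged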
View File

@@ -18,9 +18,9 @@
 # types / composition

 import
-  hashes, math, json, macros,
-  chronicles, eth/[common, rlp], stew/[byteutils, bitseqs],
-  ./bitfield, ./crypto, ./digest
+  hashes, math, json, strutils,
+  chronicles, eth/[common, rlp],
+  ../ssz/types, ./crypto, ./digest

 # TODO Data types:
 # Presently, we're reusing the data types from the serialization (uint64) in the
@@ -41,7 +41,7 @@ import
 # Constant presets
 # https://github.com/ethereum/eth2.0-specs/tree/v0.6.3/configs/constant_presets/
-const const_preset*{.strdefine.} = "mainnet"
+const const_preset* {.strdefine.} = "minimal"

 when const_preset == "mainnet":
   import ./presets/mainnet
@@ -63,16 +63,21 @@ const
   GENESIS_EPOCH* = (GENESIS_SLOT.uint64 div SLOTS_PER_EPOCH).Epoch ##\
     ## compute_epoch_of_slot(GENESIS_SLOT)

+  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
+
   # Not part of spec. Still useful, pending removing usage if appropriate.
   ZERO_HASH* = Eth2Digest()

+template maxSize*(n: int) {.pragma.}
+
 type
   ValidatorIndex* = range[0'u32 .. 0xFFFFFF'u32] # TODO: wrap-around
   Shard* = uint64
   Gwei* = uint64
   Domain* = uint64
+  BitList*[maxLen: static int] = distinct BitSeq

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#proposerslashing
   ProposerSlashing* = object
     proposer_index*: uint64 ##\
@@ -91,11 +96,13 @@ type
     attestation_2*: IndexedAttestation ## \
       ## Second attestation

+  CustodyBitIndices* = List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#indexedattestation
   IndexedAttestation* = object
     # These probably should be seq[ValidatorIndex], but that throws RLP errors
-    custody_bit_0_indices*: seq[uint64]
-    custody_bit_1_indices*: seq[uint64]
+    custody_bit_0_indices*: CustodyBitIndices
+    custody_bit_1_indices*: CustodyBitIndices

     data*: AttestationData ## \
       ## Attestation data
@@ -103,15 +110,17 @@ type
     signature*: ValidatorSig ## \
       ## Aggregate signature

+  CommitteeValidatorsBits* = BitList[MAX_VALIDATORS_PER_COMMITTEE]
+
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#attestation
   Attestation* = object
-    aggregation_bits*: BitField ##\
+    aggregation_bits*: CommitteeValidatorsBits ##\
       ## Attester aggregation bitfield

     data*: AttestationData ##\
       ## Attestation data

-    custody_bits*: BitField ##\
+    custody_bits*: CommitteeValidatorsBits ##\
       ## Custody bitfield

     signature*: ValidatorSig ##\
@@ -143,7 +152,7 @@ type
   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#deposit
   Deposit* = object
-    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH, Eth2Digest] ##\
+    proof*: array[DEPOSIT_CONTRACT_TREE_DEPTH + 1, Eth2Digest] ##\
       ## Merkle path to deposit data list root

     data*: DepositData
@@ -159,9 +168,6 @@ type
     amount*: uint64 ##\
      ## Amount in Gwei

-    # TODO remove, not in spec
-    dummy*: uint64
-
     signature*: ValidatorSig ##\
       ## Container self-signature
@@ -280,7 +286,7 @@ type
     # Shuffling
     start_shard*: Shard
-    randao_mixes*: array[LATEST_RANDAO_MIXES_LENGTH, Eth2Digest]
+    randao_mixes*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest]

     active_index_roots*: array[EPOCHS_PER_HISTORICAL_VECTOR, Eth2Digest] ##\
       ## Active index digests for light clients
@@ -348,10 +354,10 @@ type
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#pendingattestation
   PendingAttestation* = object
-    aggregation_bits*: BitField                ## Attester participation bitfield
+    aggregation_bits*: CommitteeValidatorsBits ## Attester participation bitfield
     data*: AttestationData                     ## Attestation data
     inclusion_delay*: uint64                   ## Inclusion delay
-    proposer_index*: ValidatorIndex            ## Proposer index
+    proposer_index*: uint64                    ## Proposer index

   # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#historicalbatch
   HistoricalBatch* = object
@@ -382,6 +388,65 @@ type
     data*: BeaconState
     root*: Eth2Digest # hash_tree_root (not signing_root!)

+template foreachSpecType*(op: untyped) =
+  ## These are all spec types that will appear in network messages
+  ## and persistent consensus data. This helper template is useful
+  ## for populating RTTI tables that concern them.
+  op Attestation
+  op AttestationData
+  op AttestationDataAndCustodyBit
+  op AttesterSlashing
+  op BeaconBlock
+  op BeaconBlockBody
+  op BeaconBlockHeader
+  op BeaconState
+  op Crosslink
+  op Deposit
+  op DepositData
+  op Eth1Data
+  op Fork
+  op HistoricalBatch
+  op IndexedAttestation
+  op PendingAttestation
+  op ProposerSlashing
+  op Transfer
+  op Validator
+  op VoluntaryExit
+
+macro fieldMaxLen*(x: typed): untyped =
+  # TODO This macro is a temporary solution for the lack of a
+  # more proper way to specify the max length of the List[T; N]
+  # objects in the spec.
+  # May be replaced with `getCustomPragma` once we upgrade to
+  # Nim 0.20.2 or with a distinct List type, which would require
+  # more substantial refactorings in the spec code.
+  if x.kind != nnkDotExpr:
+    return newLit(0)
+
+  let size = case $x[1]
+             of "pubkeys",
+                "compact_validators",
+                "custody_bit_0_indices",
+                "custody_bit_1_indices",
+                "aggregation_bits",
+                "custody_bits": int64(MAX_VALIDATORS_PER_COMMITTEE)
+             of "proposer_slashings": MAX_PROPOSER_SLASHINGS
+             of "attester_slashings": MAX_ATTESTER_SLASHINGS
+             of "attestations": MAX_ATTESTATIONS
+             of "deposits": MAX_DEPOSITS
+             of "voluntary_exits": MAX_VOLUNTARY_EXITS
+             of "transfers": MAX_TRANSFERS
+             of "historical_roots": HISTORICAL_ROOTS_LIMIT
+             of "eth1_data_votes": SLOTS_PER_ETH1_VOTING_PERIOD
+             of "validators": VALIDATOR_REGISTRY_LIMIT
+             of "balances": VALIDATOR_REGISTRY_LIMIT
+             of "previous_epoch_attestations",
+                "current_epoch_attestations": MAX_ATTESTATIONS *
+                                              SLOTS_PER_EPOCH
+             else: 0
+
+  newLit size
+
 func shortValidatorKey*(state: BeaconState, validatorIdx: int): string =
   ($state.validators[validatorIdx].pubkey)[0..7]
@@ -439,6 +504,51 @@ proc `%`*(i: uint64): JsonNode =
 ethTimeUnit Slot
 ethTimeUnit Epoch

+Json.useCustomSerialization(BeaconState.justification_bits):
+  read:
+    let s = reader.readValue(string)
+    if s.len != 4: raise newException(ValueError, "unexpected number of bytes")
+    s.parseHexInt.uint8
+
+  write:
+    writer.writeValue "0x" & value.toHex
+
+Json.useCustomSerialization(BitSeq):
+  read:
+    BitSeq reader.readValue(string).hexToSeqByte
+
+  write:
+    writer.writeValue "0x" & value.bytes.toHex
+
+template readValue*(reader: var JsonReader, value: var BitList) =
+  type T = type(value)
+  value = T readValue(reader, BitSeq)
+
+template writeValue*(writer: var JsonWriter, value: BitList) =
+  writeValue(writer, BitSeq value)
+
+template init*(T: type BitList, len: int): auto = T init(BitSeq, len)
+template len*(x: BitList): auto = len(BitSeq(x))
+template bytes*(x: BitList): auto = bytes(BitSeq(x))
+template `[]`*(x: BitList, idx: auto): auto = BitSeq(x)[idx]
+template `[]=`*(x: BitList, idx: auto, val: bool) = BitSeq(x)[idx] = val
+template `==`*(a, b: BitList): bool = BitSeq(a) == BitSeq(b)
+template raiseBit*(x: BitList, idx: int) = raiseBit(BitSeq(x), idx)
+template lowerBit*(x: BitList, idx: int) = lowerBit(BitSeq(x), idx)
+template overlaps*(a, b: BitList): bool = overlaps(BitSeq(a), BitSeq(b))
+template combine*(a, b: BitList) = combine(BitSeq(a), BitSeq(b))
+template isSubsetOf*(a, b: BitList): bool = isSubsetOf(BitSeq(a), BitSeq(b))
+
+when useListType:
+  template len*[T; N](x: List[T, N]): auto = len(seq[T](x))
+  template `[]`*[T; N](x: List[T, N], idx: auto): auto = seq[T](x)[idx]
+  template `[]=`*[T; N](x: List[T, N], idx: auto, val: bool) = seq[T](x)[idx] = val
+  template `==`*[T; N](a, b: List[T, N]): bool = seq[T](a) == seq[T](b)
+  template asSeq*[T; N](x: List[T, N]): auto = seq[T](x)
+  template `&`*[T; N](a, b: List[T, N]): List[T, N] = seq[T](a) & seq[T](b)
+else:
+  template asSeq*[T; N](x: List[T, N]): auto = x
+
 func humaneSlotNum*(s: Slot): uint64 =
   s - GENESIS_SLOT

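A sketch of what the fieldMaxLen macro resolves to: a dot-expression on a spec object yields the List/BitList capacity registered for that field name in the case branches above, and anything else falls back to 0. The doAssert values follow directly from those branches:

var att: Attestation
doAssert fieldMaxLen(att.aggregation_bits) == MAX_VALIDATORS_PER_COMMITTEE
doAssert fieldMaxLen(att.data) == 0  # no max length registered for this field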
BIN  beacon_chain/spec/digest (executable file; binary not shown)

View File

@@ -44,17 +44,16 @@ func eth2hash*(v: openArray[byte]): Eth2Digest {.inline.} =
   var ctx: sha256
   ctx.init()
   ctx.update(v)
-  result = ctx.finish()
+  ctx.finish()

 template withEth2Hash*(body: untyped): Eth2Digest =
   ## This little helper will init the hash function and return the sliced
   ## hash:
   ## let hashOfData = withHash: h.update(data)
   var h {.inject.}: sha256
   h.init()
   body
-  var res = h.finish()
-  res
+  h.finish()

 func hash*(x: Eth2Digest): Hash =
   ## Hash for digests for Nim hash tables
@@ -63,3 +62,4 @@ func hash*(x: Eth2Digest): Hash =
   # We just slice the first 4 or 8 bytes of the block hash
   # depending on whether we are on a 32 or 64-bit platform
   result = cast[ptr Hash](unsafeAddr x)[]

View File

@@ -82,7 +82,6 @@ const
   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters
@@ -139,8 +138,10 @@ const
   # ---------------------------------------------------------------
   # https://github.com/ethereum/eth2.0-specs/blob/v0.7.1/specs/core/0_beacon-chain.md#state-list-lengths
   LATEST_RANDAO_MIXES_LENGTH* = 8192
-  EPOCHS_PER_HISTORICAL_VECTOR* = 8192 # 2'u64^13, epochs
-  EPOCHS_PER_SLASHINGS_VECTOR* = 8192 # epochs
+  EPOCHS_PER_HISTORICAL_VECTOR* = 65536
+  EPOCHS_PER_SLASHINGS_VECTOR* = 8192
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------

View File

@@ -65,7 +65,6 @@ const
   # Unchanged
   GENESIS_FORK_VERSION* = [0'u8, 0'u8, 0'u8, 0'u8]
   GENESIS_SLOT* = 0.Slot
-  FAR_FUTURE_EPOCH* = (not 0'u64).Epoch # 2^64 - 1 in spec
   BLS_WITHDRAWAL_PREFIX* = 0'u8

   # Time parameters
@@ -88,7 +87,7 @@ const
   # Changed
   SLOTS_PER_ETH1_VOTING_PERIOD* = 16
-  SLOTS_PER_HISTORICAL_ROOT* = 128 # 64 doesn't work with GENESIS_SLOT == 0?
+  SLOTS_PER_HISTORICAL_ROOT* = 64 # doesn't work with GENESIS_SLOT == 0?

   # Unchanged
   MIN_VALIDATOR_WITHDRAWABILITY_DELAY* = 2'u64^8
@@ -104,6 +103,8 @@ const
   LATEST_RANDAO_MIXES_LENGTH* = 64
   EPOCHS_PER_HISTORICAL_VECTOR* = 64
   EPOCHS_PER_SLASHINGS_VECTOR* = 64
+  HISTORICAL_ROOTS_LIMIT* = 16777216
+  VALIDATOR_REGISTRY_LIMIT* = 1099511627776

   # Reward and penalty quotients
   # ---------------------------------------------------------------

View File

@@ -35,7 +35,7 @@
 import # TODO - cleanup imports
   algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#block-header
 proc processBlockHeader(

View File

@@ -34,9 +34,9 @@
 import # TODO - cleanup imports
   algorithm, math, options, sequtils, tables,
-  chronicles, json_serialization/std/sets, stew/[bitseqs, bitops2],
+  chronicles, json_serialization/std/sets,
   ../extras, ../ssz, ../beacon_node_types,
-  beaconstate, bitfield, crypto, datatypes, digest, helpers, validator
+  beaconstate, crypto, datatypes, digest, helpers, validator

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#get_total_active_balance
 func get_total_active_balance(state: BeaconState): Gwei =
@@ -198,13 +198,6 @@ proc process_justification_and_finalization(
     old_previous_justified_checkpoint = state.previous_justified_checkpoint
     old_current_justified_checkpoint = state.current_justified_checkpoint

-  ## Bitvector[4] <-> uint8 mapping:
-  ## state.justification_bits[0] is (state.justification_bits shr 0) and 1
-  ## state.justification_bits[1] is (state.justification_bits shr 1) and 1
-  ## state.justification_bits[2] is (state.justification_bits shr 2) and 1
-  ## state.justification_bits[3] is (state.justification_bits shr 3) and 1
-  ## https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/simple-serialize.md#bitvectorn
-
   # Process justifications
   state.previous_justified_checkpoint = state.current_justified_checkpoint
@@ -247,8 +240,7 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: previous_epoch,
                  root: get_block_root(state, previous_epoch))

-    # Spec: state.justification_bits[1] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 1)
+    state.justification_bits.raiseBit 1

   let matching_target_attestations_current =
     get_matching_target_attestations(state, current_epoch)  # Current epoch
@@ -258,34 +250,33 @@ proc process_justification_and_finalization(
       Checkpoint(epoch: current_epoch,
                  root: get_block_root(state, current_epoch))

-    # Spec: state.justification_bits[0] = 0b1
-    state.justification_bits = state.justification_bits or (1 shl 0)
+    state.justification_bits.raiseBit 0

   # Process finalizations
   let bitfield = state.justification_bits

   ## The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th
   ## as source
-  if (bitfield shr 1) mod 8 == 0b111 and
+  if (bitfield and 0b1110) == 0b1110 and
      old_previous_justified_checkpoint.epoch + 3 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as
   ## source
-  if (bitfield shr 1) mod 4 == 0b11 and
+  if (bitfield and 0b110) == 0b110 and
      old_previous_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_previous_justified_checkpoint

   ## The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as
   ## source
-  if (bitfield shr 0) mod 8 == 0b111 and
+  if (bitfield and 0b111) == 0b111 and
      old_current_justified_checkpoint.epoch + 2 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

   ## The 1st/2nd most recent epochs are justified, the 1st using the 2nd as
   ## source
-  if (bitfield shr 0) mod 4 == 0b11 and
+  if (bitfield and 0b11) == 0b11 and
      old_current_justified_checkpoint.epoch + 1 == current_epoch:
     state.finalized_checkpoint = old_current_justified_checkpoint

 # https://github.com/ethereum/eth2.0-specs/blob/v0.8.1/specs/core/0_beacon-chain.md#crosslinks
@@ -384,7 +375,7 @@ func get_attestation_deltas(state: BeaconState, stateCache: var StateCache):
       let proposer_reward =
         (get_base_reward(state, index) div PROPOSER_REWARD_QUOTIENT).Gwei

-      rewards[attestation.proposer_index] += proposer_reward
+      rewards[attestation.proposer_index.int] += proposer_reward

       let max_attester_reward = get_base_reward(state, index) - proposer_reward
       rewards[index] +=
         (max_attester_reward *

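The mask rewrite above is behavior-preserving on the uint8 justification bits: testing the low bits of (bitfield shr k) against a run of ones selects the same bit positions as masking them directly. A worked check for the 2nd/3rd/4th-epoch case:

let bitfield = 0b1110'u8                  # bits 1..3 raised, bit 0 clear
doAssert (bitfield shr 1) mod 8 == 0b111  # old formulation: bits 1..3 all set
doAssert (bitfield and 0b1110) == 0b1110  # new formulation: same three bits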
View File

@@ -9,435 +9,569 @@
 # See https://github.com/ethereum/eth2.0-specs/blob/master/specs/simple-serialize.md

 import
-  endians, typetraits, options, algorithm, math,
-  faststreams/input_stream, serialization, eth/common, nimcrypto/sha2,
-  ./spec/[bitfield, crypto, datatypes, digest]
+  endians, stew/shims/macros, options, algorithm, math,
+  stew/[bitops2, bitseqs, objects, varints], stew/ranges/ptr_arith, stint,
+  faststreams/input_stream, serialization, serialization/testing/tracing,
+  nimcrypto/sha2, blscurve, eth/common,
+  ./spec/[crypto, datatypes, digest],
+  ./ssz/[types, bytes_reader]

 # ################### Helper functions ###################################

 export
-  serialization
+  serialization, types, bytes_reader
+
+when defined(serialization_tracing):
+  import
+    typetraits, stew/ranges/ptr_arith
+
+const
+  bytesPerChunk = 32
+  bitsPerChunk = bytesPerChunk * 8
+  maxChunkTreeDepth = 25
+  defaultMaxObjectSize = 1 * 1024 * 1024

 type
   SszReader* = object
     stream: ByteStreamVar
+    maxObjectSize: int

   SszWriter* = object
     stream: OutputStreamVar

-  SszError* = object of SerializationError
-  CorruptedDataError* = object of SszError
-
-  RecordWritingMemo = object
-    initialStreamPos: int
-    sizePrefixCursor: DelayedWriteCursor
+  BasicType = char|bool|SomeUnsignedInt|StUint
+
+  SszChunksMerkelizer = ref object of RootObj
+    combinedChunks: array[maxChunkTreeDepth, Eth2Digest]
+    totalChunks: uint
+    limit: uint64
+
+  Chunk = array[bytesPerChunk, byte]
+
+  TypeWithMaxLen[T; maxLen: static int64] = distinct T
+
+  SizePrefixed*[T] = distinct T
+  SszMaxSizeExceeded* = object of SerializationError
+
+  VarSizedWriterCtx = object
+    fixedParts: WriteCursor
+    offset: int
+
+  FixedSizedWriterCtx = object

 serializationFormat SSZ,
                     Reader = SszReader,
                     Writer = SszWriter,
                     PreferedOutput = seq[byte]

-proc init*(T: type SszReader, stream: ByteStreamVar): T =
-  result.stream = stream
+proc init*(T: type SszReader,
+           stream: ByteStreamVar,
+           maxObjectSize = defaultMaxObjectSize): T =
+  T(stream: stream, maxObjectSize: maxObjectSize)

 proc mount*(F: type SSZ, stream: ByteStreamVar, T: type): T =
   mixin readValue
   var reader = init(SszReader, stream)
   reader.readValue(T)

-func toSSZType(x: Slot|Epoch): auto = x.uint64
-func toSSZType(x: auto): auto = x
-
-# toBytesSSZ convert simple fixed-length types to their SSZ wire representation
-func toBytesSSZ(x: SomeInteger): array[sizeof(x), byte] =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
-  when x.sizeof == 8: littleEndian64(result.addr, x.unsafeAddr)
-  elif x.sizeof == 4: littleEndian32(result.addr, x.unsafeAddr)
-  elif x.sizeof == 2: littleEndian16(result.addr, x.unsafeAddr)
-  elif x.sizeof == 1: copyMem(result.addr, x.unsafeAddr, sizeof(result))
-  else: {.fatal: "Unsupported type serialization: " & $(type(x)).name.}
-
-func toBytesSSZ(x: ValidatorIndex): array[3, byte] =
-  ## Integers are all encoded as little endian and not padded
-  let v = x.uint32
-  result[0] = byte(v and 0xff)
-  result[1] = byte((v shr 8) and 0xff)
-  result[2] = byte((v shr 16) and 0xff)
-
-func toBytesSSZ(x: bool): array[1, byte] =
-  [if x: 1'u8 else: 0'u8]
-
-func toBytesSSZ(x: EthAddress): array[sizeof(x), byte] = x
-func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
-
-# TODO these two are still being debated:
-# https://github.com/ethereum/eth2.0-specs/issues/308#issuecomment-447026815
-func toBytesSSZ(x: ValidatorPubKey|ValidatorSig): auto = x.getBytes()
-
-type
-  BasicType =
-    # Types that serialize down to a fixed-length array - most importantly,
-    # these values don't carry a length prefix in the final encoding. toBytesSSZ
-    # provides the actual nim-type-to-bytes conversion.
-    # TODO think about this for a bit - depends where the serialization of
-    # validator keys ends up going..
-    # TODO can't put ranges like ValidatorIndex in here:
-    # https://github.com/nim-lang/Nim/issues/10027
-    SomeInteger | EthAddress | Eth2Digest | ValidatorPubKey | ValidatorSig |
-    bool | Slot | Epoch
-
-func sszLen(v: BasicType): int = toBytesSSZ(v.toSSZType()).len
-func sszLen(v: ValidatorIndex): int = toBytesSSZ(v).len
-
-func sszLen(v: object | tuple): int =
-  result = 4 # Length
-  for field in v.fields:
-    result += sszLen(type field)
-
-func sszLen(v: seq | array): int =
-  result = 4 # Length
-  for i in v:
-    result += sszLen(i)
-
-func sszLen(v: BitField): int =
-  sszLen(v.bits)
-
-# fromBytesSSZ copies the wire representation to a Nim variable,
-# assuming there's enough data in the buffer
-func fromBytesSSZ(T: type SomeInteger, data: openarray[byte]): T =
-  ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
-  ## All integers are serialized as **little endian**.
-  ## TODO: Assumes data points to a sufficiently large buffer
-  doAssert data.len == sizeof(result)
-  # TODO: any better way to get a suitably aligned buffer in nim???
-  # see also: https://github.com/nim-lang/Nim/issues/9206
-  var tmp: uint64
-  var alignedBuf = cast[ptr byte](tmp.addr)
-  copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)
-
-  when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
-  elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
-  elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
-  elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
-  else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
-
-func fromBytesSSZ(T: type bool, data: openarray[byte]): T =
-  # TODO: spec doesn't say what to do if the value is >1 - we'll use the C
-  # definition for now, but maybe this should be a parse error instead?
-  fromBytesSSZ(uint8, data) != 0
-
-func fromBytesSSZ(T: type ValidatorIndex, data: openarray[byte]): T =
-  ## Integers are all encoded as littleendian and not padded
-  doAssert data.len == 3
-  var tmp: uint32
-  tmp = tmp or uint32(data[0])
-  tmp = tmp or uint32(data[1]) shl 8
-  tmp = tmp or uint32(data[2]) shl 16
-  result = tmp.ValidatorIndex
-
-func fromBytesSSZ(T: type EthAddress, data: openarray[byte]): T =
-  doAssert data.len == sizeof(result)
-  copyMem(result.addr, unsafeAddr data[0], sizeof(result))
-
-func fromBytesSSZ(T: type Eth2Digest, data: openarray[byte]): T =
-  doAssert data.len == sizeof(result.data)
-  copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
-
-proc init*(T: type SszWriter, stream: OutputStreamVar): T =
+method formatMsg*(err: ref SszSizeMismatchError, filename: string): string {.gcsafe.} =
+  # TODO: implement proper error string
+  "Serialisation error while processing " & filename
+
+when false:
+  # TODO: Nim can't handle yet this simpler definition. File an issue.
+  template valueOf[T; N](x: TypeWithMaxLen[T, N]): auto = T(x)
+else:
+  proc unwrapImpl[T; N](x: ptr TypeWithMaxLen[T, N]): ptr T =
+    cast[ptr T](x)
+
+  template valueOf(x: TypeWithMaxLen): auto =
+    let xaddr = unsafeAddr x
+    unwrapImpl(xaddr)[]
+
+template toSszType*(x: auto): auto =
+  mixin toSszType
+
+  when x is Slot|Epoch|ValidatorIndex|enum: uint64(x)
+  elif x is Eth2Digest: x.data
+  elif x is BlsValue|BlsCurveType: getBytes(x)
+  elif x is BitSeq|BitList: bytes(x)
+  elif x is TypeWithMaxLen: toSszType valueOf(x)
+  elif useListType and x is List: seq[x.T](x)
+  else: x
+
+func writeFixedSized(c: var WriteCursor, x: auto) =
+  mixin toSszType
+
+  when x is byte:
+    c.append x
+  elif x is bool|char:
+    c.append byte(ord(x))
+  elif x is SomeUnsignedInt:
+    when system.cpuEndian == bigEndian:
+      ## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
+      ## All integers are serialized as **little endian**.
+      var bytes: array[sizeof(x), byte]
+      when x.sizeof == 8: littleEndian64(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 4: littleEndian32(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 2: littleEndian16(addr bytes[0], x.unsafeAddr)
+      elif x.sizeof == 1: copyMem(addr bytes[0], x.unsafeAddr, sizeof(x))
+      else: unsupported x.type
+      c.append bytes
+    else:
+      let valueAddr = unsafeAddr x
+      trs "APPENDING INT ", x, " = ", makeOpenArray(cast[ptr byte](valueAddr), sizeof(x))
+      c.appendMemCopy x
+  elif x is StUint:
+    c.appendMemCopy x # TODO: Is this always correct?
+  elif x is array|string|seq|openarray:
+    when x[0] is byte:
+      trs "APPENDING FIXED SIZE BYTES", x
+      c.append x
+    else:
+      for elem in x:
+        trs "WRITING FIXED SIZE ARRAY ELEMENENT"
+        c.writeFixedSized toSszType(elem)
+  elif x is tuple|object:
+    enumInstanceSerializedFields(x, fieldName, field):
+      trs "WRITING FIXED SIZE FIELD", fieldName
+      c.writeFixedSized toSszType(field)
+  else:
+    unsupported x.type
+
+template writeFixedSized(s: OutputStreamVar, x: auto) =
+  writeFixedSized(s.cursor, x)
+
+template supports*(_: type SSZ, T: type): bool =
+  mixin toSszType
+  anonConst compiles(fixedPortionSize toSszType(default(T)))
+
+func init*(T: type SszWriter, stream: OutputStreamVar): T =
   result.stream = stream

-proc writeValue*(w: var SszWriter, obj: auto)
-
-# This is an alternative lower-level API useful for RPC
-# frameworks that can simulate the serialization of an
-# object without constructing an actual instance:
-proc beginRecord*(w: var SszWriter, T: type): RecordWritingMemo =
-  result.initialStreamPos = w.stream.pos
-  result.sizePrefixCursor = w.stream.delayFixedSizeWrite sizeof(uint32)
-
-template writeField*(w: var SszWriter, name: string, value: auto) =
-  w.writeValue(value)
-
-proc endRecord*(w: var SszWriter, memo: RecordWritingMemo) =
-  let finalSize = uint32(w.stream.pos - memo.initialStreamPos - 4)
-  memo.sizePrefixCursor.endWrite(finalSize.toBytesSSZ)
-
-proc writeValue*(w: var SszWriter, obj: auto) =
-  # We are not using overloads here, because this leads to
-  # slightly better error messages when the user provides
-  # additional overloads for `writeValue`.
-  mixin writeValue
-
-  when obj is ValidatorIndex|BasicType:
-    w.stream.append obj.toSSZType().toBytesSSZ
-  elif obj is byte|char:
-    w.stream.append obj
-  elif obj is enum:
-    w.stream.append uint64(obj).toBytesSSZ
-  else:
-    let memo = w.beginRecord(obj.type)
-    when obj is seq|array|openarray|string:
-      # If you get an error here that looks like:
-      # type mismatch: got <type range 0..8191(uint64)>
-      # you just used an unsigned int for an array index thinking you'd get
-      # away with it (surprise, surprise: you can't, uints are crippled!)
-      # https://github.com/nim-lang/Nim/issues/9984
-      for elem in obj:
-        w.writeValue elem
-    elif obj is BitField:
-      for elem in obj.bits:
-        w.writeValue elem
-    else:
-      obj.serializeFields(fieldName, field):
-        # for research/serialized_sizes, remove when appropriate
-        when defined(debugFieldSizes) and obj is (BeaconState|BeaconBlock):
-          let start = w.stream.pos
-          w.writeValue field.toSSZType
-          debugEcho fieldName, ": ", w.stream.pos - start
-        else:
-          w.writeValue field.toSSZType
-    w.endRecord(memo)
-
-proc readValue*(r: var SszReader, result: var auto) =
-  # We are not using overloads here, because this leads to
-  # slightly better error messages when the user provides
-  # additional overloads for `readValue`.
-  type T = result.type
-  mixin readValue
-
-  template checkEof(n: int) =
-    if not r.stream[].ensureBytes(n):
-      raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
-
-  when result is ValidatorIndex|BasicType:
-    let bytesToRead = result.sszLen;
-    checkEof bytesToRead
-
-    when result is ValidatorPubKey|ValidatorSig:
-      if not result.init(r.stream.readBytes(bytesToRead)):
-        raise newException(CorruptedDataError, "Failed to load a BLS key or signature")
-    else:
-      result = T.fromBytesSSZ(r.stream.readBytes(bytesToRead))
-
-  elif result is enum:
-    # TODO what to do with out-of-range values?? rejecting means breaking
-    # forwards compatibility..
-    result = cast[T](r.readValue(uint64))
-
-  elif result is string:
-    {.error: "The SSZ format doesn't support the string type yet".}
-  else:
-    let totalLen = int r.readValue(uint32)
-    checkEof totalLen
-
-    let endPos = r.stream[].pos + totalLen
-    when T is seq:
-      type ElemType = type(result[0])
-      # Items are of homogenous type, but not necessarily homogenous length,
-      # cannot pre-allocate item list generically
-      while r.stream[].pos < endPos:
-        result.add r.readValue(ElemType)
-
-    elif T is BitField:
-      type ElemType = type(result.bits[0])
-      while r.stream[].pos < endPos:
-        result.bits.add r.readValue(ElemType)
-
-    elif T is array:
-      type ElemType = type(result[0])
-      var i = 0
-      while r.stream[].pos < endPos:
-        if i > result.len:
-          raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of an array")
-        result[i] = r.readValue(ElemType)
-        i += 1
-
-    else:
-      result.deserializeFields(fieldName, field):
-        # TODO This hardcoding's ugly; generalize & abstract.
-        when field is Slot:
-          field = r.readValue(uint64).Slot
-        elif field is Epoch:
-          field = r.readValue(uint64).Epoch
-        else:
-          field = r.readValue(field.type)
-
-    if r.stream[].pos != endPos:
-      raise newException(CorruptedDataError, "SSZ includes unexpected bytes past the end of the deserialized object")
-
-# ################### Hashing ###################################
-
-# Sample hash_tree_root implementation based on:
-# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/specs/simple-serialize.md
-# https://github.com/ethereum/eth2.0-specs/blob/v0.6.3/test_libs/pyspec/eth2spec/utils/minimal_ssz.py
-# TODO Probably wrong - the spec is pretty bare-bones and no test vectors yet
+template enumerateSubFields(holder, fieldVar, body: untyped) =
+  when holder is array|string|seq|openarray:
+    for fieldVar in holder: body
+  else:
+    enumInstanceSerializedFields(holder, _, fieldVar): body
+
+func writeVarSizeType(w: var SszWriter, value: auto) {.gcsafe.}
+
+func beginRecord*(w: var SszWriter, TT: type): auto =
+  type T = TT
+  when isFixedSize(T):
+    FixedSizedWriterCtx()
+  else:
+    const offset = when T is array: len(T) * offsetSize
+                   else: fixedPortionSize(T)
+    VarSizedWriterCtx(offset: offset,
+                      fixedParts: w.stream.delayFixedSizeWrite(offset))
+
+template writeField*(w: var SszWriter,
+                     ctx: var auto,
+                     fieldName: string,
+                     field: auto) =
+  mixin toSszType
+  when ctx is FixedSizedWriterCtx:
+    writeFixedSized(w, toSszType(field))
+  else:
+    type FieldType = type toSszType(field)
+
+    when isFixedSize(FieldType):
+      ctx.fixedParts.writeFixedSized toSszType(field)
+    else:
+      trs "WRITING OFFSET ", ctx.offset, " FOR ", fieldName
+      ctx.fixedParts.writeFixedSized uint32(ctx.offset)
+      let initPos = w.stream.pos
+      trs "WRITING VAR SIZE VALUE OF TYPE ", name(FieldType)
+      when FieldType is BitSeq:
+        trs "BIT SEQ ", field.bytes
+      writeVarSizeType(w, toSszType(field))
+      ctx.offset += w.stream.pos - initPos
+
+template endRecord*(w: var SszWriter, ctx: var auto) =
+  when ctx is VarSizedWriterCtx:
+    finalize ctx.fixedParts
+
+func writeVarSizeType(w: var SszWriter, value: auto) =
+  trs "STARTING VAR SIZE TYPE"
+  mixin toSszType
+  type T = type toSszType(value)
+
+  when T is seq|string|openarray:
+    type E = ElemType(T)
+    when isFixedSize(E):
+      trs "WRITING LIST WITH FIXED SIZE ELEMENTS"
+      for elem in value:
+        w.stream.writeFixedSized toSszType(elem)
+      trs "DONE"
+    else:
+      trs "WRITING LIST WITH VAR SIZE ELEMENTS"
+      var offset = value.len * offsetSize
+      var cursor = w.stream.delayFixedSizeWrite offset
+      for elem in value:
+        cursor.writeFixedSized uint32(offset)
+        let initPos = w.stream.pos
+        w.writeVarSizeType toSszType(elem)
+        offset += w.stream.pos - initPos
+      finalize cursor
+      trs "DONE"
+
+  elif T is object|tuple|array:
+    trs "WRITING OBJECT OR ARRAY"
+    var ctx = beginRecord(w, T)
+    enumerateSubFields(value, field):
+      writeField w, ctx, astToStr(field), field
+    endRecord w, ctx
+
+func writeValue*(w: var SszWriter, x: auto) {.gcsafe.} =
+  mixin toSszType
+  type T = type toSszType(x)
+
+  when isFixedSize(T):
+    w.stream.writeFixedSized toSszType(x)
+  elif T is array|seq|openarray|string|object|tuple:
+    w.writeVarSizeType toSszType(x)
+  else:
+    unsupported type(x)
+
+func writeValue*[T](w: var SszWriter, x: SizePrefixed[T]) =
+  var cursor = w.stream.delayVarSizeWrite(10)
+  let initPos = w.stream.pos
+  w.writeValue T(x)
+  cursor.appendVarint uint64(w.stream.pos - initPos)
+  finalize cursor
+
+template checkEof(n: int) =
+  if not r.stream[].ensureBytes(n):
+    raise newException(UnexpectedEofError, "SSZ has insufficient number of bytes")
+
+template fromSszBytes*(T: type BlsValue, bytes: openarray[byte]): auto =
+  fromBytes(T, bytes)
+
+template fromSszBytes*[T; N](_: type TypeWithMaxLen[T, N],
bytes: openarray[byte]): auto =
mixin fromSszBytes
fromSszBytes(T, bytes)
proc fromSszBytes*(T: type BlsCurveType, bytes: openarray[byte]): auto =
init(T, bytes)
proc readValue*(r: var SszReader, val: var auto) =
val = readSszValue(r.stream.readBytes(r.stream.endPos), val.type)
proc readValue*[T](r: var SszReader, val: var SizePrefixed[T]) =
let length = r.stream.readVarint(uint64)
if length > r.maxObjectSize:
raise newException(SszMaxSizeExceeded,
"Maximum SSZ object size exceeded: " & $length)
val = readSszValue(r.stream.readBytes(length), T)
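# A byte-level sketch of the layout the writer above produces (added for
# illustration; `Example` is a hypothetical type, not part of this module).
# Fixed-size fields are stored inline in the fixed part, while each
# variable-size field contributes a 4-byte offset there, with its payload
# appended after the fixed part:
#
#   type Example = object
#     a: uint64      # fixed-size: 8 bytes, stored inline
#     b: seq[byte]   # variable-size: 4-byte offset in the fixed part
#
#   SSZ.encode(Example(a: 1, b: @[byte 2, 3])) would produce 14 bytes:
#     01 00 00 00 00 00 00 00   # a, little-endian
#     0C 00 00 00               # offset of b = 12, i.e. fixedPortionSize(Example)
#     02 03                     # b's payload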
const
  zeroChunk = default array[32, byte]

func hash(a, b: openArray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "MERGING BRANCHES "
    trs a
    trs b

    h.update a
    h.update b
  trs "HASH RESULT ", result

func mergeBranches(existing: Eth2Digest, newData: openarray[byte]): Eth2Digest =
  result = withEth2Hash:
    trs "MERGING BRANCHES OPEN ARRAY"
    trs existing.data
    trs newData

    h.update existing.data
    h.update newData

    let paddingBytes = bytesPerChunk - newData.len
    if paddingBytes > 0:
      trs "USING ", paddingBytes, " PADDING BYTES"
      h.update zeroChunk[0 ..< paddingBytes]
  trs "HASH RESULT ", result

template mergeBranches(a, b: Eth2Digest): Eth2Digest =
  hash(a.data, b.data)
func computeZeroHashes: array[100, Eth2Digest] =
result[0] = Eth2Digest(data: zeroChunk)
for i in 1 .. result.high:
result[i] = mergeBranches(result[i - 1], result[i - 1])
let zeroHashes = computeZeroHashes()
func getZeroHashWithoutSideEffect(idx: int): Eth2Digest =
# TODO this is a work-around for the somewhat broken side
# effects analysis of Nim - reading from global let variables
# is considered a side-effect.
# Nim 0.19 doesnt have the `{.noSideEffect.}:` override, so
# we should revisit this in Nim 0.20.2.
{.emit: "`result` = `zeroHashes`[`idx`];".}
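# For reference, the table above satisfies the recurrence (sketch):
#   zeroHashes[0] = Eth2Digest(data: zeroChunk)
#   zeroHashes[i] = mergeBranches(zeroHashes[i - 1], zeroHashes[i - 1])
# so zeroHashes[i] is the root of a perfect depth-i tree whose 2^i leaves
# are all-zero chunks, letting the merkelizer pad a tree to any height
# without rehashing zero subtrees.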
func addChunk*(merkelizer: SszChunksMerkelizer, data: openarray[byte]) =
doAssert data.len > 0 and data.len <= bytesPerChunk
if not getBitLE(merkelizer.totalChunks, 0):
let chunkStartAddr = addr merkelizer.combinedChunks[0].data[0]
copyMem(chunkStartAddr, unsafeAddr data[0], data.len)
zeroMem(chunkStartAddr.shift(data.len), bytesPerChunk - data.len)
trs "WROTE BASE CHUNK ", merkelizer.combinedChunks[0]
else:
var hash = mergeBranches(merkelizer.combinedChunks[0], data)
for i in 1 .. high(merkelizer.combinedChunks):
trs "ITERATING"
if getBitLE(merkelizer.totalChunks, i):
trs "CALLING MERGE BRANCHES"
hash = mergeBranches(merkelizer.combinedChunks[i], hash)
else:
trs "WRITING FRESH CHUNK AT ", i, " = ", hash
merkelizer.combinedChunks[i] = hash
break
inc merkelizer.totalChunks
func getFinalHash*(merkelizer: SszChunksMerkelizer): Eth2Digest =
let limit = merkelizer.limit
if merkelizer.totalChunks == 0:
let limitHeight = if limit != 0: bitWidth(limit - 1) else: 0
return getZeroHashWithoutSideEffect(limitHeight)
let
bottomHashIdx = firstOne(merkelizer.totalChunks) - 1
submittedChunksHeight = bitWidth(merkelizer.totalChunks - 1)
topHashIdx = if limit <= 1: submittedChunksHeight
else: max(submittedChunksHeight, bitWidth(limit - 1))
trs "BOTTOM HASH ", bottomHashIdx
trs "SUBMITTED HEIGHT ", submittedChunksHeight
trs "LIMIT ", limit
if bottomHashIdx != submittedChunksHeight:
# Our tree is not finished. We must complete the work in progress
# branches and then extend the tree to the right height.
result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
getZeroHashWithoutSideEffect(bottomHashIdx))
for i in bottomHashIdx + 1 ..< topHashIdx:
if getBitLE(merkelizer.totalChunks, i):
result = mergeBranches(merkelizer.combinedChunks[i], result)
trs "COMBINED"
else:
result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
trs "COMBINED WITH ZERO"
elif bottomHashIdx == topHashIdx:
# We have a perfect tree (chunks == 2**n) at just the right height!
result = merkelizer.combinedChunks[bottomHashIdx]
else:
# We have a perfect tree of user chunks, but we have more work to
# do - we must extend it to reach the desired height
result = mergeBranches(merkelizer.combinedChunks[bottomHashIdx],
getZeroHashWithoutSideEffect(bottomHashIdx))
for i in bottomHashIdx + 1 ..< topHashIdx:
result = mergeBranches(result, getZeroHashWithoutSideEffect(i))
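# Worked example (illustrative): after three addChunk calls with limit = 8,
# totalChunks == 0b11, so bottomHashIdx = 0, submittedChunksHeight = 2 and
# topHashIdx = 3. The first branch runs: the third chunk (combinedChunks[0])
# is paired with a zero chunk, merged with combinedChunks[1] (the root of
# chunks 0 and 1), and finally extended with the depth-2 zero hash to yield
# the root of a tree with 8 leaf slots.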
let HashingStreamVTable = OutputStreamVTable(
writePage: proc (s: OutputStreamVar, data: openarray[byte])
{.nimcall, gcsafe, raises: [IOError, Defect].} =
trs "ADDING STREAM CHUNK ", data
SszChunksMerkelizer(s.outputDevice).addChunk(data)
,
flush: proc (s: OutputStreamVar) {.nimcall, gcsafe.} =
discard
)
func getVtableAddresWithoutSideEffect: ptr OutputStreamVTable =
# TODO this is a work-around for the somewhat broken side
# effects analysis of Nim - reading from global let variables
# is considered a side-effect.
# Nim 0.19 doesnt have the `{.noSideEffect.}:` override, so
# we should revisit this in Nim 0.20.2.
{.emit: "`result` = &`HashingStreamVTable`;".}
func newSszHashingStream(merkelizer: SszChunksMerkelizer): ref OutputStream =
new result
result.initWithSinglePage(pageSize = bytesPerChunk,
maxWriteSize = bytesPerChunk,
minWriteSize = bytesPerChunk)
result.outputDevice = merkelizer
result.vtable = getVtableAddresWithoutSideEffect()
func mixInLength(root: Eth2Digest, length: int): Eth2Digest =
  var dataLen: array[32, byte]
  var lstLen = uint64(length)
  littleEndian64(addr dataLen[0], addr lstLen)
  hash(root.data, dataLen)

func merkelizeSerializedChunks(merkelizer: SszChunksMerkelizer,
                               obj: auto): Eth2Digest =
  var hashingStream = newSszHashingStream merkelizer
  hashingStream.writeFixedSized obj
  hashingStream.flush
  merkelizer.getFinalHash

func merkelizeSerializedChunks(obj: auto): Eth2Digest =
  merkelizeSerializedChunks(SszChunksMerkelizer(), obj)

func hashTreeRoot*(x: auto): Eth2Digest {.gcsafe.}

template merkelizeFields(body: untyped): Eth2Digest {.dirty.} =
  var merkelizer {.inject.} = SszChunksMerkelizer()

  template addField(field) =
    let hash = hashTreeRoot(field)
    trs "MERKLEIZING FIELD ", astToStr(field), " = ", hash
    addChunk(merkelizer, hash.data)
    trs "CHUNK ADDED"

  template addField2(field) =
    const maxLen = fieldMaxLen(field)
    when maxLen > 0:
      type FieldType = type field
      addField TypeWithMaxLen[FieldType, maxLen](field)
    else:
      addField field

  body

  merkelizer.getFinalHash

func bitlistHashTreeRoot(merkelizer: SszChunksMerkelizer, x: BitSeq): Eth2Digest =
  trs "CHUNKIFYING BIT SEQ WITH LIMIT ", merkelizer.limit

  var
totalBytes = x.bytes.len
lastCorrectedByte = x.bytes[^1]
if lastCorrectedByte == byte(1):
if totalBytes == 1:
# This is an empty bit list.
# It should be hashed as a tree containing all zeros:
let treeHeight = if merkelizer.limit == 0: 0
else: log2trunc(merkelizer.limit)
return mergeBranches(getZeroHashWithoutSideEffect(treeHeight),
getZeroHashWithoutSideEffect(0)) # this is the mixed length
totalBytes -= 1
lastCorrectedByte = x.bytes[^2]
else:
let markerPos = log2trunc(lastCorrectedByte)
lastCorrectedByte.lowerBit(markerPos)
var
bytesInLastChunk = totalBytes mod bytesPerChunk
paddingBytes = bytesPerChunk - bytesInLastChunk
fullChunks = totalBytes div bytesPerChunk
if bytesInLastChunk == 0:
fullChunks -= 1
bytesInLastChunk = 32
for i in 0 ..< fullChunks:
let
chunkStartPos = i * bytesPerChunk
chunkEndPos = chunkStartPos + bytesPerChunk - 1
    merkelizer.addChunk x.bytes.toOpenArray(chunkStartPos, chunkEndPos)
var
lastChunk: array[bytesPerChunk, byte]
chunkStartPos = fullChunks * bytesPerChunk
for i in 0 .. bytesInLastChunk - 2:
lastChunk[i] = x.bytes[chunkStartPos + i]
lastChunk[bytesInLastChunk - 1] = lastCorrectedByte
merkelizer.addChunk lastChunk.toOpenArray(0, bytesInLastChunk - 1)
let contentsHash = merkelizer.getFinalHash
mixInLength contentsHash, x.len
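# Byte-level example of the marker handling above (illustration based on the
# SSZ spec's bitlist encoding): a bitlist of length 3 holding bits [1, 0, 1]
# is serialized as the single byte 0b0000_1101 - the low three bits carry the
# list and bit 3 is the length marker. lowerBit(markerPos) strips the marker,
# the remaining 0b0000_0101 byte is chunked and hashed, and mixInLength then
# mixes in the length 3.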
func hashTreeRootImpl[T](x: T): Eth2Digest =
when (T is BasicType) or (when T is array: ElemType(T) is BasicType else: false):
trs "FIXED TYPE; USE CHUNK STREAM"
merkelizeSerializedChunks x
elif T is string or (when T is (seq|openarray): ElemType(T) is BasicType else: false):
trs "TYPE WITH LENGTH"
mixInLength merkelizeSerializedChunks(x), x.len
elif T is array|object|tuple:
trs "MERKELIZING FIELDS"
merkelizeFields:
x.enumerateSubFields(f):
const maxLen = fieldMaxLen(f)
when maxLen > 0:
type FieldType = type f
addField TypeWithMaxLen[FieldType, maxLen](f)
else:
addField f
elif T is seq:
trs "SEQ WITH VAR SIZE"
let hash = merkelizeFields(for e in x: addField e)
mixInLength hash, x.len
#elif isCaseObject(T):
# # TODO implement this
else:
unsupported T
func maxChunksCount(T: type, maxLen: static int64): int64 {.compileTime.} =
when T is BitList:
(maxLen + bitsPerChunk - 1) div bitsPerChunk
elif T is seq:
type E = ElemType(T)
when E is BasicType:
(maxLen * sizeof(E) + bytesPerChunk - 1) div bytesPerChunk
else:
maxLen
else:
unsupported T # This should never happen
func hashTreeRoot*(x: auto): Eth2Digest =
trs "STARTING HASH TREE ROOT FOR TYPE ", name(type(x))
mixin toSszType
when x is TypeWithMaxLen:
const maxLen = x.maxLen
type T = type valueOf(x)
const limit = maxChunksCount(T, maxLen)
var merkelizer = SszChunksMerkelizer(limit: uint64(limit))
when T is BitList:
result = merkelizer.bitlistHashTreeRoot(BitSeq valueOf(x))
elif T is seq:
type E = ElemType(T)
let contentsHash = when E is BasicType:
merkelizeSerializedChunks(merkelizer, valueOf(x))
else:
for elem in valueOf(x):
let elemHash = hashTreeRoot(elem)
merkelizer.addChunk(elemHash.data)
merkelizer.getFinalHash()
result = mixInLength(contentsHash, valueOf(x).len)
else:
unsupported T # This should never happen
else:
result = hashTreeRootImpl toSszType(x)
trs "HASH TREE ROOT FOR ", name(type x), " = ", "0x", $result
func lastFieldName(RecordType: type): string {.compileTime.} =
enumAllSerializedFields(RecordType):
result = fieldName
func hasSigningRoot*(T: type): bool {.compileTime.} =
lastFieldName(T) == "signature"
func signingRoot*(obj: object): Eth2Digest =
const lastField = lastFieldName(obj.type)
merkelizeFields:
obj.enumInstanceSerializedFields(fieldName, field):
when fieldName != lastField:
addField2 field
@ -0,0 +1,142 @@
import
endians, typetraits,
stew/[objects, bitseqs], serialization/testing/tracing,
../spec/[digest, datatypes], ./types
template setLen[R, T](a: var array[R, T], length: int) =
if length != a.len:
raise newException(MalformedSszError, "SSZ input of insufficient size")
# fromSszBytes copies the wire representation to a Nim variable,
# assuming there's enough data in the buffer
func fromSszBytes*(T: type SomeInteger, data: openarray[byte]): T =
  ## Convert a byte buffer of exactly the integer's size
  ## (e.g. ``uint16 = 2 bytes``) to the corresponding integer.
  ## All integers are serialized as **little endian**.
  doAssert data.len == sizeof(result)
# TODO: any better way to get a suitably aligned buffer in nim???
# see also: https://github.com/nim-lang/Nim/issues/9206
var tmp: uint64
var alignedBuf = cast[ptr byte](tmp.addr)
copyMem(alignedBuf, unsafeAddr data[0], result.sizeof)
when result.sizeof == 8: littleEndian64(result.addr, alignedBuf)
elif result.sizeof == 4: littleEndian32(result.addr, alignedBuf)
elif result.sizeof == 2: littleEndian16(result.addr, alignedBuf)
elif result.sizeof == 1: copyMem(result.addr, alignedBuf, sizeof(result))
else: {.fatal: "Unsupported type deserialization: " & $(type(result)).name.}
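when isMainModule:
  # Minimal sanity sketch (added for illustration, not part of the original
  # module): integers decode little-endian, so [0x34, 0x12] is 0x1234.
  doAssert fromSszBytes(uint16, [byte 0x34, 0x12]) == 0x1234'u16
  doAssert fromSszBytes(uint64, [byte 1, 0, 0, 0, 0, 0, 0, 0]) == 1'u64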
func fromSszBytes*(T: type bool, data: openarray[byte]): T =
# TODO: spec doesn't say what to do if the value is >1 - we'll use the C
# definition for now, but maybe this should be a parse error instead?
fromSszBytes(uint8, data) != 0
func fromSszBytes*(T: type Eth2Digest, data: openarray[byte]): T =
doAssert data.len == sizeof(result.data)
copyMem(result.data.addr, unsafeAddr data[0], sizeof(result.data))
template fromSszBytes*(T: type Slot, bytes: openarray[byte]): Slot =
Slot fromSszBytes(uint64, bytes)
template fromSszBytes*(T: type Epoch, bytes: openarray[byte]): Epoch =
Epoch fromSszBytes(uint64, bytes)
template fromSszBytes*(T: type enum, bytes: openarray[byte]): auto =
T fromSszBytes(uint64, bytes)
template fromSszBytes*(T: type BitSeq, bytes: openarray[byte]): auto =
BitSeq @bytes
proc fromSszBytes*[N](T: type BitList[N], bytes: openarray[byte]): auto =
BitList[N] @bytes
proc readSszValue*(input: openarray[byte], T: type): T =
mixin fromSszBytes, toSszType
type T = type(result)
template readOffset(n: int): int =
int fromSszBytes(uint32, input[n ..< n + offsetSize])
when useListType and result is List:
type ElemType = type result[0]
result = T readSszValue(input, seq[ElemType])
elif result is string|seq|openarray|array:
type ElemType = type result[0]
when ElemType is byte|char:
result.setLen input.len
copyMem(addr result[0], unsafeAddr input[0], input.len)
elif isFixedSize(ElemType):
const elemSize = fixedPortionSize(ElemType)
if input.len mod elemSize != 0:
var ex = new SszSizeMismatchError
ex.deserializedType = cstring typetraits.name(T)
ex.actualSszSize = input.len
ex.elementSize = elemSize
raise ex
result.setLen input.len div elemSize
trs "READING LIST WITH LEN ", result.len
for i in 0 ..< result.len:
trs "TRYING TO READ LIST ELEM ", i
let offset = i * elemSize
result[i] = readSszValue(input[offset ..< offset+elemSize], ElemType)
trs "LIST READING COMPLETE"
else:
if input.len == 0:
# This is an empty list.
# The default initialization of the return value is fine.
return
var offset = readOffset 0
trs "GOT OFFSET ", offset
let resultLen = offset div offsetSize
trs "LEN ", resultLen
result.setLen resultLen
for i in 1 ..< resultLen:
let nextOffset = readOffset(i * offsetSize)
result[i - 1] = readSszValue(input[offset ..< nextOffset], ElemType)
offset = nextOffset
result[resultLen - 1] = readSszValue(input[offset ..< input.len], ElemType)
elif result is object|tuple:
enumInstanceSerializedFields(result, fieldName, field):
const boundingOffsets = T.getFieldBoundingOffsets(fieldName)
trs "BOUNDING OFFSET FOR FIELD ", fieldName, " = ", boundingOffsets
type FieldType = type field
type SszType = type toSszType(default(FieldType))
when isFixedSize(SszType):
const
startOffset = boundingOffsets[0]
endOffset = boundingOffsets[1]
trs "FIXED FIELD ", startOffset, "-", endOffset
else:
let
startOffset = readOffset(boundingOffsets[0])
endOffset = if boundingOffsets[1] == -1: input.len
else: readOffset(boundingOffsets[1])
trs "VAR FIELD ", startOffset, "-", endOffset
# TODO The extra type escaping here is a work-around for a Nim issue:
when type(FieldType) is type(SszType):
trs "READING NATIVE ", fieldName, ": ", name(SszType)
field = readSszValue(input[startOffset ..< endOffset], SszType)
trs "READING COMPLETE ", fieldName
elif useListType and FieldType is List:
field = readSszValue(input[startOffset ..< endOffset], FieldType)
else:
trs "READING FOREIGN ", fieldName, ": ", name(SszType)
field = fromSszBytes(FieldType, input[startOffset ..< endOffset])
elif result is SomeInteger|bool:
trs "READING BASIC TYPE ", type(result).name, " input=", input.len
result = fromSszBytes(type(result), input)
trs "RESULT WAS ", repr(result)
else:
unsupported T
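# Decoding sketch for the list-of-variable-size branch above (illustration
# only): for a seq[seq[byte]] of two elements, the first 4-byte offset also
# fixes the element count, since resultLen = offset div offsetSize:
#
#   input = [byte 8, 0, 0, 0,   # offset of element 0 -> 8 div 4 == 2 elements
#            9, 0, 0, 0,        # offset of element 1
#            1,                 # element 0 = @[1]
#            2, 3]              # element 1 = @[2, 3]
#
#   readSszValue(input, seq[seq[byte]]) == @[@[byte 1], @[byte 2, 3]]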
@ -0,0 +1,63 @@
import
stew/objects, stew/ranges/ptr_arith,
./types, ./bytes_reader
type
MemRange = object
startAddr: ptr byte
length: int
SszNavigator*[T] = object
m: MemRange
func sszMount*(data: openarray[byte], T: type): SszNavigator[T] =
let startAddr = unsafeAddr data[0]
SszNavigator[T](m: MemRange(startAddr: startAddr, length: data.len))
template checkBounds(m: MemRange, offset: int) =
if offset > m.length:
raise newException(MalformedSszError, "Malformed SSZ")
template toOpenArray(m: MemRange): auto =
makeOpenArray(m.startAddr, m.length)
func navigateToField[T](n: SszNavigator[T],
fieldName: static string,
FieldType: type): SszNavigator[FieldType] =
mixin toSszType
type SszFieldType = type toSszType(default FieldType)
const boundingOffsets = getFieldBoundingOffsets(T, fieldName)
checkBounds(n.m, boundingOffsets[1])
when isFixedSize(SszFieldType):
SszNavigator[FieldType](m: MemRange(
startAddr: shift(n.m.startAddr, boundingOffsets[0]),
length: boundingOffsets[1] - boundingOffsets[0]))
else:
template readOffset(offset): int =
int fromSszBytes(uint32, makeOpenArray(shift(n.m.startAddr, offset),
sizeof(uint32)))
let
startOffset = readOffset boundingOffsets[0]
endOffset = when boundingOffsets[1] == -1: n.m.length
else: readOffset boundingOffsets[1]
if endOffset < startOffset or endOffset > n.m.length:
raise newException(MalformedSszError, "Incorrect offset values")
SszNavigator[FieldType](m: MemRange(
startAddr: shift(n.m.startAddr, startOffset),
length: endOffset - startOffset))
template `.`*[T](n: SszNavigator[T], field: untyped): auto =
type RecType = T
type FieldType = type(default(RecType).field)
navigateToField(n, astToStr(field), FieldType)
func `[]`*[T](n: SszNavigator[T]): T =
readSszValue(toOpenArray(n.m), T)
converter derefNavigator*[T](n: SszNavigator[T]): T =
n[]
beacon_chain/ssz/types.nim
@ -0,0 +1,238 @@
import
tables,
stew/shims/macros, stew/[objects, bitseqs],
serialization/[object_serialization, errors]
const
useListType* = false
offsetSize* = 4
type
BasicType* = char|bool|SomeUnsignedInt
SszError* = object of SerializationError
MalformedSszError* = object of SszError
SszSizeMismatchError* = object of SszError
deserializedType*: cstring
actualSszSize*: int
elementSize*: int
SszChunksLimitExceeded* = object of SszError
SszSchema* = ref object
nodes*: seq[SszNode]
SszTypeKind* = enum
sszNull
sszUInt
sszBool
sszList
sszVector
sszBitList
sszBitVector
sszRecord
SszType* = ref object
case kind*: SszTypeKind
of sszUInt, sszBitVector:
bits*: int
of sszBool, sszNull, sszBitList:
discard
of sszVector:
size*: int
vectorElemType*: SszType
of sszList:
listElemType*: SszType
of sszRecord:
schema*: SszSchema
SszNodeKind* = enum
Field
Union
SszNode* = ref object
name*: string
typ*: SszType
case kind: SszNodeKind
of Union:
variants*: seq[SszSchema]
of Field:
discard
when useListType:
type List*[T; maxLen: static int] = distinct seq[T]
else:
type List*[T; maxLen: static int] = seq[T]
macro unsupported*(T: typed): untyped =
# TODO: {.fatal.} breaks compilation even in `compiles()` context,
# so we use this macro instead. It's also much better at figuring
# out the actual type that was used in the instantiation.
# File both problems as issues.
error "SSZ serialization of the type " & humaneTypeName(T) & " is not supported"
template ElemType*(T: type[array]): untyped =
type(default(T)[low(T)])
template ElemType*[T](A: type[openarray[T]]): untyped =
T
template ElemType*(T: type[seq|string|List]): untyped =
type(default(T)[0])
func isFixedSize*(T0: type): bool {.compileTime.} =
mixin toSszType, enumAllSerializedFields
when T0 is openarray:
return false
else:
type T = type toSszType(default T0)
when T is BasicType:
return true
elif T is array:
return isFixedSize(ElemType(T))
elif T is object|tuple:
enumAllSerializedFields(T):
when not isFixedSize(FieldType):
return false
return true
func fixedPortionSize*(T0: type): int {.compileTime.} =
mixin enumAllSerializedFields, toSszType
type T = type toSszType(default T0)
when T is BasicType: sizeof(T)
elif T is array:
const elementCount = high(T).ord - low(T).ord + 1
type E = ElemType(T)
when isFixedSize(E): elementCount * fixedPortionSize(E)
else: elementCount * offsetSize
elif T is seq|string|openarray: offsetSize
elif T is object|tuple:
var res = 0
enumAllSerializedFields(T):
when isFixedSize(FieldType):
res += fixedPortionSize(FieldType)
else:
res += offsetSize
res
else:
unsupported T0
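# Sizing sketch (illustrative; these mirror checks that can be written as
# `static: doAssert ...` in modules where `toSszType` is in scope):
#   fixedPortionSize(seq[uint64])      == offsetSize   # just the 4-byte offset
#   fixedPortionSize(array[4, uint16]) == 8            # full inline payload
#   isFixedSize(seq[uint64])           == false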
func sszSchemaType*(T0: type): SszType {.compileTime.} =
mixin toSszType, enumAllSerializedFields
type T = type toSszType(default T0)
when T is bool:
SszType(kind: sszBool)
elif T is uint8|char:
SszType(kind: sszUInt, bits: 8)
elif T is uint16:
SszType(kind: sszUInt, bits: 16)
elif T is uint32:
SszType(kind: sszUInt, bits: 32)
elif T is uint64:
SszType(kind: sszUInt, bits: 64)
elif T is seq|string:
SszType(kind: sszList, listElemType: sszSchemaType(ElemType(T)))
elif T is array:
SszType(kind: sszVector, vectorElemType: sszSchemaType(ElemType(T)))
elif T is BitArray:
SszType(kind: sszBitVector, bits: T.bits)
elif T is BitSeq:
SszType(kind: sszBitList)
elif T is object|tuple:
var recordSchema = SszSchema()
var caseBranches = initTable[string, SszSchema]()
caseBranches[""] = recordSchema
# TODO case objects are still not supported here.
# `recordFields` has to be refactored to properly
# report nested discriminator fields.
enumAllSerializedFields(T):
recordSchema.nodes.add SszNode(
name: fieldName,
typ: sszSchemaType(FieldType),
kind: Field)
else:
unsupported T0
# TODO This should have been an iterator, but the VM can't compile the
# code due to "too many registers required".
proc fieldInfos*(RecordType: type): seq[tuple[name: string,
offset: int,
fixedSize: int,
branchKey: string]] =
mixin enumAllSerializedFields
var
offsetInBranch = {"": 0}.toTable
nestedUnder = initTable[string, string]()
enumAllSerializedFields(RecordType):
const
isFixed = isFixedSize(FieldType)
fixedSize = when isFixed: fixedPortionSize(FieldType)
else: 0
branchKey = when fieldCaseDisciminator.len == 0: ""
else: fieldCaseDisciminator & ":" & $fieldCaseBranches
fieldSize = when isFixed: fixedSize
else: offsetSize
nestedUnder[fieldName] = branchKey
var fieldOffset: int
offsetInBranch.withValue(branchKey, val):
fieldOffset = val[]
val[] += fieldSize
do:
let parentBranch = nestedUnder.getOrDefault(fieldCaseDisciminator, "")
fieldOffset = offsetInBranch[parentBranch]
offsetInBranch[branchKey] = fieldOffset + fieldSize
result.add((fieldName, fieldOffset, fixedSize, branchKey))
func getFieldBoundingOffsetsImpl(RecordType: type,
fieldName: static string):
tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
result = (-1, -1)
var fieldBranchKey: string
for f in fieldInfos(RecordType):
if fieldName == f.name:
result[0] = f.offset
if f.fixedSize > 0:
result[1] = result[0] + f.fixedSize
return
else:
fieldBranchKey = f.branchKey
elif result[0] != -1 and
f.fixedSize == 0 and
f.branchKey == fieldBranchKey:
# We have found the next variable sized field
result[1] = f.offset
return
func getFieldBoundingOffsets*(RecordType: type,
fieldName: static string):
tuple[fieldOffset, nextFieldOffset: int] {.compileTime.} =
## Returns the start and end offsets of a field.
##
## For fixed-size fields, the start offset points to the first
## byte of the field and the end offset points to 1 byte past the
## end of the field.
##
## For variable-size fields, the returned offsets point to the
## statically known positions of the 32-bit offset values written
## within the SSZ object. You must read the 32-bit values stored
  ## at these locations in order to obtain the actual offsets.
##
## For variable-size fields, the end offset may be -1 when the
## designated field is the last variable sized field within the
## object. Then the SSZ object boundary known at run-time marks
## the end of the variable-size field.
type T = RecordType
anonConst getFieldBoundingOffsetsImpl(T, fieldName)
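# Usage sketch (hypothetical `Pair` type, illustration only):
#   type Pair = object
#     a: uint32     # fixed-size: occupies bytes 0 ..< 4
#     b: seq[byte]  # variable-size: its 32-bit offset lives at bytes 4 ..< 8
# would give, at compile time:
#   getFieldBoundingOffsets(Pair, "a") == (0, 4)    # actual byte range of `a`
#   getFieldBoundingOffsets(Pair, "b") == (4, -1)   # offset slot; last variable field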
@ -33,7 +33,7 @@
import
  algorithm, collections/sets, chronicles, math, options, sequtils, sets, tables,
  ./extras, ./ssz, ./beacon_node_types,
  ./spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ./spec/[state_transition_block, state_transition_epoch]

# Canonical state transition functions
@ -24,8 +24,12 @@ type
    node*: BeaconNode
    db*: BeaconChainDB

  BlockRootSlot* = object
    blockRoot: Eth2Digest
    slot: Slot

const
  MaxRootsToRequest = 512'u64
  MaxHeadersToRequest = MaxRootsToRequest
  MaxAncestorBlocksResponse = 256
@ -67,8 +71,11 @@ proc mergeBlockHeadersAndBodies(headers: openarray[BeaconBlockHeader], bodies: o
    res[^1].fromHeaderAndBody(headers[i], bodies[i])
  some(res)

proc getBeaconBlocks*(peer: Peer,
                      blockRoot: Eth2Digest,
                      slot: Slot,
                      maxBlocks, skipSlots: uint64,
                      backward: bool): Future[Option[seq[BeaconBlock]]] {.gcsafe, async.}
p2pProtocol BeaconSync(version = 1,
                       shortName = "bcs",
@ -113,8 +120,8 @@ p2pProtocol BeaconSync(version = 1,
      var s = bestSlot + 1
      while s <= m.bestSlot:
        debug "Waiting for block headers", fromPeer = peer, remoteBestSlot = m.bestSlot, peer
        let headersLeft = uint64(m.bestSlot - s)
        let blocks = await peer.getBeaconBlocks(bestRoot, s, min(headersLeft, MaxHeadersToRequest), 0, false)
        if blocks.isSome:
          if blocks.get.len == 0:
            info "Got 0 blocks while syncing", peer
@ -144,53 +151,40 @@ p2pProtocol BeaconSync(version = 1,
  proc goodbye(peer: Peer, reason: DisconnectionReason)
requestResponse:
proc getStatus(
peer: Peer,
sha: Eth2Digest,
userAgent: string,
timestamp: uint64) =
# TODO: How should this be implemented?
# https://github.com/ethereum/eth2.0-specs/blob/dev/specs/networking/rpc-interface.md#get-status
await response.send(sha, userAgent, timestamp)
proc status(peer: Peer, sha: Eth2Digest, userAgent: string, timestamp: uint64)
  nextId 10

  requestResponse:
    proc getBeaconBlockRoots(
            peer: Peer,
            fromSlot: Slot,
            maxRoots: uint64) =
      let maxRoots = min(MaxRootsToRequest, maxRoots)
      var s = fromSlot
      var roots = newSeqOfCap[BlockRootSlot](maxRoots)
      let blockPool = peer.networkState.node.blockPool
      let maxSlot = blockPool.head.blck.slot
      while s <= maxSlot:
        for r in blockPool.blockRootsForSlot(s):
          roots.add BlockRootSlot(blockRoot: r, slot: s)
          if roots.len == maxRoots.int: break
        s += 1
      await response.send(roots)

    proc beaconBlockRoots(peer: Peer, roots: openarray[BlockRootSlot])

  requestResponse:
    proc getBeaconBlockHeaders(
            peer: Peer,
            blockRoot: Eth2Digest,
            slot: Slot,
            maxHeaders: uint64,
            skipSlots: uint64,
            backward: bool) =
      let maxHeaders = min(MaxHeadersToRequest, maxHeaders)
      var headers: seq[BeaconBlockHeader]
      let db = peer.networkState.db

      if backward:
        # TODO: implement skipSlots
        var blockRoot = blockRoot
@ -205,7 +199,7 @@ p2pProtocol BeaconSync(version = 1,
        while not br.isNil:
          blockRefs.add(br)
          if blockRefs.len == maxHeaders.int:
            break
          br = br.parent
@ -223,50 +217,13 @@ p2pProtocol BeaconSync(version = 1,
        while s <= maxSlot:
          for r in blockPool.blockRootsForSlot(s):
            headers.add(db.getBlock(r).get().toHeader)
            if headers.len == maxHeaders.int: break
          s += 1

      await response.send(headers)

    proc beaconBlockHeaders(peer: Peer, blockHeaders: openarray[BeaconBlockHeader])
# TODO move this at the bottom, because it's not in the spec yet, but it will
# consume a `method_id`
requestResponse:
proc getAncestorBlocks(
peer: Peer,
needed: openarray[FetchRecord]) =
var resp = newSeqOfCap[BeaconBlock](needed.len)
let db = peer.networkState.db
var neededRoots = initSet[Eth2Digest]()
for rec in needed: neededRoots.incl(rec.root)
for rec in needed:
if (var blck = db.getBlock(rec.root); blck.isSome()):
# TODO validate historySlots
let firstSlot = blck.get().slot - rec.historySlots
for i in 0..<rec.historySlots.int:
resp.add(blck.get())
if resp.len >= MaxAncestorBlocksResponse:
break
if blck.get().parent_root in neededRoots:
# Don't send duplicate blocks, if neededRoots has roots that are
# in the same chain
break
if (blck = db.getBlock(blck.get().parent_root);
blck.isNone() or blck.get().slot < firstSlot):
break
if resp.len >= MaxAncestorBlocksResponse:
break
await response.send(resp)
proc ancestorBlocks(peer: Peer, blocks: openarray[BeaconBlock])
  requestResponse:
    proc getBeaconBlockBodies(
            peer: Peer,
@ -285,7 +242,11 @@ p2pProtocol BeaconSync(version = 1,
      peer: Peer,
      blockBodies: openarray[BeaconBlockBody])

proc getBeaconBlocks*(peer: Peer,
                      blockRoot: Eth2Digest,
                      slot: Slot,
                      maxBlocks, skipSlots: uint64,
                      backward: bool): Future[Option[seq[BeaconBlock]]] {.async.} =
  ## Retrieve block headers and block bodies from the remote peer, merge them into blocks.
  assert(maxBlocks <= MaxHeadersToRequest)
  let headersResp = await peer.getBeaconBlockHeaders(blockRoot, slot, maxBlocks, skipSlots, backward)
@ -1,6 +1,8 @@
--threads:on
--opt:speed
# -d:"chronicles_sinks=json"
@if windows:
  # increase stack size
  --passL:"-Wl,--stack,8388608"
@ -1,7 +1,7 @@
import
  confutils,
  ../beacon_chain/[extras, ssz],
  ../beacon_chain/spec/[beaconstate, datatypes, digest, validator],
  ../tests/testutil

proc stateSize(deposits: int, maxContent = false) =
@ -23,7 +23,7 @@ proc stateSize(deposits: int, maxContent = false) =
  # validatorsPerCommittee =
  #   len(crosslink_committees[0].committee) # close enough..
  # for a in state.latest_attestations.mitems():
  #   a.aggregation_bits = BitSeq.init(validatorsPerCommittee)
  echo "Validators: ", deposits, ", total: ", SSZ.encode(state).len

dispatch(stateSize)
@ -1,16 +1,11 @@
import
  confutils, stats, times,
  strformat,
  options, sequtils, random, tables,
  ../tests/[testutil],
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
  ../beacon_chain/[attestation_pool, extras, ssz, state_transition, fork_choice]
proc `%`(v: uint64): JsonNode =
if v > uint64(high(BiggestInt)): newJString($v) else: newJInt(BiggestInt(v))
proc `%`(v: Eth2Digest): JsonNode = newJString($v)
proc `%`(v: ValidatorSig|ValidatorPubKey): JsonNode = newJString($v)
type Timers = enum
  tBlock = "Process non-epoch slot with block"
  tEpoch = "Process epoch slot with block"
@ -36,14 +31,11 @@ template withTimerRet(stats: var RunningStat, body: untyped): untyped =
  tmp
proc `%`*(x: Slot): JsonNode {.borrow.}
proc `%`*(x: Epoch): JsonNode {.borrow.}
proc writeJson*(prefix, slot, v: auto) =
  var f: File
  defer: close(f)
  let fileName = fmt"{prefix:04}-{humaneSlotNum(slot):08}.json"
  Json.saveFile(fileName, v, pretty = true)

cli do(slots = 448,
       validators = SLOTS_PER_EPOCH * 9, # One per shard is minimum
@ -9,7 +9,6 @@ import # Unit test
  ./test_attestation_pool,
  ./test_beacon_chain_db,
  ./test_beacon_node,
./test_bitfield,
  ./test_beaconstate,
  ./test_block_pool,
  ./test_helpers,
@ -21,4 +20,5 @@ import # Unit test
import # Official fixtures
  ./official/test_fixture_shuffling,
  ./official/test_fixture_bls,
  ./official/test_fixture_ssz_uint,
  ./official/test_fixture_ssz_static
@ -1 +1 @@
Subproject commit de468c07c2518cf1546c4cb615418738a2918577
@ -1,4 +1,6 @@
import
  # Standard library
  os, strutils,
  # Status libs
  stew/byteutils,
  eth/common, serialization, json_serialization,
@ -72,6 +74,10 @@ type
    handler*: string
    test_cases*: seq[T]
const
FixturesDir* = currentSourcePath.rsplit(DirSep, 1)[0] / "fixtures"
JsonTestsDir* = FixturesDir / "json_tests"
# #######################
# Default init
proc default*(T: typedesc): T = discard
@ -87,9 +93,6 @@ proc readValue*[N: static int](r: var JsonReader, a: var array[N, byte]) {.inlin
  # if so export that to nim-eth
  hexToByteArray(r.readValue(string), a)
proc readValue*(r: var JsonReader, a: var ValidatorIndex) {.inline.} =
a = r.readValue(uint32)
proc readValue*(r: var JsonReader, a: var seq[byte]) {.inline.} =
  ## Custom deserializer for seq[byte]
  a = hexToSeqByte(r.readValue(string))
@ -0,0 +1,220 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
# Standard library
os, strutils, strformat, tables, unittest, sequtils, typetraits,
# Status libs
stew/[byteutils, bitseqs], nimcrypto/hash,
serialization/testing/tracing,
json_serialization, json_serialization/lexer,
# Beacon chain internals
../../beacon_chain/ssz,
../../beacon_chain/spec/[datatypes, validator, digest, crypto],
# Test utilities
../testutil,
./fixtures_utils
const
failFast = defined(debug) and false
traceOnFailure = defined(debug)
type
SpecObject[T] = ref object of RootObj
obj: ref T
SszStaticTest* = object
obj: RootRef
objType, objJsonRepr: string
expectedBytes: seq[byte]
expectedRootHash, expectedSigHash: Eth2Digest
hasSigHash: bool
line: int
ReaderProc = proc(r: var JsonReader): RootRef {.cdecl, gcsafe.}
TestingProc = proc(file: string, test: SszStaticTest) {.cdecl, gcsafe.}
SpecTypeVtable = object
reader: ReaderProc
tester: TestingProc
let testsDir = JsonTestsDir / "ssz_static" / "core"
let minDevTestFile = getTempDir() / "minimal_ssz_test.json"
var specTypesRTTI = initTable[string, SpecTypeVtable]()
proc readerImpl[T](r: var JsonReader): RootRef {.cdecl, gcsafe.} =
var res = SpecObject[T](obj: new T)
res.obj[] = r.readValue(T)
RootRef(res)
# TODO:
# Fun fact: With mainnet settings, the BeaconState object
# is too large to safely exist as a stack variable. The
# `testerImpl` procedure below will trigger a segmentation
# fault on its very first line because of it.
#
# To work-around this issue, this file uses ref objects
# to store the loaded test cases, but we must compare them
# by value:
template valuesAreEqual[T](a, b: ref T): bool =
a[] == b[]
template valuesAreEqual[T](a, b: T): bool =
a == b
template `$`(x: ref auto): string =
$(x[])
proc readSszValueRef*(input: openarray[byte], T: type): ref T =
new result
result[] = readSszValue(input, T)
proc testerImpl[T](path: string, test: SszStaticTest) {.cdecl, gcsafe.} =
doAssert test.obj != nil
var obj = SpecObject[T](test.obj)
template execTest(testOpName, testOp, expectedRes) =
let ourRes = testOp
let success = valuesAreEqual(ourRes, expectedRes)
if not success and traceOnFailure:
{.gcsafe.}:
echo "====== ", testOpName, " failed ", path, ":", test.line
echo " our result:"
echo " ", ourRes
echo " expected result:"
echo " ", expectedRes
when defined(serialization_tracing):
tracingEnabled = true
discard testOp
tracingEnabled = false
echo "======================================================"
if failFast: quit 1
# TODO BEWARE: Passing the boolean expression to `check` directly
# will trigger a Nim compilation bomb. This is most likely caused
# by a mis-behaving generics instantiations cache when a function
# is explicitly instantiated to get its address.
# There is a recursive instantiation loop of system's `$` operator.
check success
# let ob = SSZ.encode(obj.obj)
when false:
execTest "serialization",
(let ourBytes = SSZ.encode(obj.obj); ourBytes),
test.expectedBytes
execTest "root hash check",
hashTreeRoot(obj.obj),
test.expectedRootHash
when hasSigningRoot(T):
doAssert test.hasSigHash
execTest "sig hash check",
signingRoot(obj.obj),
test.expectedSigHash
when true:
execTest "roundtrip",
readSszValueRef(test.expectedBytes, T),
obj.obj
template addSpecTypeRTTI(T: type) =
var reader = readerImpl[T]
var tester = testerImpl[T]
specTypesRTTI.add(T.name, SpecTypeVtable(reader: reader,
tester: tester))
foreachSpecType(addSpecTypeRTTI)
proc runTest(path: string, test: SszStaticTest) =
if test.objType != "Unsupported":
specTypesRTTI[test.objType].tester(path, test)
proc advanceToClosingBrace(lexer: var JsonLexer, openedBraces = 1) =
var closedBraces = 0
while closedBraces < openedBraces:
while lexer.tok notin {tkCurlyLe, tkCurlyRi}:
lexer.next
if lexer.tok == tkCurlyLe:
dec closedBraces
else:
inc closedBraces
lexer.next
proc readValue*(r: var JsonReader, result: var SszStaticTest) {.gcsafe.} =
r.skipToken tkCurlyLe
if r.lexer.tok != tkString:
r.raiseUnexpectedToken(etString)
var reader: ReaderProc
let key = r.lexer.strVal
{.gcsafe.}:
if not specTypesRTTI.hasKey(key):
result.objType = "Unsupported"
r.lexer.advanceToClosingBrace
return
result.objType = key
result.line = r.lexer.line
reader = specTypesRTTI[key].reader
r.lexer.next
r.skipToken tkColon
r.skipToken tkCurlyLe
while r.lexer.tok == tkString:
# TODO: I was hit by a very nasty Nim bug here.
# If you use `let` on the next line, the variable will be
    # aliased to `r.lexer.strVal` instead of being copied.
# This will create problems, because the value is modified
# on the next line.
var field = r.lexer.strVal
r.lexer.next
r.skipToken tkColon
case field
of "value":
result.obj = reader(r)
of "serialized":
result.expectedBytes = hexToSeqByte r.readValue(string)
of "root":
result.expectedRootHash = Eth2Digest.fromHex r.readValue(string)
of "signing_root":
result.expectedSigHash = Eth2Digest.fromHex r.readValue(string)
result.hasSigHash = true
else:
r.raiseUnexpectedField(field, type(result).name)
if r.lexer.tok == tkComma:
r.lexer.next()
else:
break
r.skipToken tkCurlyRi
r.skipToken tkCurlyRi
when failFast:
# This will produce faster failures in debug builds
{.gcsafe.}: runTest result
proc executeSuite(path: string) =
let sszSuite = path.parseTests SszStaticTest
suite &"{path}: {sszSuite.title}":
for sszTest in sszSuite.test_cases:
test &"test case on line {sszTest.line}":
runTest path, sszTest
if fileExists(minDevTestFile):
executeSuite minDevTestFile
for kind, path in walkDir(testsDir):
if kind notin {pcFile, pcLinkToFile}: continue
if const_preset in path:
executeSuite path
@ -0,0 +1,2 @@
-d:"serialization_tracing"
-d:"ssz_testing"
@ -25,8 +25,8 @@ type
    ssz*: seq[byte]
    tags*: seq[string]

const
  TestsDir = JsonTestsDir / "ssz_generic" / "uint"

func to(val: string, T: typedesc): T =
  when T is StUint:
@ -97,18 +97,18 @@ proc runSSZUintTest(inputTests: Tests[SSZUint]) =
suite "Official - SSZ unsigned integer tests" & preset(): suite "Official - SSZ unsigned integer tests" & preset():
block: # "Integers right at or beyond the bounds of the allowed value range" block: # "Integers right at or beyond the bounds of the allowed value range"
let uintBounds = parseTests(TestFolder / TestsPath / "uint_bounds.json", SSZUint) let uintBounds = parseTests(TestsDir / "uint_bounds.json", SSZUint)
test uintBounds.summary & preset(): test uintBounds.summary & preset():
runSSZUintTest(uintBounds) runSSZUintTest(uintBounds)
block: # "Random integers chosen uniformly over the allowed value range" block: # "Random integers chosen uniformly over the allowed value range"
let uintRandom = parseTests(TestFolder / TestsPath / "uint_random.json", SSZUint) let uintRandom = parseTests(TestsDir / "uint_random.json", SSZUint)
test uintRandom.summary & preset(): test uintRandom.summary & preset():
runSSZUintTest(uintRandom) runSSZUintTest(uintRandom)
# TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280 # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
block: # "Serialized integers that are too short or too long" block: # "Serialized integers that are too short or too long"
let uintWrongLength = parseTests(TestFolder / TestsPath / "uint_wrong_length.json", SSZUint) let uintWrongLength = parseTests(TestsDir / "uint_wrong_length.json", SSZUint)
test "[Skipped] " & uintWrongLength.summary & preset(): test "[Skipped] " & uintWrongLength.summary & preset():
# TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280 # TODO: pending fix for https://github.com/status-im/nim-beacon-chain/issues/280
echo " [Skipped] Pending https://github.com/status-im/nim-beacon-chain/issues/280" echo " [Skipped] Pending https://github.com/status-im/nim-beacon-chain/issues/280"
@ -23,8 +23,9 @@ mkdir -p "$VALIDATORS_DIR"
cd "$GIT_ROOT" cd "$GIT_ROOT"
mkdir -p $BUILD_OUTPUTS_DIR mkdir -p $BUILD_OUTPUTS_DIR
# Run with "SHARD_COUNT=8 ./start.sh" to change these # Run with "SHARD_COUNT=4 ./start.sh" to change these
DEFS="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024 DEFS="-d:chronicles_log_level=DEBUG "
DEFS+="-d:SHARD_COUNT=${SHARD_COUNT:-8} " # Spec default: 1024
DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-8} " # Spec default: 64 DEFS+="-d:SLOTS_PER_EPOCH=${SLOTS_PER_EPOCH:-8} " # Spec default: 64
DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-12} " # Spec default: 6 DEFS+="-d:SECONDS_PER_SLOT=${SECONDS_PER_SLOT:-12} " # Spec default: 6
@ -1,27 +0,0 @@
import
unittest,
../beacon_chain/spec/[bitfield]
suite "BitField":
test "roundtrips":
var
a = BitField.init(100)
b = BitField.init(100)
check:
not a.get_bitfield_bit(0)
a.set_bitfield_bit(1)
check:
not a.get_bitfield_bit(0)
a.get_bitfield_bit(1)
b.set_bitfield_bit(2)
a.combine(b)
check:
not a.get_bitfield_bit(0)
a.get_bitfield_bit(1)
a.get_bitfield_bit(2)
@ -7,89 +7,77 @@
import
  unittest, sequtils, options,
  nimcrypto, eth/common, blscurve, serialization/testing/generic_suite,
  ../beacon_chain/spec/[datatypes, digest],
  ../beacon_chain/ssz, ../beacon_chain/ssz/navigator

type
  SomeEnum = enum
    A, B, C

  Simple = object
    flag: bool
    # count: StUint[256]
    # ignored {.dontSerialize.}: string
    # data: array[256, bool]

template reject(stmt) =
  assert(not compiles(stmt))

static:
  assert isFixedSize(bool) == true

  assert fixedPortionSize(array[10, bool]) == 10
  assert fixedPortionSize(array[SomeEnum, uint64]) == 24
  assert fixedPortionSize(array[3..5, string]) == 12

  assert fixedPortionSize(string) == 4
  assert fixedPortionSize(seq[bool]) == 4
  assert fixedPortionSize(seq[string]) == 4

  assert isFixedSize(array[20, bool]) == true
  assert isFixedSize(Simple) == true

  assert isFixedSize(string) == false
  assert isFixedSize(seq[bool]) == false
  assert isFixedSize(seq[string]) == false

  reject fixedPortionSize(int)

type
  ObjWithFields = object
    f0: uint8
    f1: uint32
    f2: EthAddress
    f3: MDigest[256]
    f4: seq[byte]
    f5: ValidatorIndex

static:
  assert fixedPortionSize(ObjWithFields) ==
    1 + 4 + sizeof(EthAddress) + (256 div 8) + 4 + 8

executeRoundTripTests SSZ

type
  Foo = object
    bar: Bar

  Bar = object
    b: string
    baz: Baz

  Baz = object
    i: uint64

suite "SSZ Navigation":
  test "simple object fields":
    var foo = Foo(bar: Bar(b: "bar", baz: Baz(i: 10'u64)))
    let encoded = SSZ.encode(foo)

    check SSZ.decode(encoded, Foo) == foo

    let mountedFoo = sszMount(encoded, Foo)
    check mountedFoo.bar.b == "bar"

    let mountedBar = mountedFoo.bar
    check mountedBar.baz.i == 10'u64
@ -10,18 +10,27 @@ import
import
  chronicles, eth/trie/[db],
  ../beacon_chain/[beacon_chain_db, block_pool, extras, ssz, state_transition,
                   validator_pool, beacon_node_types],
  ../beacon_chain/spec/[beaconstate, crypto, datatypes, digest,
                        helpers, validator]

func preset*(): string =
  " [Preset: " & const_preset & ']'

when ValidatorPrivKey is BlsValue:
  func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
    # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
    # lighthouse.
    # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
    result.kind = BlsValueType.Real
    var bytes = uint64(i + 1000).toBytesLE()
    copyMem(addr result.blsValue.x[0], addr bytes[0], sizeof(bytes))
else:
  func makeFakeValidatorPrivKey*(i: int): ValidatorPrivKey =
    # 0 is not a valid BLS private key - 1000 helps interop with rust BLS library,
    # lighthouse.
    # TODO: switch to https://github.com/ethereum/eth2.0-pm/issues/60
    var bytes = uint64(i + 1000).toBytesLE()
    copyMem(addr result.x[0], addr bytes[0], sizeof(bytes))

func makeFakeHash*(i: int): Eth2Digest =
  var bytes = uint64(i).toBytesLE()
@ -170,9 +179,8 @@ proc makeAttestation*(
  doAssert sac_index != -1, "find_shard_committee should guarantee this"

  var aggregation_bits = CommitteeValidatorsBits.init(committee.len)
  aggregation_bits.raiseBit sac_index

  let
    msg = hash_tree_root(
@ -192,7 +200,7 @@ proc makeAttestation*(
    data: data,
    aggregation_bits: aggregation_bits,
    signature: sig,
    custody_bits: CommitteeValidatorsBits.init(committee.len)
  )

proc makeTestDB*(tailState: BeaconState, tailBlock: BeaconBlock): BeaconChainDB =