spec updates

* more work on block processing
* fix some unsigned issues
* fix ssz issue when last chunk is not full
* work around empty const seq compiler bug
* XXX -> TODO
This commit is contained in:
Jacek Sieka 2018-11-29 16:11:05 -06:00
parent a661e5afd8
commit 6669e698b5
No known key found for this signature in database
GPG Key ID: 6299FEB3EB6FA465
12 changed files with 177 additions and 69 deletions

View File

@ -148,7 +148,7 @@ proc scheduleCycleActions(node: BeaconNode) =
# Schedule attestations
let
committeesIdx = get_shard_and_committees_index(node.beaconState, slot.uint64)
committeesIdx = get_shards_and_committees_index(node.beaconState, slot.uint64)
for shard in node.beaconState.shard_and_committee_for_slots[committees_idx]:
for validatorIdx in shard.committee:

View File

@ -11,6 +11,9 @@
import
./spec/[crypto, digest]
const
BEACON_CHAIN_SHARD* = 0xffffffffffffffff'u64
type
InitialValidator* = object
## Eth1 validator registration contract output

View File

@ -26,14 +26,20 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
## must be calculated before creating the genesis block.
#
# Induct validators
# Not in spec: the system doesn't work unless there are at least CYCLE_LENGTH
# validators - there needs to be at least one member in each committee -
# good to know for testing, though arguably the system is not that useful at
# at that point :)
assert initial_validator_entries.len >= CYCLE_LENGTH
var validators: seq[ValidatorRecord]
for v in initial_validator_entries:
validators = get_new_validators(
validators,
ForkData(
pre_fork_version: 0,
post_fork_version: 0,
pre_fork_version: INITIAL_FORK_VERSION,
post_fork_version: INITIAL_FORK_VERSION,
fork_slot_number: 0xffffffffffffffff'u64
),
v.pubkey,
@ -44,7 +50,6 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
ACTIVE,
0
).validators
# Setup state
let
x = get_new_shuffling(Eth2Digest(), validators, 0)
@ -69,16 +74,21 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
)
)
func get_shard_and_committees_index*(state: BeaconState, slot: uint64): uint64 =
func get_shards_and_committees_index*(state: BeaconState, slot: uint64): uint64 =
# TODO spec unsigned-unsafe here
let earliest_slot_in_array =
state.last_state_recalculation_slot - CYCLE_LENGTH
if state.last_state_recalculation_slot > CYCLE_LENGTH.uint64:
state.last_state_recalculation_slot - CYCLE_LENGTH
else:
0
doAssert earliest_slot_in_array <= slot and
slot < earliest_slot_in_array + CYCLE_LENGTH * 2
slot - earliest_slot_in_array
proc get_shards_and_committees_for_slot*(
state: BeaconState, slot: uint64): seq[ShardAndCommittee] =
let index = state.get_shard_and_committees_index(slot)
let index = state.get_shards_and_committees_index(slot)
state.shard_and_committee_for_slots[index]
func get_beacon_proposer_index*(state: BeaconState, slot: uint64): uint64 =
@ -91,7 +101,7 @@ func get_beacon_proposer_index*(state: BeaconState, slot: uint64): uint64 =
##
## idx in Vidx == p(i mod N), pi being a random permutation of validators indices (i.e. a committee)
let idx = get_shard_and_committees_index(state, slot)
let idx = get_shards_and_committees_index(state, slot)
state.shard_and_committee_for_slots[idx][0].committee.mod_get(slot)
func get_block_hash*(state: BeaconState,
@ -118,9 +128,9 @@ proc get_attestation_participants*(state: BeaconState,
## bit field that corresponds to the committee of the shard at the time - this
## function converts it to list of indices in to BeaconState.validators
## Returns empty list if the shard is not found
# XXX Linear search through shard list? borderline ok, it's a small list
# XXX bitfield type needed, once bit order settles down
# XXX iterator candidate
# TODO Linear search through shard list? borderline ok, it's a small list
# TODO bitfield type needed, once bit order settles down
# TODO iterator candidate
let
sncs_for_slot = get_shards_and_committees_for_slot(
state, attestation_data.slot)
@ -129,7 +139,7 @@ proc get_attestation_participants*(state: BeaconState,
if snc.shard != attestation_data.shard:
continue
# XXX investigate functional library / approach to help avoid loop bugs
# TODO investigate functional library / approach to help avoid loop bugs
assert len(attester_bitfield) == ceil_div8(len(snc.committee))
for i, vindex in snc.committee:
let

View File

@ -76,4 +76,10 @@ func get_new_recent_block_hashes*(old_block_hashes: seq[Eth2Digest],
for _ in 0 ..< min(d, old_block_hashes.len):
result.add parent_hash
func ceil_div8*(v: int): int = (v + 7) div 8 # XXX use a proper bitarray!
func ceil_div8*(v: int): int =
  ## Smallest number of whole bytes needed to cover `v` bits (rounds up).
  ## TODO use a proper bitarray!
  let bumped = v + 7
  bumped div 8
func repeat_hash*(v: Eth2Digest, n: SomeInteger): Eth2Digest =
  ## Applies `eth2hash` to `v` a total of `n` times; `n == 0` returns `v`
  ## unchanged. Iterative form of the chained-hash used for RANDAO
  ## commitments.
  result = v
  var rounds = n
  while rounds > 0:
    result = eth2hash(result.data)
    dec rounds

View File

@ -26,7 +26,7 @@ func get_new_validators*(current_validators: seq[ValidatorRecord],
status: ValidatorStatusCodes,
current_slot: uint64
): tuple[validators: seq[ValidatorRecord], index: int] =
# XXX Spec candidate: inefficient API
# TODO Spec candidate: inefficient API
#
# Check that validator really did register
# let signed_message = signed_message = bytes32(pubkey) + withdrawal_credentials + randao_commitment

View File

@ -43,9 +43,9 @@ func toBytesSSZ(x: Eth2Digest): array[32, byte] = x.data
func fromBytesSSZUnsafe(T: typedesc[SomeInteger], data: ptr byte): T =
## Convert directly to bytes the size of the int. (e.g. ``uint16 = 2 bytes``)
## All integers are serialized as **big endian**.
## XXX: Assumes data points to a sufficiently large buffer
## TODO: Assumes data points to a sufficiently large buffer
# XXX: any better way to get a suitably aligned buffer in nim???
# TODO: any better way to get a suitably aligned buffer in nim???
# see also: https://github.com/nim-lang/Nim/issues/9206
var tmp: uint64
var alignedBuf = cast[ptr byte](tmp.addr)
@ -115,7 +115,7 @@ func deserialize(data: ptr byte, pos: var int, len: int, typ: typedesc[object]):
func deserialize*(
data: seq[byte or uint8] or openarray[byte or uint8] or string,
typ: typedesc[object]): auto {.inline.} =
# XXX: returns Option[typ]: https://github.com/nim-lang/Nim/issues/9195
# TODO: returns Option[typ]: https://github.com/nim-lang/Nim/issues/9195
var pos = 0
return deserialize((ptr byte)(data[0].unsafeAddr), pos, data.len, typ)
@ -140,7 +140,7 @@ const CHUNK_SIZE = 128
# ################### Hashing helpers ###################################
# XXX varargs openarray, anyone?
# TODO varargs openarray, anyone?
template withHash(body: untyped): array[32, byte] =
let tmp = withEth2Hash: body
toBytesSSZ tmp
@ -154,9 +154,10 @@ func hash(a, b: openArray[byte]): array[32, byte] =
h.update(a)
h.update(b)
# XXX: er, how is this _actually_ done?
# TODO: er, how is this _actually_ done?
# Mandatory bug: https://github.com/nim-lang/Nim/issues/9825
# Produces a zero-initialized value of `T` usable in `const` expressions -
# workaround for https://github.com/nim-lang/Nim/issues/9825 (empty const
# seq/array initialization bug).
func empty(T: typedesc): T = discard
const emptyChunk = @(empty(array[CHUNK_SIZE, byte]))
const emptyChunk = empty(array[CHUNK_SIZE, byte])
func merkleHash[T](lst: seq[T]): array[32, byte]
@ -186,13 +187,13 @@ func hashSSZ*(x: openArray[byte]): array[32, byte] =
func hashSSZ*(x: ValidatorRecord): array[32, byte] =
## Containers have their fields recursively hashed, concatenated and hashed
# XXX hash_ssz.py code contains special cases for some types, why?
# TODO hash_ssz.py code contains special cases for some types, why?
withHash:
# tmp.add(x.pubkey) # XXX uncertain future of public key format
# tmp.add(x.pubkey) # TODO uncertain future of public key format
h.update hashSSZ(x.withdrawal_credentials)
h.update hashSSZ(x.randao_skips)
h.update hashSSZ(x.balance)
# h.update hashSSZ(x.status) # XXX it's an enum, deal with it
# h.update hashSSZ(x.status) # TODO it's an enum, deal with it
h.update hashSSZ(x.last_status_change_slot)
h.update hashSSZ(x.exit_seq)
@ -207,7 +208,7 @@ func hashSSZ*[T: not enum](x: T): array[32, byte] =
merkleHash(x)
else:
## Containers have their fields recursively hashed, concatenated and hashed
# XXX could probaby compile-time-macro-sort fields...
# TODO could probaby compile-time-macro-sort fields...
var fields: seq[tuple[name: string, value: seq[byte]]]
for name, field in x.fieldPairs:
fields.add (name, @(hashSSZ(field)))
@ -262,7 +263,7 @@ func hashSSZ*(x: BeaconBlock): array[32, byte] =
func merkleHash[T](lst: seq[T]): array[32, byte] =
## Merkle tree hash of a list of homogenous, non-empty items
# XXX: the heap allocations here can be avoided by computing the merkle tree
# TODO: the heap allocations here can be avoided by computing the merkle tree
# recursively, but for now keep things simple and aligned with upstream
# Store length of list (to compensate for non-bijectiveness of padding)
@ -274,7 +275,7 @@ func merkleHash[T](lst: seq[T]): array[32, byte] =
var chunkz: seq[seq[byte]]
if len(lst) == 0:
chunkz.add emptyChunk
chunkz.add @emptyChunk
elif sizeof(hashSSZ(lst[0])) < CHUNK_SIZE:
# See how many items fit in a chunk
let itemsPerChunk = CHUNK_SIZE div sizeof(hashSSZ(lst[0]))
@ -284,6 +285,9 @@ func merkleHash[T](lst: seq[T]): array[32, byte] =
# Build a list of chunks based on the number of items in the chunk
for i in 0..<chunkz.len:
for j in 0..<itemsPerChunk:
if i == chunkz.len - 1:
let idx = i * itemsPerChunk + j
if idx >= lst.len: break # Last chunk may be partial!
chunkz[i].add hashSSZ(lst[i * itemsPerChunk + j])
else:
# Leave large items alone
@ -293,7 +297,7 @@ func merkleHash[T](lst: seq[T]): array[32, byte] =
while chunkz.len() > 1:
if chunkz.len() mod 2 == 1:
chunkz.add emptyChunk
chunkz.add @emptyChunk
for i in 0..<(chunkz.len div 2):
# As tradition dictates - one feature, at least one nim bug:
# https://github.com/nim-lang/Nim/issues/9684

View File

@ -13,37 +13,23 @@
import
options,
./extras,
./spec/[beaconstate, crypto, datatypes, digest, helpers],
./ssz,
milagro_crypto # nimble install https://github.com/status-im/nim-milagro-crypto@#master
func checkAttestations(state: BeaconState, blck: BeaconBlock):
seq[ProcessedAttestation] =
discard
# TODO there's an ugly mix of functional and procedural styles here that
# is due to how the spec is mixed as well - once we're past the prototype
# stage, this will need clearing up and unification.
func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
## When a new block is received, all participants must verify that the block
## makes sense and update their state accordingly. This function will return
## the new state, unless something breaks along the way
# XXX: simplistic way to be able to rollback state
var state = state
let
parent_hash = blck.ancestor_hashes[0]
slot = blck.slot
parent_slot = slot - 1 # XXX Not!! can skip slots...
# TODO actually get parent block, which means fixing up BeaconState refs above;
# there's no distinction between active/crystallized state anymore, etc.
state.recent_block_hashes =
append_to_recent_block_hashes(state.recent_block_hashes, parent_slot, slot,
parent_hash)
state.pending_attestations.add checkAttestations(state, blck)
doAssert blck.attestations.len <= MAX_ATTESTATION_COUNT
func checkAttestations(state: BeaconState,
blck: BeaconBlock,
parent_slot: uint64): Option[seq[ProcessedAttestation]] =
# TODO perf improvement potential..
if blck.attestations.len > MAX_ATTESTATION_COUNT:
return
var res: seq[ProcessedAttestation]
for attestation in blck.attestations:
if attestation.data.slot <= blck.slot - MIN_ATTESTATION_INCLUSION_DELAY:
return
@ -73,11 +59,88 @@ func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState]
# For now only check compilation
# doAssert attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)
debugEcho "Aggregate sig verify message: ", attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)
debugEcho "Aggregate sig verify message: ",
attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)
return some(state)
# Extend the list of AttestationRecord objects in the active_state, ordering the new additions in the same order as they came in the block.
# TODO
res.add ProcessedAttestation(
data: attestation.data,
attester_bitfield: attestation.attester_bitfield,
poc_bitfield: attestation.poc_bitfield,
slot_included: blck.slot
)
# Verify that the slot % len(get_indices_for_slot(state, slot-1)[0])'th attester in get_indices_for_slot(state, slot-1)[0]is part of at least one of the AttestationRecord objects; this attester can be considered to be the proposer of the block.
# TODO
some(res)
func verifyProposerSignature(state: BeaconState, blck: BeaconBlock): bool =
  ## Checks the block's proposer signature against the pubkey of the
  ## validator chosen as proposer for `blck.slot`.
  # The signed message covers the block with its own signature field zeroed.
  var unsigned_blck = blck
  unsigned_blck.proposer_signature = ValidatorSig()

  let
    signed_data = ProposalSignedData(
      slot: blck.slot,
      shard: BEACON_CHAIN_SHARD,
      block_hash: Eth2Digest(data: hashSSZ(unsigned_blck)))
    proposal_hash = hashSSZ(signed_data)
    proposer_pubkey =
      state.validators[get_beacon_proposer_index(state, blck.slot).int].pubkey

  verifyMessage(blck.proposer_signature, proposal_hash, proposer_pubkey)
func processRandaoReveal(state: var BeaconState,
                         blck: BeaconBlock,
                         parent_slot: uint64): bool =
  ## Verifies the proposer's RANDAO reveal against their stored commitment
  ## and, on success, mixes the reveal into `state.randao_mix` and rotates
  ## the proposer's commitment.
  ## Returns false when commit and reveal don't match; the caller is
  ## expected to discard the (partially mutated) state copy in that case.

  # Each skipped slot since the parent costs its would-be proposer one extra
  # randao_skip - deepening the preimage they must reveal next time.
  for slot in parent_slot + 1 ..< blck.slot:
    let skipped_proposer = get_beacon_proposer_index(state, slot)
    state.validators[skipped_proposer.int].randao_skips.inc()

  let proposer_index = get_beacon_proposer_index(state, blck.slot).int

  # Check that proposer commit and reveal match: hashing the reveal
  # (randao_skips + 1) times must reproduce the stored commitment.
  if repeat_hash(blck.randao_reveal,
                 state.validators[proposer_index].randao_skips + 1) !=
      state.validators[proposer_index].randao_commitment:
    return

  # Update state and proposer now that we're alright
  for i, b in state.randao_mix.data:
    state.randao_mix.data[i] = b xor blck.randao_reveal.data[i]

  # Mutate the validator record in place. The previous code took a
  # `var proposer = state.validators[...]` VALUE COPY and assigned to the
  # copy, silently dropping the commitment rotation and skip reset.
  state.validators[proposer_index].randao_commitment = blck.randao_reveal
  state.validators[proposer_index].randao_skips = 0

  true
func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
  ## When a new block is received, all participants must verify that the block
  ## makes sense and update their state accordingly. This function will return
  ## the new state, unless something breaks along the way

  # TODO: simplistic way to be able to rollback state
  # All work happens on this mutable copy; any early `return` yields
  # none(BeaconState) and discards the partial mutations - that IS the
  # rollback mechanism.
  var state = state

  let
    parent_hash = blck.ancestor_hashes[0]
    slot = blck.slot
    parent_slot = slot - 1 # TODO Not!! can skip slots...
    # NOTE(review): `slot - 1` underflows for slot == 0 (uint64) - confirm
    # the genesis block can never reach this path.

  # TODO actually get parent block, which means fixing up BeaconState refs above;
  # there's no distinction between active/crystallized state anymore, etc.
  state.recent_block_hashes =
    append_to_recent_block_hashes(state.recent_block_hashes, parent_slot, slot,
      parent_hash)

  # Each of the following checks either succeeds (possibly mutating `state`)
  # or aborts processing by returning none.
  let processed_attestations = checkAttestations(state, blck, parent_slot)
  if processed_attestations.isNone:
    return
  state.pending_attestations.add processed_attestations.get()

  if not verifyProposerSignature(state, blck):
    return

  if not processRandaoReveal(state, blck, parent_slot):
    return

  some(state) # Looks ok - move on with the updated state

View File

@ -6,7 +6,8 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
unittest,
sequtils, unittest,
./testhelpers,
../beacon_chain/extras,
../beacon_chain/spec/[beaconstate, datatypes, digest]
@ -14,5 +15,5 @@ suite "Beacon state":
# Smoke test
test "Smoke on_startup":
let state = on_startup([InitialValidator()], 0, Eth2Digest())
check: state.validators.len == 1
let state = on_startup(makeInitialValidators(CYCLE_LENGTH), 0, Eth2Digest())
check: state.validators.len == CYCLE_LENGTH

View File

@ -6,19 +6,21 @@
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
options, unittest,
../beacon_chain/spec/[datatypes, digest],
../beacon_chain/state_transition
options, sequtils, unittest,
./testhelpers,
../beacon_chain/spec/[beaconstate, datatypes, digest],
../beacon_chain/[extras, state_transition]
suite "Block processing":
## For now just test that we can compile and execute block processing with mock data.
test "Mock process_block":
let
state = BeaconState()
state = on_startup(makeInitialValidators(), 0, Eth2Digest())
blck = BeaconBlock(
slot: 1,
ancestor_hashes: @[Eth2Digest()]
)
newState = process_block(state, blck).get()
newState = process_block(state, blck)
check:
newState.genesis_time == state.genesis_time
newState.isNone() # Broken block, should fail processing

View File

@ -60,7 +60,7 @@ suite "Simple serialization":
expected_ser[1..^1].deserialize(Foo).isNone()
suite "Tree hashing":
# XXX Nothing but smoke tests for now..
# TODO Nothing but smoke tests for now..
test "Hash ValidatorRecord":
let vr = ValidatorRecord()

View File

@ -24,7 +24,7 @@ suite "Validators":
status: ACTIVE
), 32*1024)
# XXX the shuffling looks really odd, probably buggy
# TODO the shuffling looks really odd, probably buggy
let s = get_new_shuffling(Eth2Digest(), validators, 0)
check:
s.len == CYCLE_LENGTH

19
tests/testhelpers.nim Normal file
View File

@ -0,0 +1,19 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.
import
../beacon_chain/extras,
../beacon_chain/spec/[crypto, datatypes]
func makeValidatorPubKey(n: int): ValidatorPubKey =
  # Deterministic dummy key for tests: writes `n` into the first limb of the
  # underlying milagro field element, so each index yields a distinct pubkey.
  # NOTE(review): almost certainly not a valid curve point - fine only as
  # long as tests never verify signatures against it; confirm if that changes.
  result.point.x.a.g[0] = n
func makeInitialValidators*(n = CYCLE_LENGTH): seq[InitialValidator] =
  ## Builds `n` InitialValidator registration entries, each with a distinct
  ## deterministic dummy pubkey.
  result = newSeqOfCap[InitialValidator](n)
  for idx in 0 ..< n:
    result.add InitialValidator(pubkey: makeValidatorPubKey(idx))