add more epoch processing

* document several parts
* almost complete epoch state processing
parent 9d284b6eb4
commit 4b41010610
@@ -57,11 +57,11 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
 shard_and_committee_for_slots[i] = n
 shard_and_committee_for_slots[EPOCH_LENGTH + i] = n

-# The spec says to use validators, but it's actually indices..
-let validator_indices = get_active_validator_indices(validators)
+# TODO validators vs indices
+let active_validator_indices = get_active_validator_indices(validators)

 let persistent_committees = split(shuffle(
-validator_indices, ZERO_HASH), SHARD_COUNT)
+active_validator_indices, ZERO_HASH), SHARD_COUNT)

 BeaconState(
 validator_registry: validators,
@@ -112,9 +112,9 @@ func append_to_recent_block_hashes*(old_block_hashes: seq[Eth2Digest],
 result = old_block_hashes
 result.add repeat(parent_hash, d)

-proc get_attestation_participants*(state: BeaconState,
+func get_attestation_participants*(state: BeaconState,
 attestation_data: AttestationData,
-participation_bitfield: seq[byte]): seq[int] =
+participation_bitfield: seq[byte]): seq[Uint24] =
 ## Attestation participants in the attestation data are called out in a
 ## bit field that corresponds to the committee of the shard at the time - this
 ## function converts it to list of indices in to BeaconState.validators
@@ -138,3 +138,16 @@ proc get_attestation_participants*(state: BeaconState,
 if bit == 1:
 result.add(vindex)
 return # found the shard, we're done
+
+func change_validators*(state: var BeaconState,
+current_slot: uint64) =
+## Change validator registry.
+
+let res = get_changed_validators(
+state.validator_registry,
+state.latest_penalized_exit_balances,
+state.validator_registry_delta_chain_tip,
+current_slot
+)
+state.validator_registry = res.validators
+state.latest_penalized_exit_balances = res.latest_penalized_exit_balances
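For reference, a minimal standalone sketch (not part of this commit) of how a committee participation bitfield, like the one get_attestation_participants consumes, maps to validator indices. It assumes the MSB-first bit order the spec used at the time; names and values are illustrative only:

# Sketch only - illustrative names, not taken from the diff above.
proc bitfieldParticipants(committee: seq[int], bitfield: seq[byte]): seq[int] =
  ## Bit i of the bitfield corresponds to committee member i; a set bit means
  ## that member attested, so its validator index is included in the result.
  for i, validatorIndex in committee:
    let bit = (bitfield[i div 8].int shr (7 - (i mod 8))) and 1
    if bit == 1:
      result.add validatorIndex

# Example: a 3-member committee where only the second member attested.
echo bitfieldParticipants(@[10, 42, 7], @[0b0100_0000'u8])  # -> @[42]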
@@ -44,7 +44,7 @@ const
 SLOT_DURATION* = 6 # seconds
 MIN_ATTESTATION_INCLUSION_DELAY* = 4 # slots (~25 minutes)
 EPOCH_LENGTH* = 64 # slots (~6.4 minutes)
-MIN_VALIDATOR_SET_CHANGE_INTERVAL* = 2^8 # slots (~25.6 minutes)
+MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL* = 2^8 # slots (~25.6 minutes)
 POW_RECEIPT_ROOT_VOTING_PERIOD* = 2^10 # slots (~1.7 hours)
 SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD* = 2^17 # slots (~9 days)
 SQRT_E_DROP_TIME* = 2^17 # slots (~9 days); amount of time it takes for the
@@ -182,7 +182,7 @@ type
 slot_included*: uint64 # Slot in which it was included

 ValidatorStatusCodes* {.pure.} = enum
-PENDING_ACITVATION = 0
+PENDING_ACTIVATION = 0
 ACTIVE = 1
 EXITED_WITHOUT_PENALTY = 2
 EXITED_WITH_PENALTY = 3
@@ -104,7 +104,7 @@ proc get_shard_and_committees_for_slot*(
 let index = state.get_shard_and_committees_index(slot)
 state.shard_and_committee_for_slots[index]

-func get_beacon_proposer_index*(state: BeaconState, slot: uint64): uint64 =
+func get_beacon_proposer_index*(state: BeaconState, slot: uint64): Uint24 =
 ## From Casper RPJ mini-spec:
 ## When slot i begins, validator Vidx is expected
 ## to create ("propose") a block, which contains a pointer to some parent block
@@ -133,3 +133,5 @@ func get_fork_version*(fork_data: ForkData, slot: uint64): uint64 =
 func get_domain*(fork_data: ForkData, slot: uint64, domain_type: uint64): uint64 =
 # TODO Slot overflow? Or is slot 32 bits for all intents and purposes?
 (get_fork_version(fork_data, slot) shl 32) + domain_type
+
+func is_power_of_2*(v: uint64): bool = discard # TODO
@@ -7,7 +7,7 @@
 # Helpers and functions pertaining to managing the validator set

 import
-options, nimcrypto,
+options, nimcrypto, sequtils, math,
 eth_common,
 ../ssz,
 ./crypto, ./datatypes, ./digest, ./helpers
@@ -81,7 +81,7 @@ func get_active_validator_indices*(validators: openArray[ValidatorRecord]): seq[

 func get_new_shuffling*(seed: Eth2Digest,
 validators: openArray[ValidatorRecord],
-crosslinking_start_shard: int
+crosslinking_start_shard: uint64
 ): array[EPOCH_LENGTH, seq[ShardAndCommittee]] =
 ## Split up validators into groups at the start of every epoch,
 ## determining at what height they can make attestations and what shard they are making crosslinks for
@@ -91,7 +91,7 @@ func get_new_shuffling*(seed: Eth2Digest,
 active_validators = get_active_validator_indices(validators)
 committees_per_slot = clamp(
 len(active_validators) div EPOCH_LENGTH div TARGET_COMMITTEE_SIZE,
-1, SHARD_COUNT div EPOCH_LENGTH)
+1, SHARD_COUNT div EPOCH_LENGTH).uint64
 # Shuffle with seed
 shuffled_active_validator_indices = shuffle(active_validators, seed)
 # Split the shuffled list into cycle_length pieces
@@ -102,12 +102,13 @@ func get_new_shuffling*(seed: Eth2Digest,
 for slot, slot_indices in validators_per_slot:
 let
 shard_indices = split(slot_indices, committees_per_slot)
-shard_id_start = crosslinking_start_shard + slot * committees_per_slot
+shard_id_start =
+crosslinking_start_shard + slot.uint64 * committees_per_slot

 var committees = newSeq[ShardAndCommittee](shard_indices.len)
 for shard_position, indices in shard_indices:
 committees[shard_position].shard =
-uint64(shard_id_start + shard_position) mod SHARD_COUNT
+uint64(shard_id_start + shard_position.uint64) mod SHARD_COUNT.uint64
 committees[shard_position].committee = indices

 result[slot] = committees
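As a quick sanity check on the committees_per_slot clamp in the hunk above (only EPOCH_LENGTH = 64 appears in this diff; SHARD_COUNT = 1024 and TARGET_COMMITTEE_SIZE = 128 are assumed here for illustration):

# Sketch only: 16384 active validators spread over 64 slots, target size 128.
echo clamp(16384 div 64 div 128, 1, 1024 div 64)  # -> 2 committees per slot
echo clamp(100 div 64 div 128, 1, 1024 div 64)    # -> 1, never fewer than one committee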
@@ -173,3 +174,75 @@ func exit_validator*(index: Uint24,
 validator.pubkey,
 EXIT,
 )
+
+func get_changed_validators*(validators: seq[ValidatorRecord],
+latest_penalized_exit_balances: seq[uint64],
+validator_registry_delta_chain_tip: Eth2Digest,
+current_slot: uint64):
+tuple[
+validators: seq[ValidatorRecord],
+latest_penalized_exit_balances: seq[uint64],
+validator_registry_delta_chain_tip: Eth2Digest] =
+## Return changed validator registry and `latest_penalized_exit_balances`,
+## `validator_registry_delta_chain_tip`.
+
+# TODO inefficient
+var validators = validators
+
+# The active validators
+let active_validator_indices = get_active_validator_indices(validators)
+# The total balance of active validators
+# TODO strange spec code
+let total_balance = sum(mapIt(active_validator_indices, get_effective_balance(validators[it])))
+# The maximum total Gwei that can be deposited and withdrawn
+let max_allowable_change = max(
+uint64(2 * MAX_DEPOSIT * GWEI_PER_ETH),
+total_balance div MAX_CHURN_QUOTIENT.uint64
+)
+
+# Go through the list start to end, depositing and withdrawing as many as possible
+var total_changed: uint64 = 0
+var validator_registry_delta_chain_tip = validator_registry_delta_chain_tip
+for i in 0..<validators.len:
+if validators[i].status == PENDING_ACTIVATION:
+validators[i].status = ACTIVE
+total_changed.inc(get_effective_balance(validators[i]).int)
+let validator_registry_delta_chain_tip =
+get_new_validator_registry_delta_chain_tip(
+validator_registry_delta_chain_tip,
+i.Uint24,
+validators[i].pubkey,
+ACTIVATION,
+)
+elif validators[i].status == EXITED_WITHOUT_PENALTY:
+validators[i].latest_status_change_slot = current_slot
+total_changed.inc(get_effective_balance(validators[i]).int)
+validator_registry_delta_chain_tip =
+get_new_validator_registry_delta_chain_tip(
+validator_registry_delta_chain_tip,
+i.Uint24,
+validators[i].pubkey,
+EXIT,
+)
+if total_changed >= max_allowable_change:
+break
+
+# Calculate the total ETH that has been penalized in the last ~2-3 withdrawal periods
+let period_index =
+(current_slot div COLLECTIVE_PENALTY_CALCULATION_PERIOD.uint64).int
+let total_penalties = (
+(latest_penalized_exit_balances[period_index]) +
+(if period_index >= 1: latest_penalized_exit_balances[period_index - 1] else: 0) +
+(if period_index >= 2: latest_penalized_exit_balances[period_index - 2] else: 0)
+)
+
+# Calculate penalties for slashed validators
+func to_penalize(v: ValidatorRecord): bool =
+v.status == EXITED_WITH_PENALTY
+var validators_to_penalize = filter(validators, to_penalize)
+for v in validators_to_penalize.mitems():
+v.balance.dec(
+(get_effective_balance(v) * min(total_penalties * 3'u64, total_balance) div
+total_balance).int)
+
+(validators, latest_penalized_exit_balances, validator_registry_delta_chain_tip)
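The max_allowable_change cap added above limits how much stake can enter or leave the registry per change. A rough worked example, not part of the commit, with MAX_DEPOSIT = 32 ETH, GWEI_PER_ETH = 10^9 and MAX_CHURN_QUOTIENT = 32 assumed for illustration:

# Sketch only - the constants are assumptions, not taken from this diff.
let totalBalance = 10_000'u64 * 32'u64 * 1_000_000_000'u64        # 10 000 validators at 32 ETH, in Gwei
let maxAllowableChange = max(2'u64 * 32'u64 * 1_000_000_000'u64,  # 64 ETH floor
                             totalBalance div 32'u64)             # 1/32 of the total stake
echo maxAllowableChange  # -> 10000000000000 Gwei, i.e. about 10 000 ETH may churn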
@@ -5,27 +5,49 @@
 # * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
 # at your option. This file may not be copied, modified, or distributed except according to those terms.

-# A imcomplete implementation of the state transition function, as described
-# under "Per-block processing" in https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md
+# State transition, as described in
+# https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
 #
-# The code is here mainly to verify the data types and get an idea about
-# missing pieces - needs testing throughout
+# The purpose of this code right is primarily educational, to help piece
+# together the mechanics of the beacon state and to discover potential problem
+# areas.
+#
+# General notes about the code (TODO):
+# * It's inefficient - we quadratically copy, allocate and iterate when there
+# are faster options
+# * Weird styling - the sections taken from the spec use python styling while
+# the others use NEP-1 - helps grepping identifiers in spec
+# * We mix procedural and functional styles for no good reason, except that the
+# spec does so also.
+# * There are no tests, and likely lots of bugs.
+# * For validators, sometimes indices are used and sometimes instances - this
+# causes unnecessary friction in some sections
+# * For indices, we get a mix of uint64, Uint24 and int - this is currently
+# swept under the rug with casts
+# * The spec uses uint64 for data types, but functions in the spec often assume
+# signed bigint semantics - under- and overflows ensue
+# * Sane error handling is missing in most cases (yay, we'll get the chance to
+# debate exceptions again!)
+#
+# When updating the code, add TODO sections to mark where there are clear
+# improvements to be made - other than that, keep things similar to spec for
+# now.

 import
 math, options, sequtils,
-./extras,
+./extras, ./ssz,
 ./spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
-./ssz,
-milagro_crypto # nimble install https://github.com/status-im/nim-milagro-crypto@#master
+milagro_crypto

-# TODO there's an ugly mix of functional and procedural styles here that
-# is due to how the spec is mixed as well - once we're past the prototype
-# stage, this will need clearing up and unification.
-func checkAttestations(state: BeaconState,
-blck: BeaconBlock,
-parent_slot: uint64): Option[seq[PendingAttestationRecord]] =
-# TODO perf improvement potential..
+func processAttestations(state: var BeaconState,
+blck: BeaconBlock,
+parent_slot: uint64): bool =
+# Each block includes a number of attestations that the proposer chose. Each
+# attestation represents an update to a specific shard and is signed by a
+# committee of validators.
+# Here we make sanity checks for each attestation and it to the state - most
+# updates will happen at the epoch boundary where state updates happen in
+# bulk.
 if blck.attestations.len > MAX_ATTESTATIONS_PER_BLOCK:
 return

@@ -33,7 +55,8 @@ func checkAttestations(state: BeaconState,
 for attestation in blck.attestations:
 if attestation.data.slot <= blck.slot - MIN_ATTESTATION_INCLUSION_DELAY:
 return
-# TODO unsigned undeflow in spec
+
+# TODO spec - unsigned underflow
 if attestation.data.slot >= max(parent_slot.int - EPOCH_LENGTH + 1, 0).uint64:
 return

@@ -42,6 +65,7 @@ func checkAttestations(state: BeaconState,
 state.justified_slot
 else:
 state.previous_justified_slot

 if attestation.data.justified_slot != expected_justified_slot:
 return
+
@@ -77,14 +101,16 @@ func checkAttestations(state: BeaconState,
 debugEcho "Aggregate sig verify message: ",
 attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)

-res.add PendingAttestationRecord(
+# All checks passed - update state
+# TODO no rollback in case of errors
+state.latest_attestations.add PendingAttestationRecord(
 data: attestation.data,
 participation_bitfield: attestation.participation_bitfield,
 custody_bitfield: attestation.custody_bitfield,
 slot_included: blck.slot
 )

-some(res)
+true

 func verifyProposerSignature(state: BeaconState, blck: BeaconBlock): bool =
 var blck_without_sig = blck
@@ -109,9 +135,9 @@ func processRandaoReveal(state: var BeaconState,
 let proposer_index = get_beacon_proposer_index(state, slot)
 state.validator_registry[proposer_index.int].randao_skips.inc()

-var
+let
 proposer_index = get_beacon_proposer_index(state, blck.slot)
-proposer = state.validator_registry[proposer_index.int]
+proposer = state.validator_registry[proposer_index.int].addr

 # Check that proposer commit and reveal match
 if repeat_hash(blck.randao_reveal, proposer.randao_skips + 1) !=
@@ -143,31 +169,25 @@ func processSpecials(state: var BeaconState, blck: BeaconBlock): bool =
 # TODO incoming spec changes here..
 true

-func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
+func processBlock(state: var BeaconState, blck: BeaconBlock): bool =
 ## When a new block is received, all participants must verify that the block
 ## makes sense and update their state accordingly. This function will return
 ## the new state, unless something breaks along the way
-# TODO: simplistic way to be able to rollback state
-var state = state
+# TODO state not rolled back in case of failure

 let
 parent_hash = blck.ancestor_hashes[0]
 slot = blck.slot
 parent_slot = slot - 1 # TODO Not!! can skip slots...
 # TODO actually get parent block, which means fixing up BeaconState refs above;
-# there's no distinction between active/crystallized state anymore, etc.

 state.latest_block_hashes =
 append_to_recent_block_hashes(state.latest_block_hashes, parent_slot, slot,
 parent_hash)

-let processed_attestations = checkAttestations(state, blck, parent_slot)
-if processed_attestations.isNone:
+if not processAttestations(state, blck, parent_slot):
 return

-state.latest_attestations.add processed_attestations.get()
-
 if not verifyProposerSignature(state, blck):
 return

@@ -180,46 +200,69 @@ func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState]
 if not processSpecials(state, blck):
 return

-some(state) # Looks ok - move on with the updated state
+true

 func flatten[T](v: openArray[seq[T]]): seq[T] =
+# TODO not in nim - doh.
 for x in v: result.add x

 func get_epoch_boundary_attesters(
 state: BeaconState,
-attestations: openArray[PendingAttestationRecord]): seq[int] =
+attestations: openArray[PendingAttestationRecord]): seq[Uint24] =
+# TODO spec - add as helper?
 deduplicate(flatten(mapIt(attestations,
 get_attestation_participants(state, it.data, it.participation_bitfield))))

 func adjust_for_inclusion_distance[T](magnitude: T, dist: T): T =
 magnitude div 2 + (magnitude div 2) * MIN_ATTESTATION_INCLUSION_DELAY div dist

-func processEpoch*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
+func boundary_attestations(
+state: BeaconState, boundary_hash: Eth2Digest,
+attestations: openArray[PendingAttestationRecord]
+): seq[PendingAttestationRecord] =
+# TODO spec - add as helper?
+filterIt(attestations,
+it.data.epoch_boundary_hash == boundary_hash and
+it.data.justified_slot == state.justified_slot)
+
+func sum_effective_balances(
+state: BeaconState, validator_indices: openArray[Uint24]): uint64 =
+# TODO spec - add as helper?
+sum(mapIt(
+validator_indices, get_effective_balance(state.validator_registry[it]))
+)
+
+func lowerThan(candidate, current: Eth2Digest): bool =
+# return true iff candidate is "lower" than current, per spec rule:
+# "ties broken by favoring lower `shard_block_hash` values"
+# TODO spec - clarify hash ordering..
+for i, v in current.data:
+if v > candidate.data[i]: return true
+return false
+
+func processEpoch(state: var BeaconState, blck: BeaconBlock): bool =
 ## Epoch processing happens every time we've passed EPOCH_LENGTH blocks.
 ## Because some slots may be skipped, it may happen that we go through the
 ## loop more than once - each time the latest_state_recalculation_slot will be
 ## increased by EPOCH_LENGTH.

-# TODO: simplistic way to be able to rollback state
-var state = state
-# Precomputation
-
 while blck.slot >= EPOCH_LENGTH.uint64 + state.latest_state_recalculation_slot:
+# Convenience shortcut, from spec
 let s = state.latest_state_recalculation_slot

+# Precomputation
 let
-active_validators =
-mapIt(get_active_validator_indices(state.validator_registry),
-state.validator_registry[it])
-
-total_balance = sum(mapIt(active_validators, get_effective_balance(it)))
+active_validator_indices =
+get_active_validator_indices(state.validator_registry)
+total_balance = sum_effective_balances(state, active_validator_indices)

 total_balance_in_eth = total_balance.int div GWEI_PER_ETH

 # The per-slot maximum interest rate is `2/reward_quotient`.)
 reward_quotient = BASE_REWARD_QUOTIENT * int_sqrt(total_balance_in_eth)

+# TODO not in spec, convenient
+epoch_boundary_hash = get_block_hash(state, blck, s)
+
 proc base_reward(v: ValidatorRecord): uint64 =
 get_effective_balance(v) div reward_quotient.uint64

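adjust_for_inclusion_distance above pays out at most `magnitude` and decays toward half of it as inclusion is delayed. A small standalone check, not part of the commit, with MIN_ATTESTATION_INCLUSION_DELAY = 4 taken from the constants hunk earlier:

# Sketch only - mirrors the formula above with the delay constant inlined.
func adjust(magnitude, dist: uint64): uint64 =
  magnitude div 2 + (magnitude div 2) * 4'u64 div dist

echo adjust(1000, 4)   # -> 1000: included as early as allowed, full reward
echo adjust(1000, 40)  # -> 550:  ten times later, just over half the reward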
@@ -229,128 +272,268 @@ func processEpoch*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
 this_epoch_attestations = filterIt(state.latest_attestations,
 s <= it.data.slot and it.data.slot < s + EPOCH_LENGTH)

-this_epoch_boundary_attestations = filterIt(this_epoch_attestations,
-it.data.epoch_boundary_hash == get_block_hash(state, blck, s) and
-it.data.justified_slot == state.justified_slot)
+this_epoch_boundary_attestations =
+boundary_attestations(state, epoch_boundary_hash,
+this_epoch_attestations)

 this_epoch_boundary_attesters =
 get_epoch_boundary_attesters(state, this_epoch_attestations)

-this_epoch_boundary_attesting_balance = sum(
-mapIt(this_epoch_boundary_attesters,
-get_effective_balance(state.validator_registry[it]))
-)
+this_epoch_boundary_attesting_balance =
+sum_effective_balances(state, this_epoch_boundary_attesters)

 let
 previous_epoch_attestations = filterIt(state.latest_attestations,
 s <= it.data.slot + EPOCH_LENGTH and it.data.slot < s)
-previous_epoch_boundary_attestations = filterIt(previous_epoch_attestations,
-it.data.epoch_boundary_hash == get_block_hash(state, blck, s) and
-it.data.justified_slot == state.justified_slot)
+
+previous_epoch_boundary_attestations =
+boundary_attestations(state, epoch_boundary_hash,
+previous_epoch_attestations)

 previous_epoch_boundary_attesters =
 get_epoch_boundary_attesters(state, previous_epoch_boundary_attestations)
-previous_epoch_boundary_attesting_balance = sum(
-mapIt(previous_epoch_boundary_attesters,
-get_effective_balance(state.validator_registry[it]))
-)
-
-# TODO gets pretty hairy here
+previous_epoch_boundary_attesting_balance =
+sum_effective_balances(state, this_epoch_boundary_attesters)
+
+# TODO this is really hairy - we cannot capture `state` directly, but we
+# can capture a pointer to it - this is safe because we don't leak
+# these closures outside this scope, but still..
+let statePtr = state.addr
 func attesting_validators(
-obj: ShardAndCommittee, shard_block_hash: Eth2Digest): seq[int] =
+obj: ShardAndCommittee, shard_block_hash: Eth2Digest): seq[Uint24] =
 flatten(
 mapIt(
 filterIt(concat(this_epoch_attestations, previous_epoch_attestations),
 it.data.shard == obj.shard and
 it.data.shard_block_hash == shard_block_hash),
-get_attestation_participants(state, it.data, it.participation_bitfield)))
+get_attestation_participants(statePtr[], it.data, it.participation_bitfield)))

-# TODO which shard_block_hash:es?
-# * Let `attesting_validators(obj)` be equal to `attesting_validators(obj, shard_block_hash)` for the value of `shard_block_hash` such that `sum([get_effective_balance(v) for v in attesting_validators(obj, shard_block_hash)])` is maximized (ties broken by favoring lower `shard_block_hash` values).
-# * Let `total_attesting_balance(obj)` be the sum of the balances-at-stake of `attesting_validators(obj)`.
-# * Let `winning_hash(obj)` be the winning `shard_block_hash` value.
-# * Let `total_balance(obj) = sum([get_effective_balance(v) for v in obj.committee])`.
+func winning_hash(obj: ShardAndCommittee): Eth2Digest =
+# * Let `winning_hash(obj)` be the winning `shard_block_hash` value.
+# ... such that `sum([get_effective_balance(v) for v in attesting_validators(obj, shard_block_hash)])`
+# is maximized (ties broken by favoring lower `shard_block_hash` values).
+let candidates =
+mapIt(
+filterIt(concat(this_epoch_attestations, previous_epoch_attestations),
+it.data.shard == obj.shard),
+it.data.shard_block_hash)

-# Let `inclusion_slot(v)` equal `a.slot_included` for the attestation `a` where `v` is in `get_attestation_participants(state, a.data, a.participation_bitfield)`, and `inclusion_distance(v) = a.slot_included - a.data.slot` for the same attestation. We define a function `adjust_for_inclusion_distance(magnitude, distance)` which adjusts the reward of an attestation based on how long it took to get included (the longer, the lower the reward). Returns a value between 0 and `magnitude`.
+var max_hash = candidates[0]
+var max_val =
+sum_effective_balances(statePtr[], attesting_validators(obj, max_hash))
+for candidate in candidates[1..^1]:
+let val = sum_effective_balances(statePtr[], attesting_validators(obj, candidate))
+if val > max_val or (val == max_val and candidate.lowerThan(max_hash)):
+max_hash = candidate
+max_val = val
+max_hash

-# Adjust justified slots and crosslink status
+func attesting_validators(obj: ShardAndCommittee): seq[Uint24] =
+attesting_validators(obj, winning_hash(obj))

-var new_justified_slot: Option[uint64]
-# overflow intentional!
-state.justified_slot_bitfield = state.justified_slot_bitfield * 2
+func total_attesting_balance(obj: ShardAndCommittee): uint64 =
+sum_effective_balances(statePtr[], attesting_validators(obj))

-if 3'u64 * previous_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
-# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
-state.justified_slot_bitfield = state.justified_slot_bitfield or 2
-new_justified_slot = some(s - EPOCH_LENGTH)
+func total_balance_sac(obj: ShardAndCommittee): uint64 =
+sum_effective_balances(statePtr[], obj.committee)

-if 3'u64 * this_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
-# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
-state.justified_slot_bitfield = state.justified_slot_bitfield or 1
-new_justified_slot = some(s)
+func inclusion_slot(v: Uint24): uint64 =
+for a in statePtr[].latest_attestations:
+if v in get_attestation_participants(statePtr[], a.data, a.participation_bitfield):
+return a.slot_included
+assert false # shouldn't happen..

-if state.justified_slot == s - EPOCH_LENGTH and
-state.justified_slot_bitfield mod 4 == 3:
-state.finalized_slot = state.justified_slot
-if state.justified_slot == s - EPOCH_LENGTH - EPOCH_LENGTH and
-state.justified_slot_bitfield mod 8 == 7:
-state.finalized_slot = state.justified_slot
+func inclusion_distance(v: Uint24): uint64 =
+for a in statePtr[].latest_attestations:
+if v in get_attestation_participants(statePtr[], a.data, a.participation_bitfield):
+return a.slot_included - a.data.slot
+assert false # shouldn't happen..

-if state.justified_slot == s - EPOCH_LENGTH - 2 * EPOCH_LENGTH and
-state.justified_slot_bitfield mod 16 in [15'u64, 14]:
-state.finalized_slot = state.justified_slot
+block: # Adjust justified slots and crosslink status
+var new_justified_slot: Option[uint64]
+# TODO where's that bitfield type when you need it?
+# TODO what happens with the bits that drop off..?
+state.justified_slot_bitfield = state.justified_slot_bitfield shl 1

-state.previous_justified_slot = state.justified_slot
+if 3'u64 * previous_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
+# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
+state.justified_slot_bitfield = state.justified_slot_bitfield or 2
+new_justified_slot = some(s - EPOCH_LENGTH)

-if new_justified_slot.isSome():
-state.justified_slot = new_justified_slot.get()
+if 3'u64 * this_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
+# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
+state.justified_slot_bitfield = state.justified_slot_bitfield or 1
+new_justified_slot = some(s)

-# for obj in state.shard_and_committee_for_slots:
-# 3 * total_attesting_balance(obj) >= 2 * total_balance(obj):
-# state.crosslinks[shard] = CrosslinkRecord(
-# slot: latest_state_recalculation_slot + EPOCH_LENGTH,
-# hash: winning_hash(obj))
+if state.justified_slot == s - EPOCH_LENGTH and
+state.justified_slot_bitfield mod 4 == 3:
+state.finalized_slot = state.justified_slot
+if state.justified_slot == s - EPOCH_LENGTH - EPOCH_LENGTH and
+state.justified_slot_bitfield mod 8 == 7:
+state.finalized_slot = state.justified_slot

-# Balance recalculations related to FFG rewards
-let
-# The portion lost by offline [validators](#dfn-validator) after `D`
-# epochs is about `D*D/2/inactivity_penalty_quotient`.
-inactivity_penalty_quotient = SQRT_E_DROP_TIME^2
-time_since_finality = blck.slot - state.finalized_slot
+if state.justified_slot == s - EPOCH_LENGTH - 2 * EPOCH_LENGTH and
+state.justified_slot_bitfield mod 16 in [15'u64, 14]:
+state.finalized_slot = state.justified_slot

-if time_since_finality <= 4'u64 * EPOCH_LENGTH:
-# for v in previous_epoch_boundary_attesters:
-# state.validators[v].balance.inc(adjust_for_inclusion_distance(
-# base_reward(state.validators[v]) *
-# prev_cycle_boundary_attesting_balance div total_balance,
-# inclusion_distance(v)))
+state.previous_justified_slot = state.justified_slot

-for v in get_active_validator_indices(state.validator_registry):
-if v notin previous_epoch_boundary_attesters:
-state.validator_registry[v].balance.dec(
-base_reward(state.validator_registry[v]).int)
-else:
-# Any validator in `prev_cycle_boundary_attesters` sees their balance
-# unchanged.
-# Others might get penalized:
-for vindex, v in state.validator_registry.mpairs():
-if (v.status == ACTIVE and vindex notin previous_epoch_boundary_attesters) or
-v.status == EXITED_WITH_PENALTY:
-v.balance.dec(
-(base_reward(v) + get_effective_balance(v) * time_since_finality div
-inactivity_penalty_quotient.uint64).int)
+if new_justified_slot.isSome():
+state.justified_slot = new_justified_slot.get()

-# For each `v` in `prev_cycle_boundary_attesters`, we determine the proposer `proposer_index = get_beacon_proposer_index(state, inclusion_slot(v))` and set `state.validators[proposer_index].balance += base_reward(v) // INCLUDER_REWARD_SHARE_QUOTIENT`.
+for sac in state.shard_and_committee_for_slots:
+# TODO or just state.shard_and_committee_for_slots[s]?
+for obj in sac:
+if 3'u64 * total_attesting_balance(obj) >= 2'u64 * total_balance_sac(obj):
+state.latest_crosslinks[obj.shard] = CrosslinkRecord(
+slot: state.latest_state_recalculation_slot + EPOCH_LENGTH,
+shard_block_hash: winning_hash(obj))

-# Balance recalculations related to crosslink rewards
+block: # Balance recalculations related to FFG rewards
+let
+# The portion lost by offline [validators](#dfn-validator) after `D`
+# epochs is about `D*D/2/inactivity_penalty_quotient`.
+inactivity_penalty_quotient = SQRT_E_DROP_TIME^2
+time_since_finality = blck.slot - state.finalized_slot

-# Ethereum 1.0 chain related rules
+if time_since_finality <= 4'u64 * EPOCH_LENGTH:
+for v in previous_epoch_boundary_attesters:
+state.validator_registry[v].balance.inc(adjust_for_inclusion_distance(
+base_reward(state.validator_registry[v]) *
+previous_epoch_boundary_attesting_balance div total_balance,
+inclusion_distance(v)).int)

-# Validator registry change
+for v in active_validator_indices:
+if v notin previous_epoch_boundary_attesters:
+state.validator_registry[v].balance.dec(
+base_reward(state.validator_registry[v]).int)
+else:
+# Any validator in `prev_cycle_boundary_attesters` sees their balance
+# unchanged.
+# Others might get penalized:
+for vindex, v in state.validator_registry.mpairs():
+if (v.status == ACTIVE and
+vindex.Uint24 notin previous_epoch_boundary_attesters) or
+v.status == EXITED_WITH_PENALTY:
+v.balance.dec(
+(base_reward(v) + get_effective_balance(v) * time_since_finality div
+inactivity_penalty_quotient.uint64).int)

-# If a validator registry change does NOT happen
+for v in previous_epoch_boundary_attesters:
+let proposer_index = get_beacon_proposer_index(state, inclusion_slot(v))
+state.validator_registry[proposer_index].balance.inc(
+(base_reward(state.validator_registry[v]) div INCLUDER_REWARD_QUOTIENT.uint64).int)

-# Proposer reshuffling
+block: # Balance recalculations related to crosslink rewards
+for sac in state.shard_and_committee_for_slots[0 ..< EPOCH_LENGTH]:
+for obj in sac:
+for vindex in obj.committee:
+let v = state.validator_registry[vindex].addr

-# Finally...
+if vindex in attesting_validators(obj):
+v.balance.inc(adjust_for_inclusion_distance(
+base_reward(v[]) * total_attesting_balance(obj) div total_balance_sac(obj),
+inclusion_distance(vindex)).int)
+else:
+v.balance.dec(base_reward(v[]).int)

+block: # Ethereum 1.0 chain related rules
+if state.latest_state_recalculation_slot mod
+POW_RECEIPT_ROOT_VOTING_PERIOD.uint64 == 0:
+for x in state.candidate_pow_receipt_roots:
+if x.votes * 2 >= POW_RECEIPT_ROOT_VOTING_PERIOD.uint64:
+state.processed_pow_receipt_root = x.candidate_pow_receipt_root
+break
+state.candidate_pow_receipt_roots = @[]
+
+block: # Validator registry change
+if state.finalized_slot > state.validator_registry_latest_change_slot and
+allIt(state.shard_and_committee_for_slots,
+allIt(it,
+state.latest_crosslinks[it.shard].slot >
+state.validator_registry_latest_change_slot)):
+state.change_validators(s)
+state.validator_registry_latest_change_slot = s + EPOCH_LENGTH
+for i in 0..<EPOCH_LENGTH:
+state.shard_and_committee_for_slots[i] =
+state.shard_and_committee_for_slots[EPOCH_LENGTH + i]
+# https://github.com/ethereum/eth2.0-specs/issues/223
+let next_start_shard = (state.shard_and_committee_for_slots[^1][^1].shard + 1) mod SHARD_COUNT
+for i, v in get_new_shuffling(
+state.next_seed, state.validator_registry, next_start_shard):
+state.shard_and_committee_for_slots[i + EPOCH_LENGTH] = v
+state.next_seed = state.randao_mix
+else:
+# If a validator registry change does NOT happen
+for i in 0..<EPOCH_LENGTH:
+state.shard_and_committee_for_slots[i] =
+state.shard_and_committee_for_slots[EPOCH_LENGTH + i]
+let time_since_finality = blck.slot - state.validator_registry_latest_change_slot
+let start_shard = state.shard_and_committee_for_slots[0][0].shard
+if time_since_finality * EPOCH_LENGTH <= MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL.uint64 or
+is_power_of_2(time_since_finality):
+for i, v in get_new_shuffling(
+state.next_seed, state.validator_registry, start_shard):
+state.shard_and_committee_for_slots[i + EPOCH_LENGTH] = v
+state.next_seed = state.randao_mix
+# Note that `start_shard` is not changed from the last epoch.
+
+block: # Proposer reshuffling
+let active_validator_indices = get_active_validator_indices(state.validator_registry)
+let num_validators_to_reshuffle = len(active_validator_indices) div SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD
+for i in 0..<num_validators_to_reshuffle:
+# Multiplying i to 2 to ensure we have different input to all the required hashes in the shuffling
+# and none of the hashes used for entropy in this loop will be the same
+let validator_index = 0.Uint24 # active_validator_indices[hash(state.randao_mix + bytes8(i * 2)) mod len(active_validator_indices)]
+let new_shard = 0'u64 # hash(state.randao_mix + bytes8(i * 2 + 1)) mod SHARD_COUNT
+let shard_reassignment_record = ShardReassignmentRecord(
+validator_index: validator_index,
+shard: new_shard,
+slot: s + SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD.uint64
+)
+state.persistent_committee_reassignments.add(shard_reassignment_record)
+
+while len(state.persistent_committee_reassignments) > 0 and
+state.persistent_committee_reassignments[0].slot <= s:
+let reassignment = state.persistent_committee_reassignments[0]
+state.persistent_committee_reassignments.delete(0)
+for committee in state.persistent_committees.mitems():
+if reassignment.validator_index in committee:
+committee.delete(committee.find(reassignment.validator_index))
+state.persistent_committees[reassignment.shard.int].add(
+reassignment.validator_index)
+
+block: # Finally...
+# Remove all attestation records older than slot `s`.
+for i, v in state.validator_registry:
+if v.balance < MIN_BALANCE.uint64 and v.status == ACTIVE:
+exit_validator(i.Uint24, state, penalize=false, current_slot=blck.slot)
+state.latest_block_hashes = state.latest_block_hashes[EPOCH_LENGTH..^1]
+state.latest_state_recalculation_slot.inc(EPOCH_LENGTH)
+
+true
+
+func updateState*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
+## Adjust `state` according to the information in `blck`.
+## Returns the new state, or `none` if the block is invalid.
+
+# TODO check to which extent this copy can be avoided (considering forks etc),
+# for now, it serves as a reminder that we need to handle invalid blocks
+# somewhere..
+# TODO many functions will mutate `state` partially without rolling back
+# the changes in case of failure (look out for `var BeaconState` and
+# bool return values...)
+var state = state
+
+# Block processing is split up into two phases - lightweight updates done
+# for each block, and bigger updates done for each epoch.
+
+# Lightweight updates that happen for every block
+if not processBlock(state, blck): return
+
+# Heavy updates that happen for every epoch
+if not processEpoch(state, blck): return
+
+# All good, we can return the new state
 some(state)
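The justified/finalized bookkeeping in processEpoch above keeps one bit per recent epoch in justified_slot_bitfield. A compact standalone illustration of the shl/or pattern and the `mod 4 == 3` finalization test it uses (values are made up, not part of the commit):

# Sketch only: two consecutive epochs where this epoch's boundary gets a 2/3 vote.
var bitfield = 0'u64
bitfield = bitfield shl 1
bitfield = bitfield or 1   # epoch N justified   -> 0b01
bitfield = bitfield shl 1
bitfield = bitfield or 1   # epoch N+1 justified -> 0b11
echo bitfield mod 4 == 3   # true: the last two epochs are justified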
@@ -14,13 +14,13 @@ import
 suite "Block processing":
 ## For now just test that we can compile and execute block processing with mock data.

-test "Mock process_block":
+test "Mock state update":
 let
 state = on_startup(makeInitialValidators(), 0, Eth2Digest())
 blck = BeaconBlock(
 slot: 1,
 ancestor_hashes: @[Eth2Digest()]
 )
-newState = process_block(state, blck)
+newState = updateState(state, blck)
 check:
 newState.isNone() # Broken block, should fail processing