Merge pull request #29 from status-im/epoch-state-2
add more epoch processing
commit 360cf313e8
@@ -164,7 +164,7 @@ proc scheduleCycleActions(node: BeaconNode) =
let
committeesIdx = get_shard_and_committees_index(node.beaconState, slot.uint64)
for shard in node.beaconState.shard_and_committee_for_slots[committees_idx]:
for shard in node.beaconState.shard_committees_at_slots[committees_idx]:
for validatorIdx in shard.committee:
let attachedValidator = node.getAttachedValidator(validatorIdx)
if attachedValidator != nil:
@@ -52,16 +52,16 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
initial_shuffling = get_new_shuffling(Eth2Digest(), validators, 0)

# initial_shuffling + initial_shuffling in spec, but more ugly
var shard_and_committee_for_slots: array[2 * EPOCH_LENGTH, seq[ShardAndCommittee]]
var shard_committees_at_slots: array[2 * EPOCH_LENGTH, seq[ShardCommittee]]
for i, n in initial_shuffling:
shard_and_committee_for_slots[i] = n
shard_and_committee_for_slots[EPOCH_LENGTH + i] = n
shard_committees_at_slots[i] = n
shard_committees_at_slots[EPOCH_LENGTH + i] = n

# The spec says to use validators, but it's actually indices..
let validator_indices = get_active_validator_indices(validators)
# TODO validators vs indices
let active_validator_indices = get_active_validator_indices(validators)

let persistent_committees = split(shuffle(
validator_indices, ZERO_HASH), SHARD_COUNT)
active_validator_indices, ZERO_HASH), SHARD_COUNT)

BeaconState(
validator_registry: validators,
@@ -72,7 +72,7 @@ func on_startup*(initial_validator_entries: openArray[InitialValidator],
# Randomness and committees
randao_mix: ZERO_HASH,
next_seed: ZERO_HASH,
shard_and_committee_for_slots: shard_and_committee_for_slots,
shard_committees_at_slots: shard_committees_at_slots,
persistent_committees: persistent_committees,

# Finality
@@ -112,9 +112,9 @@ func append_to_recent_block_hashes*(old_block_hashes: seq[Eth2Digest],
result = old_block_hashes
result.add repeat(parent_hash, d)

proc get_attestation_participants*(state: BeaconState,
func get_attestation_participants*(state: BeaconState,
attestation_data: AttestationData,
participation_bitfield: seq[byte]): seq[int] =
participation_bitfield: seq[byte]): seq[Uint24] =
## Attestation participants in the attestation data are called out in a
## bit field that corresponds to the committee of the shard at the time - this
## function converts it to list of indices in to BeaconState.validators
@@ -138,3 +138,16 @@ proc get_attestation_participants*(state: BeaconState,
if bit == 1:
result.add(vindex)
return # found the shard, we're done

func change_validators*(state: var BeaconState,
current_slot: uint64) =
## Change validator registry.

let res = get_changed_validators(
state.validator_registry,
state.latest_penalized_exit_balances,
state.validator_registry_delta_chain_tip,
current_slot
)
state.validator_registry = res.validators
state.latest_penalized_exit_balances = res.latest_penalized_exit_balances
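
As an aside, not part of the diff: the participation bitfield handled by get_attestation_participants above carries one bit per committee member, in committee order, and each set bit selects that member's validator index. A minimal standalone sketch of that mapping, assuming most-significant-bit-first ordering within each byte; the helper name participantsFromBitfield is illustrative, not from the repository:

func participantsFromBitfield(committee: seq[int], bitfield: seq[byte]): seq[int] =
  # One bit per committee member; a set bit means "this member participated".
  for i, validatorIdx in committee:
    let bit = (bitfield[i div 8].int shr (7 - (i mod 8))) and 1
    if bit == 1:
      result.add validatorIdx

# Example: committee [10, 11, 12, 13] with bitfield 0b1010_0000 selects 10 and 12.
assert participantsFromBitfield(@[10, 11, 12, 13], @[0b1010_0000'u8]) == @[10, 12]
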
@@ -16,7 +16,7 @@
# https://github.com/ethereum/eth2.0-specs/blob/master/specs/beacon-chain.md
#
# How wrong the code is:
# https://github.com/ethereum/eth2.0-specs/compare/2983e68f0305551083fac7fcf9330c1fc9da3411...master
# https://github.com/ethereum/eth2.0-specs/compare/f956135763cbb410a8c28b3a509f14f750ff287c...master
#
# These datatypes are used as specifications for serialization - thus should not
# be altered outside of what the spec says. Likewise, they should not be made
@@ -31,27 +31,30 @@ const
SHARD_COUNT* = 1024 # a constant referring to the number of shards
TARGET_COMMITTEE_SIZE* = 2^8 # validators
MAX_ATTESTATIONS_PER_BLOCK* = 2^7 # attestations
MAX_DEPOSIT* = 2^5 # ETH
MIN_BALANCE* = 2^4 # ETH
POW_CONTRACT_MERKLE_TREE_DEPTH* = 2^5 #
MAX_BALANCE_CHURN_QUOTIENT* = 2^5 # ETH
GWEI_PER_ETH* = 10^9 # Gwei/ETH
BEACON_CHAIN_SHARD_NUMBER* = not 0'u64

DEPOSIT_CONTRACT_TREE_DEPTH* = 2^5 #
MIN_DEPOSIT* = 2^0 #
MAX_DEPOSIT* = 2^5 #

# Initial values

INITIAL_FORK_VERSION* = 0 #
INITIAL_SLOT_NUMBER* = 0 #
GWEI_PER_ETH* = 10^9 # Gwei/ETH
ZERO_HASH* = Eth2Digest()
BEACON_CHAIN_SHARD_NUMBER* = not 0'u64

# Time constants
SLOT_DURATION* = 6 # seconds
MIN_ATTESTATION_INCLUSION_DELAY* = 4 # slots (~25 minutes)
EPOCH_LENGTH* = 64 # slots (~6.4 minutes)
MIN_VALIDATOR_SET_CHANGE_INTERVAL* = 2^8 # slots (~25.6 minutes)
MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL* = 2^8 # slots (~25.6 minutes)
POW_RECEIPT_ROOT_VOTING_PERIOD* = 2^10 # slots (~1.7 hours)
SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD* = 2^17 # slots (~9 days)
SQRT_E_DROP_TIME* = 2^17 # slots (~9 days); amount of time it takes for the
# quadratic leak to cut deposits of non-participating
# validators by ~39.4%
COLLECTIVE_PENALTY_CALCULATION_PERIOD* = 2^20 # slots (~2.4 months)
DELETION_PERIOD* = 2^22 # slots (~290 days)
ZERO_BALANCE_VALIDATOR_TTL* = 2^22 # slots (~290 days)

# Quotients
BASE_REWARD_QUOTIENT* = 2^11 # per-cycle interest rate assuming all validators are
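
An aside, not part of the diff: a quick sanity check of the time comments above, using only the values shown (SLOT_DURATION = 6 seconds, EPOCH_LENGTH = 64 slots, the 2^8-slot registry change interval). Note that the "(~25 minutes)" comment on MIN_ATTESTATION_INCLUSION_DELAY looks like a leftover: 4 slots at 6 seconds is about 24 seconds.

import math

const
  SLOT_DURATION = 6                            # seconds, as above
  EPOCH_LENGTH = 64                            # slots
  MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL = 2^8 # slots
  MIN_ATTESTATION_INCLUSION_DELAY = 4          # slots

assert EPOCH_LENGTH * SLOT_DURATION == 384                             # ~6.4 minutes
assert MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL * SLOT_DURATION == 1536  # ~25.6 minutes
assert MIN_ATTESTATION_INCLUSION_DELAY * SLOT_DURATION == 24           # seconds, not minutes
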
@@ -60,9 +63,7 @@ const
# million participating ETH.
WHISTLEBLOWER_REWARD_QUOTIENT* = 2^9 # ?
INCLUDER_REWARD_QUOTIENT* = 2^3 #
MAX_CHURN_QUOTIENT* = 2^5 # At most `1/MAX_VALIDATOR_CHURN_QUOTIENT` of the
# validators can change during each validator set
# change.
INACTIVITY_PENALTY_QUOTIENT* = 2^34 #

type
Uint24* = range[0'u32 .. 0xFFFFFF'u32] # TODO: wrap-around
@@ -84,7 +85,7 @@ type
data*: AttestationData
participation_bitfield*: seq[byte] # Attester participation bitfield
custody_bitfield*: seq[byte] # Proof of custody bitfield
aggregate_sig*: ValidatorSig # BLS aggregate signature
aggregate_signature*: ValidatorSig # BLS aggregate signature

AttestationData* = object
slot*: uint64 # Slot number
@@ -98,7 +99,7 @@ type

ProposalSignedData* = object
slot*: uint64 # Slot number
shard*: uint64 # Shard number (or `2**64 - 1` for beacon chain)
shard*: uint64 # Shard number (or `BEACON_CHAIN_SHARD_NUMBER` for beacon chain)
block_hash*: Eth2Digest # Block hash

SpecialRecord* = object
@@ -116,7 +117,7 @@ type
# Randomness and committees
randao_mix*: Eth2Digest # RANDAO state
next_seed*: Eth2Digest # Randao seed used for next shuffling
shard_and_committee_for_slots*: array[2 * EPOCH_LENGTH, seq[ShardAndCommittee]] ## \
shard_committees_at_slots*: array[2 * EPOCH_LENGTH, seq[ShardCommittee]] ## \
## Committee members and their assigned shard, per slot, covers 2 cycles
## worth of assignments
persistent_committees*: seq[seq[Uint24]] # Persistent shard committees
@@ -157,9 +158,10 @@ type
slot*: uint64 # Slot number
shard_block_hash*: Eth2Digest # Shard chain block hash

ShardAndCommittee* = object
ShardCommittee* = object
shard*: uint64 # Shard number
committee*: seq[Uint24] # Validator indices
total_validator_count*: uint64 # Total validator count (for proofs of custody)

ShardReassignmentRecord* = object
validator_index*: Uint24 # Which validator to reassign
@@ -170,23 +172,23 @@ type
candidate_pow_receipt_root*: Eth2Digest # Candidate PoW receipt root
votes*: uint64 # Vote count

ForkData* = object
pre_fork_version*: uint64 # Previous fork version
post_fork_version*: uint64 # Post fork version
fork_slot*: uint64 # Fork slot number

PendingAttestationRecord* = object
data*: AttestationData # Signed data
participation_bitfield*: seq[byte] # Attester participation bitfield
custody_bitfield*: seq[byte] # Proof of custody bitfield
slot_included*: uint64 # Slot in which it was included

ForkData* = object
pre_fork_version*: uint64 # Previous fork version
post_fork_version*: uint64 # Post fork version
fork_slot*: uint64 # Fork slot number

ValidatorStatusCodes* {.pure.} = enum
PENDING_ACITVATION = 0
PENDING_ACTIVATION = 0
ACTIVE = 1
EXITED_WITHOUT_PENALTY = 2
EXITED_WITH_PENALTY = 3
PENDING_EXIT = 29 # https://github.com/ethereum/eth2.0-specs/issues/216
ACTIVE_PENDING_EXIT = 2
EXITED_WITHOUT_PENALTY = 3
EXITED_WITH_PENALTY = 4

SpecialRecordType* {.pure.} = enum
Logout = 0
@@ -100,11 +100,11 @@ func get_shard_and_committees_index*(state: BeaconState, slot: uint64): uint64 =
slot - earliest_slot_in_array

proc get_shard_and_committees_for_slot*(
state: BeaconState, slot: uint64): seq[ShardAndCommittee] =
state: BeaconState, slot: uint64): seq[ShardCommittee] =
let index = state.get_shard_and_committees_index(slot)
state.shard_and_committee_for_slots[index]
state.shard_committees_at_slots[index]

func get_beacon_proposer_index*(state: BeaconState, slot: uint64): uint64 =
func get_beacon_proposer_index*(state: BeaconState, slot: uint64): Uint24 =
## From Casper RPJ mini-spec:
## When slot i begins, validator Vidx is expected
## to create ("propose") a block, which contains a pointer to some parent block
@@ -115,7 +115,7 @@ func get_beacon_proposer_index*(state: BeaconState, slot: uint64): uint64 =
## idx in Vidx == p(i mod N), pi being a random permutation of validators indices (i.e. a committee)

let idx = get_shard_and_committees_index(state, slot)
state.shard_and_committee_for_slots[idx][0].committee.mod_get(slot)
state.shard_committees_at_slots[idx][0].committee.mod_get(slot)

func int_sqrt*(n: SomeInteger): SomeInteger =
var
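
An aside, not part of the diff: `mod_get` above picks a committee member by wrapping the slot number around the committee length, so every slot maps deterministically to one member of the first committee for that slot. A minimal sketch of that indexing; modGet here is illustrative, not the repository's own helper:

func modGet[T](s: seq[T], i: uint64): T =
  # Wrap the index around the sequence length.
  s[i.int mod s.len]

# Example: a 5-member committee and slot 12 selects member 12 mod 5 = 2.
assert modGet(@[100, 101, 102, 103, 104], 12'u64) == 102
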
@@ -133,3 +133,5 @@ func get_fork_version*(fork_data: ForkData, slot: uint64): uint64 =
func get_domain*(fork_data: ForkData, slot: uint64, domain_type: uint64): uint64 =
# TODO Slot overflow? Or is slot 32 bits for all intents and purposes?
(get_fork_version(fork_data, slot) shl 32) + domain_type

func is_power_of_2*(v: uint64): bool = (v and (v-1)) == 0
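
An aside, not part of the diff: `get_domain` above packs the fork version into the upper 32 bits of a 64-bit value and the domain type into the lower 32 bits. A worked example:

# fork version 1, domain type 2
let domain = (1'u64 shl 32) + 2'u64
assert domain == 0x0000000100000002'u64
assert (domain shr 32) == 1'u64               # recover the fork version
assert (domain and 0xFFFFFFFF'u64) == 2'u64   # recover the domain type
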
@@ -7,7 +7,7 @@
# Helpers and functions pertaining to managing the validator set

import
options, nimcrypto,
options, nimcrypto, sequtils, math,
eth_common,
../ssz,
./crypto, ./datatypes, ./digest, ./helpers
@@ -15,7 +15,8 @@ import
func min_empty_validator_index(validators: seq[ValidatorRecord], current_slot: uint64): Option[int] =
for i, v in validators:
if v.balance == 0 and
v.latest_status_change_slot + DELETION_PERIOD.uint64 <= current_slot:
v.latest_status_change_slot +
ZERO_BALANCE_VALIDATOR_TTL.uint64 <= current_slot:
return some(i)

func get_new_validators*(current_validators: seq[ValidatorRecord],
@@ -76,24 +77,24 @@ func get_new_validators*(current_validators: seq[ValidatorRecord],
func get_active_validator_indices*(validators: openArray[ValidatorRecord]): seq[Uint24] =
## Select the active validators
for idx, val in validators:
if val.status in {ACTIVE, PENDING_EXIT}:
if val.status in {ACTIVE, ACTIVE_PENDING_EXIT}:
result.add idx.Uint24

func get_new_shuffling*(seed: Eth2Digest,
validators: openArray[ValidatorRecord],
crosslinking_start_shard: int
): array[EPOCH_LENGTH, seq[ShardAndCommittee]] =
crosslinking_start_shard: uint64
): array[EPOCH_LENGTH, seq[ShardCommittee]] =
## Split up validators into groups at the start of every epoch,
## determining at what height they can make attestations and what shard they are making crosslinks for
## Implementation should do the following: http://vitalik.ca/files/ShuffleAndAssign.png

let
active_validators = get_active_validator_indices(validators)
active_validator_indices = get_active_validator_indices(validators)
committees_per_slot = clamp(
len(active_validators) div EPOCH_LENGTH div TARGET_COMMITTEE_SIZE,
1, SHARD_COUNT div EPOCH_LENGTH)
len(active_validator_indices) div EPOCH_LENGTH div TARGET_COMMITTEE_SIZE,
1, SHARD_COUNT div EPOCH_LENGTH).uint64
# Shuffle with seed
shuffled_active_validator_indices = shuffle(active_validators, seed)
shuffled_active_validator_indices = shuffle(active_validator_indices, seed)
# Split the shuffled list into cycle_length pieces
validators_per_slot = split(shuffled_active_validator_indices, EPOCH_LENGTH)
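
An aside, not part of the diff: a worked example of the committee sizing above, using the constants in this diff (EPOCH_LENGTH = 64, TARGET_COMMITTEE_SIZE = 2^8, SHARD_COUNT = 1024). The clamp keeps at least one committee per slot and never assigns more committees per slot than there are shards to go around in one epoch:

import math

const
  EPOCH_LENGTH = 64
  TARGET_COMMITTEE_SIZE = 2^8
  SHARD_COUNT = 1024

func committeesPerSlot(activeValidators: int): int =
  clamp(activeValidators div EPOCH_LENGTH div TARGET_COMMITTEE_SIZE,
        1, SHARD_COUNT div EPOCH_LENGTH)

assert committeesPerSlot(500_000) == 16  # capped at SHARD_COUNT div EPOCH_LENGTH
assert committeesPerSlot(100_000) == 6   # 100_000 div 64 div 256
assert committeesPerSlot(10_000) == 1    # floored at one committee per slot
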
@@ -102,13 +103,16 @@ func get_new_shuffling*(seed: Eth2Digest,
for slot, slot_indices in validators_per_slot:
let
shard_indices = split(slot_indices, committees_per_slot)
shard_id_start = crosslinking_start_shard + slot * committees_per_slot
shard_id_start =
crosslinking_start_shard + slot.uint64 * committees_per_slot

var committees = newSeq[ShardAndCommittee](shard_indices.len)
var committees = newSeq[ShardCommittee](shard_indices.len)
for shard_position, indices in shard_indices:
committees[shard_position].shard =
uint64(shard_id_start + shard_position) mod SHARD_COUNT
uint64(shard_id_start + shard_position.uint64) mod SHARD_COUNT.uint64
committees[shard_position].committee = indices
committees[shard_position].total_validator_count =
len(active_validator_indices).uint64

result[slot] = committees
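
An aside, not part of the diff: the shard assignment above numbers committees consecutively from crosslinking_start_shard and wraps modulo SHARD_COUNT. For example, with two committees per slot and a start shard near the end of the range:

const SHARD_COUNT = 1024'u64

let
  crosslinking_start_shard = 1022'u64
  committees_per_slot = 2'u64
  slot = 1'u64
  shard_id_start = crosslinking_start_shard + slot * committees_per_slot  # 1024

assert (shard_id_start + 0) mod SHARD_COUNT == 0'u64  # wraps around to shard 0
assert (shard_id_start + 1) mod SHARD_COUNT == 1'u64
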
@@ -164,7 +168,7 @@ func exit_validator*(index: Uint24,
whistleblower.balance.inc(whistleblower_reward.int)
validator.balance.dec(whistleblower_reward.int)
else:
validator.status = PENDING_EXIT
validator.status = ACTIVE_PENDING_EXIT

state.validator_registry_delta_chain_tip =
get_new_validator_registry_delta_chain_tip(
@@ -173,3 +177,90 @@ func exit_validator*(index: Uint24,
validator.pubkey,
EXIT,
)

func get_changed_validators*(validators: seq[ValidatorRecord],
latest_penalized_exit_balances: seq[uint64],
validator_registry_delta_chain_tip: Eth2Digest,
current_slot: uint64):
tuple[
validators: seq[ValidatorRecord],
latest_penalized_exit_balances: seq[uint64],
validator_registry_delta_chain_tip: Eth2Digest] =
## Return changed validator registry and `latest_penalized_exit_balances`,
## `validator_registry_delta_chain_tip`.

# TODO inefficient
var validators = validators

# The active validators
let active_validator_indices = get_active_validator_indices(validators)
# The total effective balance of active validators
let total_balance = sum(mapIt(
active_validator_indices, get_effective_balance(validators[it])))

# The maximum balance churn in Gwei (for deposits and exits separately)
let max_balance_churn = max(
(MAX_DEPOSIT * GWEI_PER_ETH).uint64,
total_balance div (2 * MAX_BALANCE_CHURN_QUOTIENT)
)

# Activate validators within the allowable balance churn
var balance_churn = 0'u64
var validator_registry_delta_chain_tip = validator_registry_delta_chain_tip
for i in 0..<len(validators):
if validators[i].status == PENDING_ACTIVATION and
validators[i].balance >= MAX_DEPOSIT.uint64:
# Check the balance churn would be within the allowance
balance_churn.inc(get_effective_balance(validators[i]).int)
if balance_churn > max_balance_churn:
break

# Activate validator
validators[i].status = ACTIVE
validators[i].latest_status_change_slot = current_slot
validator_registry_delta_chain_tip =
get_new_validator_registry_delta_chain_tip(
validator_registry_delta_chain_tip,
i.Uint24,
validators[i].pubkey,
ACTIVATION,
)

# Exit validators within the allowable balance churn
balance_churn = 0
for i in 0..<len(validators):
if validators[i].status == ACTIVE_PENDING_EXIT:
# Check the balance churn would be within the allowance
balance_churn.inc(get_effective_balance(validators[i]).int)
if balance_churn > max_balance_churn:
break

# Exit validator
validators[i].status = EXITED_WITHOUT_PENALTY
validators[i].latest_status_change_slot = current_slot
validator_registry_delta_chain_tip =
get_new_validator_registry_delta_chain_tip(
validator_registry_delta_chain_tip,
i.Uint24,
validators[i].pubkey,
EXIT,
)

# Calculate the total ETH that has been penalized in the last ~2-3 withdrawal periods
let period_index = current_slot.int div COLLECTIVE_PENALTY_CALCULATION_PERIOD
let total_penalties = (
(latest_penalized_exit_balances[period_index]) +
(if period_index >= 1: latest_penalized_exit_balances[period_index - 1] else: 0) +
(if period_index >= 2: latest_penalized_exit_balances[period_index - 2] else: 0)
)

# Calculate penalties for slashed validators
func to_penalize(v: ValidatorRecord): bool =
v.status == EXITED_WITH_PENALTY
var validators_to_penalize = filter(validators, to_penalize)
for v in validators_to_penalize.mitems():
v.balance.dec(
(get_effective_balance(v) * min(total_penalties * 3, total_balance) div
total_balance).int)

(validators, latest_penalized_exit_balances, validator_registry_delta_chain_tip)
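
An aside, not part of the diff: a worked example of the churn limit computed in get_changed_validators above, plugging in this diff's constants (MAX_DEPOSIT = 2^5 ETH, GWEI_PER_ETH = 10^9, MAX_BALANCE_CHURN_QUOTIENT = 2^5):

let total_balance = 1_000_000'u64 * 1_000_000_000'u64  # 1M ETH at stake, in Gwei

let max_balance_churn = max(
  32'u64 * 1_000_000_000'u64,          # MAX_DEPOSIT * GWEI_PER_ETH: a 32 ETH floor
  total_balance div (2'u64 * 32'u64))  # total_balance div (2 * MAX_BALANCE_CHURN_QUOTIENT)

# roughly 15625 ETH may activate (and, separately, exit) per registry change
assert max_balance_churn == 15_625'u64 * 1_000_000_000'u64
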
@@ -197,7 +197,7 @@ func hashSSZ*(x: ValidatorRecord): array[32, byte] =
h.update hashSSZ(x.latest_status_change_slot)
h.update hashSSZ(x.exit_count)

func hashSSZ*(x: ShardAndCommittee): array[32, byte] =
func hashSSZ*(x: ShardCommittee): array[32, byte] =
withHash:
h.update hashSSZ(x.shard)
h.update merkleHash(x.committee)
@@ -241,7 +241,7 @@ func hashSSZ*(x: AttestationRecord): array[32, byte] =
# h.update hashSSZ(x.data) # TODO this is now a sub-object of its own
# h.update hashSSZ(attester_bitfield) # TODO - the bitfield as a specific serialisation format
# h.update hashSSZ(x.poc_bitfield) # TODO - same serialization format
h.update hashSSZ(x.aggregate_sig)
h.update hashSSZ(x.aggregate_signature)

func hashSSZ*(x: BeaconBlock): array[32, byte] =
## TODO - Warning ⚠️: not part of the spec
@@ -5,27 +5,49 @@
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

# A imcomplete implementation of the state transition function, as described
# under "Per-block processing" in https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md
# State transition, as described in
# https://github.com/ethereum/eth2.0-specs/blob/master/specs/core/0_beacon-chain.md#beacon-chain-state-transition-function
#
# The code is here mainly to verify the data types and get an idea about
# missing pieces - needs testing throughout
# The purpose of this code right is primarily educational, to help piece
# together the mechanics of the beacon state and to discover potential problem
# areas.
#
# General notes about the code (TODO):
# * It's inefficient - we quadratically copy, allocate and iterate when there
# are faster options
# * Weird styling - the sections taken from the spec use python styling while
# the others use NEP-1 - helps grepping identifiers in spec
# * We mix procedural and functional styles for no good reason, except that the
# spec does so also.
# * There are no tests, and likely lots of bugs.
# * For validators, sometimes indices are used and sometimes instances - this
# causes unnecessary friction in some sections
# * For indices, we get a mix of uint64, Uint24 and int - this is currently
# swept under the rug with casts
# * The spec uses uint64 for data types, but functions in the spec often assume
# signed bigint semantics - under- and overflows ensue
# * Sane error handling is missing in most cases (yay, we'll get the chance to
# debate exceptions again!)
#
# When updating the code, add TODO sections to mark where there are clear
# improvements to be made - other than that, keep things similar to spec for
# now.

import
math, options, sequtils,
./extras,
./extras, ./ssz,
./spec/[beaconstate, crypto, datatypes, digest, helpers, validator],
./ssz,
milagro_crypto # nimble install https://github.com/status-im/nim-milagro-crypto@#master
milagro_crypto

# TODO there's an ugly mix of functional and procedural styles here that
# is due to how the spec is mixed as well - once we're past the prototype
# stage, this will need clearing up and unification.

func checkAttestations(state: BeaconState,
blck: BeaconBlock,
parent_slot: uint64): Option[seq[PendingAttestationRecord]] =
# TODO perf improvement potential..
func processAttestations(state: var BeaconState,
blck: BeaconBlock,
parent_slot: uint64): bool =
# Each block includes a number of attestations that the proposer chose. Each
# attestation represents an update to a specific shard and is signed by a
# committee of validators.
# Here we make sanity checks for each attestation and it to the state - most
# updates will happen at the epoch boundary where state updates happen in
# bulk.
if blck.attestations.len > MAX_ATTESTATIONS_PER_BLOCK:
return
@@ -33,7 +55,8 @@ func checkAttestations(state: BeaconState,
for attestation in blck.attestations:
if attestation.data.slot <= blck.slot - MIN_ATTESTATION_INCLUSION_DELAY:
return
# TODO unsigned undeflow in spec

# TODO spec - unsigned underflow
if attestation.data.slot >= max(parent_slot.int - EPOCH_LENGTH + 1, 0).uint64:
return
@@ -42,6 +65,7 @@ func checkAttestations(state: BeaconState,
state.justified_slot
else:
state.previous_justified_slot

if attestation.data.justified_slot != expected_justified_slot:
return
@@ -69,22 +93,24 @@ func checkAttestations(state: BeaconState,
else:
agg_pubkey.combine(validator.pubkey)

# Verify that aggregate_sig verifies using the group pubkey.
# Verify that aggregate_signature verifies using the group pubkey.
let msg = hashSSZ(attestation.data)

# For now only check compilation
# doAssert attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)
# doAssert attestation.aggregate_signature.verifyMessage(msg, agg_pubkey)
debugEcho "Aggregate sig verify message: ",
attestation.aggregate_sig.verifyMessage(msg, agg_pubkey)
attestation.aggregate_signature.verifyMessage(msg, agg_pubkey)

res.add PendingAttestationRecord(
# All checks passed - update state
# TODO no rollback in case of errors
state.latest_attestations.add PendingAttestationRecord(
data: attestation.data,
participation_bitfield: attestation.participation_bitfield,
custody_bitfield: attestation.custody_bitfield,
slot_included: blck.slot
)

some(res)
true

func verifyProposerSignature(state: BeaconState, blck: BeaconBlock): bool =
var blck_without_sig = blck
@@ -109,9 +135,9 @@ func processRandaoReveal(state: var BeaconState,
let proposer_index = get_beacon_proposer_index(state, slot)
state.validator_registry[proposer_index.int].randao_skips.inc()

var
let
proposer_index = get_beacon_proposer_index(state, blck.slot)
proposer = state.validator_registry[proposer_index.int]
proposer = state.validator_registry[proposer_index.int].addr

# Check that proposer commit and reveal match
if repeat_hash(blck.randao_reveal, proposer.randao_skips + 1) !=
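
An aside, not part of the diff: the check above is a commit/reveal scheme - the proposer once committed to an iterated hash, and the revealed value must hash back to the stored commitment after randao_skips + 1 applications (the comparison target sits outside this hunk). A generic sketch of the iteration, with hashFn standing in for the actual digest function:

func repeatHash[T](v: T, n: int, hashFn: proc(x: T): T {.noSideEffect.}): T =
  # Apply the hash function n times: H(H(...H(v)...)).
  result = v
  for _ in 0 ..< n:
    result = hashFn(result)

# Conceptually, the validation above reads:
#   repeat_hash(blck.randao_reveal, proposer.randao_skips + 1) == <stored commitment>
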
@@ -143,31 +169,25 @@ func processSpecials(state: var BeaconState, blck: BeaconBlock): bool =
# TODO incoming spec changes here..
true

func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
func processBlock(state: var BeaconState, blck: BeaconBlock): bool =
## When a new block is received, all participants must verify that the block
## makes sense and update their state accordingly. This function will return
## the new state, unless something breaks along the way

# TODO: simplistic way to be able to rollback state
var state = state
# TODO state not rolled back in case of failure

let
parent_hash = blck.ancestor_hashes[0]
slot = blck.slot
parent_slot = slot - 1 # TODO Not!! can skip slots...
# TODO actually get parent block, which means fixing up BeaconState refs above;
# there's no distinction between active/crystallized state anymore, etc.

state.latest_block_hashes =
append_to_recent_block_hashes(state.latest_block_hashes, parent_slot, slot,
parent_hash)

let processed_attestations = checkAttestations(state, blck, parent_slot)
if processed_attestations.isNone:
if not processAttestations(state, blck, parent_slot):
return

state.latest_attestations.add processed_attestations.get()

if not verifyProposerSignature(state, blck):
return
@@ -180,46 +200,69 @@ func process_block*(state: BeaconState, blck: BeaconBlock): Option[BeaconState]
if not processSpecials(state, blck):
return

some(state) # Looks ok - move on with the updated state
true

func flatten[T](v: openArray[seq[T]]): seq[T] =
# TODO not in nim - doh.
for x in v: result.add x

func get_epoch_boundary_attesters(
state: BeaconState,
attestations: openArray[PendingAttestationRecord]): seq[int] =
attestations: openArray[PendingAttestationRecord]): seq[Uint24] =
# TODO spec - add as helper?
deduplicate(flatten(mapIt(attestations,
get_attestation_participants(state, it.data, it.participation_bitfield))))

func adjust_for_inclusion_distance[T](magnitude: T, dist: T): T =
magnitude div 2 + (magnitude div 2) * MIN_ATTESTATION_INCLUSION_DELAY div dist

func processEpoch*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
func boundary_attestations(
state: BeaconState, boundary_hash: Eth2Digest,
attestations: openArray[PendingAttestationRecord]
): seq[PendingAttestationRecord] =
# TODO spec - add as helper?
filterIt(attestations,
it.data.epoch_boundary_hash == boundary_hash and
it.data.justified_slot == state.justified_slot)

func sum_effective_balances(
state: BeaconState, validator_indices: openArray[Uint24]): uint64 =
# TODO spec - add as helper?
sum(mapIt(
validator_indices, get_effective_balance(state.validator_registry[it]))
)

func lowerThan(candidate, current: Eth2Digest): bool =
# return true iff candidate is "lower" than current, per spec rule:
# "ties broken by favoring lower `shard_block_hash` values"
# TODO spec - clarify hash ordering..
for i, v in current.data:
if v > candidate.data[i]: return true
return false

func processEpoch(state: var BeaconState, blck: BeaconBlock): bool =
## Epoch processing happens every time we've passed EPOCH_LENGTH blocks.
## Because some slots may be skipped, it may happen that we go through the
## loop more than once - each time the latest_state_recalculation_slot will be
## increased by EPOCH_LENGTH.

# TODO: simplistic way to be able to rollback state
var state = state

# Precomputation

while blck.slot >= EPOCH_LENGTH.uint64 + state.latest_state_recalculation_slot:
# Convenience shortcut, from spec
let s = state.latest_state_recalculation_slot

# Precomputation
let
active_validators =
mapIt(get_active_validator_indices(state.validator_registry),
state.validator_registry[it])

total_balance = sum(mapIt(active_validators, get_effective_balance(it)))

active_validator_indices =
get_active_validator_indices(state.validator_registry)
total_balance = sum_effective_balances(state, active_validator_indices)
total_balance_in_eth = total_balance.int div GWEI_PER_ETH

# The per-slot maximum interest rate is `2/reward_quotient`.)
reward_quotient = BASE_REWARD_QUOTIENT * int_sqrt(total_balance_in_eth)

# TODO not in spec, convenient
epoch_boundary_hash = get_block_hash(state, blck, s)

proc base_reward(v: ValidatorRecord): uint64 =
get_effective_balance(v) div reward_quotient.uint64
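
An aside, not part of the diff: a worked example of adjust_for_inclusion_distance above, with MIN_ATTESTATION_INCLUSION_DELAY = 4 as in this diff. Half the reward is fixed; the other half shrinks the longer the attestation took to be included:

func adjustForInclusionDistance(magnitude, dist: uint64): uint64 =
  # Same formula as above, with MIN_ATTESTATION_INCLUSION_DELAY written out as 4.
  magnitude div 2 + (magnitude div 2) * 4 div dist

assert adjustForInclusionDistance(100, 4) == 100  # included as early as allowed: full reward
assert adjustForInclusionDistance(100, 8) == 75
assert adjustForInclusionDistance(100, 50) == 54  # tends towards magnitude div 2
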
@@ -229,128 +272,265 @@ func processEpoch*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
this_epoch_attestations = filterIt(state.latest_attestations,
s <= it.data.slot and it.data.slot < s + EPOCH_LENGTH)

this_epoch_boundary_attestations = filterIt(this_epoch_attestations,
it.data.epoch_boundary_hash == get_block_hash(state, blck, s) and
it.data.justified_slot == state.justified_slot)
this_epoch_boundary_attestations =
boundary_attestations(state, epoch_boundary_hash,
this_epoch_attestations)

this_epoch_boundary_attesters =
get_epoch_boundary_attesters(state, this_epoch_attestations)

this_epoch_boundary_attesting_balance = sum(
mapIt(this_epoch_boundary_attesters,
get_effective_balance(state.validator_registry[it]))
)
this_epoch_boundary_attesting_balance =
sum_effective_balances(state, this_epoch_boundary_attesters)

let
previous_epoch_attestations = filterIt(state.latest_attestations,
s <= it.data.slot + EPOCH_LENGTH and it.data.slot < s)
previous_epoch_boundary_attestations = filterIt(previous_epoch_attestations,
it.data.epoch_boundary_hash == get_block_hash(state, blck, s) and
it.data.justified_slot == state.justified_slot)

previous_epoch_boundary_attestations =
boundary_attestations(state, epoch_boundary_hash,
previous_epoch_attestations)

previous_epoch_boundary_attesters =
get_epoch_boundary_attesters(state, previous_epoch_boundary_attestations)
previous_epoch_boundary_attesting_balance = sum(
mapIt(previous_epoch_boundary_attesters,
get_effective_balance(state.validator_registry[it]))
)

# TODO gets pretty hairy here
previous_epoch_boundary_attesting_balance =
sum_effective_balances(state, this_epoch_boundary_attesters)

# TODO this is really hairy - we cannot capture `state` directly, but we
# can capture a pointer to it - this is safe because we don't leak
# these closures outside this scope, but still..
let statePtr = state.addr
func attesting_validators(
obj: ShardAndCommittee, shard_block_hash: Eth2Digest): seq[int] =
obj: ShardCommittee, shard_block_hash: Eth2Digest): seq[Uint24] =
flatten(
mapIt(
filterIt(concat(this_epoch_attestations, previous_epoch_attestations),
it.data.shard == obj.shard and
it.data.shard_block_hash == shard_block_hash),
get_attestation_participants(state, it.data, it.participation_bitfield)))
get_attestation_participants(statePtr[], it.data, it.participation_bitfield)))

# TODO which shard_block_hash:es?
# * Let `attesting_validators(obj)` be equal to `attesting_validators(obj, shard_block_hash)` for the value of `shard_block_hash` such that `sum([get_effective_balance(v) for v in attesting_validators(obj, shard_block_hash)])` is maximized (ties broken by favoring lower `shard_block_hash` values).
# * Let `total_attesting_balance(obj)` be the sum of the balances-at-stake of `attesting_validators(obj)`.
# * Let `winning_hash(obj)` be the winning `shard_block_hash` value.
# * Let `total_balance(obj) = sum([get_effective_balance(v) for v in obj.committee])`.
func winning_hash(obj: ShardCommittee): Eth2Digest =
# * Let `winning_hash(obj)` be the winning `shard_block_hash` value.
# ... such that `sum([get_effective_balance(v) for v in attesting_validators(obj, shard_block_hash)])`
# is maximized (ties broken by favoring lower `shard_block_hash` values).
let candidates =
mapIt(
filterIt(concat(this_epoch_attestations, previous_epoch_attestations),
it.data.shard == obj.shard),
it.data.shard_block_hash)

# Let `inclusion_slot(v)` equal `a.slot_included` for the attestation `a` where `v` is in `get_attestation_participants(state, a.data, a.participation_bitfield)`, and `inclusion_distance(v) = a.slot_included - a.data.slot` for the same attestation. We define a function `adjust_for_inclusion_distance(magnitude, distance)` which adjusts the reward of an attestation based on how long it took to get included (the longer, the lower the reward). Returns a value between 0 and `magnitude`.
var max_hash = candidates[0]
var max_val =
sum_effective_balances(statePtr[], attesting_validators(obj, max_hash))
for candidate in candidates[1..^1]:
let val = sum_effective_balances(statePtr[], attesting_validators(obj, candidate))
if val > max_val or (val == max_val and candidate.lowerThan(max_hash)):
max_hash = candidate
max_val = val
max_hash

# Adjust justified slots and crosslink status
func attesting_validators(obj: ShardCommittee): seq[Uint24] =
attesting_validators(obj, winning_hash(obj))

var new_justified_slot: Option[uint64]
# overflow intentional!
state.justified_slot_bitfield = state.justified_slot_bitfield * 2
func total_attesting_balance(obj: ShardCommittee): uint64 =
sum_effective_balances(statePtr[], attesting_validators(obj))

if 3'u64 * previous_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
state.justified_slot_bitfield = state.justified_slot_bitfield or 2
new_justified_slot = some(s - EPOCH_LENGTH)
func total_balance_sac(obj: ShardCommittee): uint64 =
sum_effective_balances(statePtr[], obj.committee)

if 3'u64 * this_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
state.justified_slot_bitfield = state.justified_slot_bitfield or 1
new_justified_slot = some(s)
func inclusion_slot(v: Uint24): uint64 =
for a in statePtr[].latest_attestations:
if v in get_attestation_participants(statePtr[], a.data, a.participation_bitfield):
return a.slot_included
assert false # shouldn't happen..

if state.justified_slot == s - EPOCH_LENGTH and
state.justified_slot_bitfield mod 4 == 3:
state.finalized_slot = state.justified_slot
if state.justified_slot == s - EPOCH_LENGTH - EPOCH_LENGTH and
state.justified_slot_bitfield mod 8 == 7:
state.finalized_slot = state.justified_slot
func inclusion_distance(v: Uint24): uint64 =
for a in statePtr[].latest_attestations:
if v in get_attestation_participants(statePtr[], a.data, a.participation_bitfield):
return a.slot_included - a.data.slot
assert false # shouldn't happen..

if state.justified_slot == s - EPOCH_LENGTH - 2 * EPOCH_LENGTH and
state.justified_slot_bitfield mod 16 in [15'u64, 14]:
state.finalized_slot = state.justified_slot
block: # Adjust justified slots and crosslink status
var new_justified_slot: Option[uint64]
# TODO where's that bitfield type when you need it?
# TODO what happens with the bits that drop off..?
state.justified_slot_bitfield = state.justified_slot_bitfield shl 1

state.previous_justified_slot = state.justified_slot
if 3'u64 * previous_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
state.justified_slot_bitfield = state.justified_slot_bitfield or 2
new_justified_slot = some(s - EPOCH_LENGTH)

if new_justified_slot.isSome():
state.justified_slot = new_justified_slot.get()
if 3'u64 * this_epoch_boundary_attesting_balance >= 2'u64 * total_balance:
# TODO spec says "flip the second lowest bit to 1" and does "AND", wrong?
state.justified_slot_bitfield = state.justified_slot_bitfield or 1
new_justified_slot = some(s)

# for obj in state.shard_and_committee_for_slots:
# 3 * total_attesting_balance(obj) >= 2 * total_balance(obj):
# state.crosslinks[shard] = CrosslinkRecord(
# slot: latest_state_recalculation_slot + EPOCH_LENGTH,
# hash: winning_hash(obj))
if state.justified_slot == s - EPOCH_LENGTH and
state.justified_slot_bitfield mod 4 == 3:
state.finalized_slot = state.justified_slot
if state.justified_slot == s - EPOCH_LENGTH - EPOCH_LENGTH and
state.justified_slot_bitfield mod 8 == 7:
state.finalized_slot = state.justified_slot

# Balance recalculations related to FFG rewards
let
# The portion lost by offline [validators](#dfn-validator) after `D`
# epochs is about `D*D/2/inactivity_penalty_quotient`.
inactivity_penalty_quotient = SQRT_E_DROP_TIME^2
time_since_finality = blck.slot - state.finalized_slot
if state.justified_slot == s - EPOCH_LENGTH - 2 * EPOCH_LENGTH and
state.justified_slot_bitfield mod 16 in [15'u64, 14]:
state.finalized_slot = state.justified_slot

if time_since_finality <= 4'u64 * EPOCH_LENGTH:
# for v in previous_epoch_boundary_attesters:
# state.validators[v].balance.inc(adjust_for_inclusion_distance(
# base_reward(state.validators[v]) *
# prev_cycle_boundary_attesting_balance div total_balance,
# inclusion_distance(v)))
state.previous_justified_slot = state.justified_slot

for v in get_active_validator_indices(state.validator_registry):
if v notin previous_epoch_boundary_attesters:
state.validator_registry[v].balance.dec(
base_reward(state.validator_registry[v]).int)
else:
# Any validator in `prev_cycle_boundary_attesters` sees their balance
# unchanged.
# Others might get penalized:
for vindex, v in state.validator_registry.mpairs():
if (v.status == ACTIVE and vindex notin previous_epoch_boundary_attesters) or
v.status == EXITED_WITH_PENALTY:
v.balance.dec(
(base_reward(v) + get_effective_balance(v) * time_since_finality div
inactivity_penalty_quotient.uint64).int)
if new_justified_slot.isSome():
state.justified_slot = new_justified_slot.get()

# For each `v` in `prev_cycle_boundary_attesters`, we determine the proposer `proposer_index = get_beacon_proposer_index(state, inclusion_slot(v))` and set `state.validators[proposer_index].balance += base_reward(v) // INCLUDER_REWARD_SHARE_QUOTIENT`.
for sac in state.shard_committees_at_slots:
# TODO or just state.shard_committees_at_slots[s]?
for obj in sac:
if 3'u64 * total_attesting_balance(obj) >= 2'u64 * total_balance_sac(obj):
state.latest_crosslinks[obj.shard] = CrosslinkRecord(
slot: state.latest_state_recalculation_slot + EPOCH_LENGTH,
shard_block_hash: winning_hash(obj))

# Balance recalculations related to crosslink rewards
block: # Balance recalculations related to FFG rewards
let
time_since_finality = blck.slot - state.finalized_slot

# Ethereum 1.0 chain related rules
if time_since_finality <= 4'u64 * EPOCH_LENGTH:
for v in previous_epoch_boundary_attesters:
state.validator_registry[v].balance.inc(adjust_for_inclusion_distance(
base_reward(state.validator_registry[v]) *
previous_epoch_boundary_attesting_balance div total_balance,
inclusion_distance(v)).int)

# Validator registry change
for v in active_validator_indices:
if v notin previous_epoch_boundary_attesters:
state.validator_registry[v].balance.dec(
base_reward(state.validator_registry[v]).int)
else:
# Any validator in `prev_cycle_boundary_attesters` sees their balance
# unchanged.
# Others might get penalized:
for vindex, v in state.validator_registry.mpairs():
if (v.status == ACTIVE and
vindex.Uint24 notin previous_epoch_boundary_attesters) or
v.status == EXITED_WITH_PENALTY:
v.balance.dec(
(base_reward(v) + get_effective_balance(v) * time_since_finality div
INACTIVITY_PENALTY_QUOTIENT.uint64).int)

# If a validator registry change does NOT happen
for v in previous_epoch_boundary_attesters:
let proposer_index = get_beacon_proposer_index(state, inclusion_slot(v))
state.validator_registry[proposer_index].balance.inc(
(base_reward(state.validator_registry[v]) div INCLUDER_REWARD_QUOTIENT.uint64).int)

# Proposer reshuffling
block: # Balance recalculations related to crosslink rewards
for sac in state.shard_committees_at_slots[0 ..< EPOCH_LENGTH]:
for obj in sac:
for vindex in obj.committee:
let v = state.validator_registry[vindex].addr

# Finally...
if vindex in attesting_validators(obj):
v.balance.inc(adjust_for_inclusion_distance(
base_reward(v[]) * total_attesting_balance(obj) div total_balance_sac(obj),
inclusion_distance(vindex)).int)
else:
v.balance.dec(base_reward(v[]).int)

block: # Ethereum 1.0 chain related rules
if state.latest_state_recalculation_slot mod
POW_RECEIPT_ROOT_VOTING_PERIOD.uint64 == 0:
for x in state.candidate_pow_receipt_roots:
if x.votes * 2 >= POW_RECEIPT_ROOT_VOTING_PERIOD.uint64:
state.processed_pow_receipt_root = x.candidate_pow_receipt_root
break
state.candidate_pow_receipt_roots = @[]

block: # Validator registry change
if state.finalized_slot > state.validator_registry_latest_change_slot and
allIt(state.shard_committees_at_slots,
allIt(it,
state.latest_crosslinks[it.shard].slot >
state.validator_registry_latest_change_slot)):
state.change_validators(s)
state.validator_registry_latest_change_slot = s + EPOCH_LENGTH
for i in 0..<EPOCH_LENGTH:
state.shard_committees_at_slots[i] =
state.shard_committees_at_slots[EPOCH_LENGTH + i]
# https://github.com/ethereum/eth2.0-specs/issues/223
let next_start_shard = (state.shard_committees_at_slots[^1][^1].shard + 1) mod SHARD_COUNT
for i, v in get_new_shuffling(
state.next_seed, state.validator_registry, next_start_shard):
state.shard_committees_at_slots[i + EPOCH_LENGTH] = v
state.next_seed = state.randao_mix
else:
# If a validator registry change does NOT happen
for i in 0..<EPOCH_LENGTH:
state.shard_committees_at_slots[i] =
state.shard_committees_at_slots[EPOCH_LENGTH + i]
let time_since_finality = blck.slot - state.validator_registry_latest_change_slot
let start_shard = state.shard_committees_at_slots[0][0].shard
if time_since_finality * EPOCH_LENGTH <= MIN_VALIDATOR_REGISTRY_CHANGE_INTERVAL.uint64 or
is_power_of_2(time_since_finality):
for i, v in get_new_shuffling(
state.next_seed, state.validator_registry, start_shard):
state.shard_committees_at_slots[i + EPOCH_LENGTH] = v
state.next_seed = state.randao_mix
# Note that `start_shard` is not changed from the last epoch.

block: # Proposer reshuffling
let active_validator_indices = get_active_validator_indices(state.validator_registry)
let num_validators_to_reshuffle = len(active_validator_indices) div SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD
for i in 0..<num_validators_to_reshuffle:
# Multiplying i to 2 to ensure we have different input to all the required hashes in the shuffling
# and none of the hashes used for entropy in this loop will be the same
let validator_index = 0.Uint24 # active_validator_indices[hash(state.randao_mix + bytes8(i * 2)) mod len(active_validator_indices)]
let new_shard = 0'u64 # hash(state.randao_mix + bytes8(i * 2 + 1)) mod SHARD_COUNT
let shard_reassignment_record = ShardReassignmentRecord(
validator_index: validator_index,
shard: new_shard,
slot: s + SHARD_PERSISTENT_COMMITTEE_CHANGE_PERIOD.uint64
)
state.persistent_committee_reassignments.add(shard_reassignment_record)

while len(state.persistent_committee_reassignments) > 0 and
state.persistent_committee_reassignments[0].slot <= s:
let reassignment = state.persistent_committee_reassignments[0]
state.persistent_committee_reassignments.delete(0)
for committee in state.persistent_committees.mitems():
if reassignment.validator_index in committee:
committee.delete(committee.find(reassignment.validator_index))
state.persistent_committees[reassignment.shard.int].add(
reassignment.validator_index)

block: # Finally...
# Remove all attestation records older than slot `s`.
for i, v in state.validator_registry:
if v.balance < MIN_BALANCE.uint64 and v.status == ACTIVE:
exit_validator(i.Uint24, state, penalize=false, current_slot=blck.slot)
state.latest_block_hashes = state.latest_block_hashes[EPOCH_LENGTH..^1]
state.latest_state_recalculation_slot.inc(EPOCH_LENGTH)

true

func updateState*(state: BeaconState, blck: BeaconBlock): Option[BeaconState] =
## Adjust `state` according to the information in `blck`.
## Returns the new state, or `none` if the block is invalid.

# TODO check to which extent this copy can be avoided (considering forks etc),
# for now, it serves as a reminder that we need to handle invalid blocks
# somewhere..
# TODO many functions will mutate `state` partially without rolling back
# the changes in case of failure (look out for `var BeaconState` and
# bool return values...)
var state = state

# Block processing is split up into two phases - lightweight updates done
# for each block, and bigger updates done for each epoch.

# Lightweight updates that happen for every block
if not processBlock(state, blck): return

# Heavy updates that happen for every epoch
if not processEpoch(state, blck): return

# All good, we can return the new state
some(state)
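
An aside, not part of the diff: a small standalone sketch of the justification bitfield bookkeeping used in processEpoch above. Each epoch the field is shifted left by one; bit 1 (value 2) is then set if the previous epoch boundary gathered a 2/3 attesting balance and bit 0 (value 1) if the current one did, and the finalization checks look at the resulting low bits. The epochTick helper is illustrative only:

var bitfield = 0'u64

proc epochTick(prevJustified, thisJustified: bool) =
  bitfield = bitfield shl 1
  if prevJustified: bitfield = bitfield or 2
  if thisJustified: bitfield = bitfield or 1

epochTick(prevJustified = true, thisJustified = true)
epochTick(prevJustified = true, thisJustified = true)

# Two justified epochs in a row: the low bits read 0b11, one of the patterns
# the finalization conditions above test for (bitfield mod 4 == 3).
assert bitfield mod 4 == 3
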
@@ -8,6 +8,7 @@
import
./test_beaconstate,
./test_block_processing,
./test_helpers,
./test_ssz,
./test_validator,
./test_beacon_node,
@@ -7,7 +7,6 @@

import
unittest,
./testhelpers,
../beacon_chain/beacon_node

suite "Beacon node":
@@ -7,7 +7,7 @@

import
sequtils, unittest,
./testhelpers,
./testutil,
../beacon_chain/extras,
../beacon_chain/spec/[beaconstate, datatypes, digest]
@@ -7,20 +7,20 @@

import
options, sequtils, unittest,
./testhelpers,
./testutil,
../beacon_chain/spec/[beaconstate, datatypes, digest],
../beacon_chain/[extras, state_transition]

suite "Block processing":
## For now just test that we can compile and execute block processing with mock data.

test "Mock process_block":
test "Mock state update":
let
state = on_startup(makeInitialValidators(), 0, Eth2Digest())
blck = BeaconBlock(
slot: 1,
ancestor_hashes: @[Eth2Digest()]
)
newState = process_block(state, blck)
newState = updateState(state, blck)
check:
newState.isNone() # Broken block, should fail processing
@@ -0,0 +1,19 @@
# beacon_chain
# Copyright (c) 2018 Status Research & Development GmbH
# Licensed and distributed under either of
# * MIT license (license terms in the root directory or at http://opensource.org/licenses/MIT).
# * Apache v2 license (license terms in the root directory or at http://www.apache.org/licenses/LICENSE-2.0).
# at your option. This file may not be copied, modified, or distributed except according to those terms.

import
sequtils, unittest,
../beacon_chain/spec/[helpers]

suite "Spec helpers":
test "is_power_of_2 should do its job":
check:
is_power_of_2(1) == true
is_power_of_2(2) == true
is_power_of_2(3) == false
is_power_of_2(4) == true
is_power_of_2(not 0'u64) == false
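
An aside, not part of the diff: with the bit-trick definition used in helpers (`(v and (v-1)) == 0`), zero also reports true, a case the check list above does not exercise. A standalone illustration; isPowerOfTwo is a local copy, not the repository's exported helper:

func isPowerOfTwo(v: uint64): bool = (v and (v - 1)) == 0

assert isPowerOfTwo(0'u64)      # 0 and 0xFFFF...FF == 0, so zero passes the bit trick
assert isPowerOfTwo(1'u64)
assert not isPowerOfTwo(6'u64)
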
@@ -66,8 +66,8 @@ suite "Tree hashing":
let vr = ValidatorRecord()
check: hashSSZ(vr).len > 0

test "Hash ShardAndCommittee":
let sc = ShardAndCommittee()
test "Hash ShardCommittee":
let sc = ShardCommittee()
check: hashSSZ(sc).len > 0

test "Hash BeaconBlock":
|
@ -7,7 +7,6 @@
|
|||
|
||||
import
|
||||
unittest,
|
||||
./testhelpers,
|
||||
../beacon_chain/sync_protocol
|
||||
|
||||
suite "Sync protocol":
|
||||
|
|
|
@@ -8,7 +8,7 @@ import
math,unittest, sequtils,
../beacon_chain/spec/[datatypes, digest, validator]

func sumCommittees(v: openArray[seq[ShardAndCommittee]]): int =
func sumCommittees(v: openArray[seq[ShardCommittee]]): int =
for x in v:
for y in x:
inc result, y.committee.len