Merge branch 'dev' into accounting-reform
commit 46848e4202

@@ -5,15 +5,15 @@ CONFIG_NAME: "minimal"
 # Misc
 # ---------------------------------------------------------------
 # [customized]
-SYNC_COMMITTEE_SIZE: 64
+SYNC_COMMITTEE_SIZE: 32
 # [customized]
 SYNC_COMMITTEE_PUBKEY_AGGREGATES_SIZE: 16


 # Time parameters
 # ---------------------------------------------------------------
-# 2**8 (= 256)
-EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
+# [customized]
+EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8


 # Signature domains

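Aside (not part of the diff): with the customized minimal value of `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`, the period arithmetic used later in `get_sync_committee_indices` is easy to check by hand. A small self-contained sketch of that `base_epoch` expression, assuming only the formula shown in the hunks below:

```python
# Illustrative only: evaluates the base_epoch formula from get_sync_committee_indices
# with the minimal-config period of 8 epochs.
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 8

def base_epoch(epoch: int) -> int:
    return (max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD

assert base_epoch(0) == 0    # the first period falls back to epoch 0
assert base_epoch(8) == 0    # committees for period N are sampled from the end of period N-1
assert base_epoch(16) == 8
assert base_epoch(23) == 8
```
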
@@ -40,7 +40,8 @@
 - [Epoch processing](#epoch-processing)
   - [New `process_justification_and_finalization`](#new-process_justification_and_finalization)
   - [New `process_rewards_and_penalties`](#new-process_rewards_and_penalties)
-  - [Final updates](#final-updates)
+  - [Sync committee updates](#sync-committee-updates)
+  - [Participation flags updates](#participation-flags-updates)

 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
 <!-- /TOC -->

@@ -205,8 +206,8 @@ def get_flags_and_numerators() -> Sequence[Tuple[int, int]]:
 ```python
 def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
     """
-    Return the sync committee indices for a given state and epoch.
-    """
+    Return the sequence of sync committee indices (which may include duplicate indices) for a given state and epoch.
+    """
     MAX_RANDOM_BYTE = 2**8 - 1
     base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
     active_validator_indices = get_active_validator_indices(state, base_epoch)

@@ -219,7 +220,7 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
         candidate_index = active_validator_indices[shuffled_index]
         random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
         effective_balance = state.validators[candidate_index].effective_balance
-        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
+        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: # Sample with replacement
             sync_committee_indices.append(candidate_index)
         i += 1
     return sync_committee_indices

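Aside (not part of the diff): the `# Sample with replacement` comment is exactly what the docstring change calls out: the selection loop keeps drawing from the shuffled active set until enough members are accepted, so an index can repeat whenever the active set is small. A toy, self-contained illustration (the committee size and acceptance rule here are stand-ins, not the spec's values):

```python
import random
from typing import List

TOY_COMMITTEE_SIZE = 32  # stand-in for SYNC_COMMITTEE_SIZE

def toy_sync_committee(active_indices: List[int], seed: int = 0) -> List[int]:
    """Sample with replacement: indices repeat whenever the active set is small."""
    rng = random.Random(seed)
    committee = []
    while len(committee) < TOY_COMMITTEE_SIZE:
        candidate = active_indices[rng.randrange(len(active_indices))]
        if rng.random() < 0.9:  # stand-in for the effective-balance acceptance test
            committee.append(candidate)
    return committee

committee = toy_sync_committee(list(range(16)))  # only 16 active validators
assert len(committee) == TOY_COMMITTEE_SIZE
assert len(set(committee)) < len(committee)  # duplicates are guaranteed by pigeonhole
```
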
@@ -470,6 +471,23 @@ def process_sync_committee(state: BeaconState, body: BeaconBlockBody) -> None:
 
 ### Epoch processing
 
+```python
+def process_epoch(state: BeaconState) -> None:
+    process_justification_and_finalization(state)
+    process_rewards_and_penalties(state)
+    process_registry_updates(state)
+    process_slashings(state)
+    process_eth1_data_reset(state)
+    process_effective_balance_updates(state)
+    process_slashings_reset(state)
+    process_randao_mixes_reset(state)
+    process_historical_roots_update(state)
+    # [Added in HF1]
+    process_sync_committee_updates(state)
+    process_participation_flag_updates(state)
+    # [Removed in HF1] -- process_participation_record_updates(state)
+```
+
 #### New `process_justification_and_finalization`
 
 *Note*: The function `process_justification_and_finalization` is modified with `matching_target_attestations` replaced by `matching_target_indices`.

@@ -533,42 +551,26 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
         decrease_balance(state, ValidatorIndex(index), penalties[index])
 ```
 
-#### Final updates
-
-*Note*: The function `process_final_updates` is modified to handle sync committee updates and with the replacement of `PendingAttestation`s with participation flags.
+#### Sync committee updates
 
 ```python
-def process_final_updates(state: BeaconState) -> None:
-    current_epoch = get_current_epoch(state)
-    next_epoch = Epoch(current_epoch + 1)
-    # Reset eth1 data votes
-    if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
-        state.eth1_data_votes = []
-    # [Added in hf-1] Update sync committees
+def process_sync_committee_updates(state: BeaconState) -> None:
+    """
+    Call to ``process_sync_committee_updates`` added to ``process_epoch`` in HF1
+    """
+    next_epoch = get_current_epoch(state) + Epoch(1)
     if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
         state.current_sync_committee = state.next_sync_committee
         state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
-    # Update effective balances with hysteresis
-    for index, validator in enumerate(state.validators):
-        balance = state.balances[index]
-        HYSTERESIS_INCREMENT = uint64(EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT)
-        DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
-        UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
-        if (
-            balance + DOWNWARD_THRESHOLD < validator.effective_balance
-            or validator.effective_balance + UPWARD_THRESHOLD < balance
-        ):
-            validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
-    # Reset slashings
-    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
-    # Set randao mix
-    state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
-    # Set historical root accumulator
-    if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
-        historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
-        state.historical_roots.append(hash_tree_root(historical_batch))
-    # [Added in hf-1] Rotate current/previous epoch participation flags
+```
+
+#### Participation flags updates
+
+```python
+def process_participation_flag_updates(state: BeaconState) -> None:
+    """
+    Call to ``process_participation_flag_updates`` added to ``process_epoch`` in HF1
+    """
     state.previous_epoch_participation = state.current_epoch_participation
     state.current_epoch_participation = [Bitvector[PARTICIPATION_FLAGS_LENGTH]() for _ in range(len(state.validators))]
-    # [Removed in hf-1] Rotate current/previous epoch attestations
 ```

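Aside (not part of the diff): `process_participation_flag_updates` above replaces the phase 0 rotation of `PendingAttestation` lists with a rotation of per-validator flag vectors. A toy sketch of that rotation using plain Python lists instead of SSZ `Bitvector`s (the flag width below is illustrative):

```python
# Illustrative toy (plain lists, not SSZ Bitvectors) of the rotation performed above.
PARTICIPATION_FLAGS_LENGTH = 8  # illustrative width, not authoritative

def rotate_participation(current_epoch_participation, validator_count):
    # Current-epoch flags become the previous-epoch flags; current flags start empty.
    previous = current_epoch_participation
    current = [[False] * PARTICIPATION_FLAGS_LENGTH for _ in range(validator_count)]
    return previous, current

flags = [[True] + [False] * (PARTICIPATION_FLAGS_LENGTH - 1) for _ in range(4)]
previous, current = rotate_participation(flags, validator_count=4)
assert previous[0][0] is True                    # last epoch's flags are kept one epoch back
assert all(not any(bits) for bits in current)    # the new current-epoch flags are all cleared
```
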
@@ -113,7 +113,12 @@
 - [`process_rewards_and_penalties`](#process_rewards_and_penalties)
 - [Registry updates](#registry-updates)
 - [Slashings](#slashings)
-- [Final updates](#final-updates)
+- [Eth1 data votes updates](#eth1-data-votes-updates)
+- [Effective balances updates](#effective-balances-updates)
+- [Slashings balances updates](#slashings-balances-updates)
+- [Randao mixes updates](#randao-mixes-updates)
+- [Historical roots updates](#historical-roots-updates)
+- [Participation records rotation](#participation-records-rotation)
 - [Block processing](#block-processing)
 - [Block header](#block-header)
 - [RANDAO](#randao)

@@ -1250,7 +1255,12 @@ def process_epoch(state: BeaconState) -> None:
     process_rewards_and_penalties(state)
     process_registry_updates(state)
     process_slashings(state)
-    process_final_updates(state)
+    process_eth1_data_reset(state)
+    process_effective_balance_updates(state)
+    process_slashings_reset(state)
+    process_randao_mixes_reset(state)
+    process_historical_roots_update(state)
+    process_participation_record_updates(state)
 ```
 
 #### Helper functions

@@ -1557,15 +1567,19 @@ def process_slashings(state: BeaconState) -> None:
         decrease_balance(state, ValidatorIndex(index), penalty)
 ```
 
-#### Final updates
-
+#### Eth1 data votes updates
 ```python
-def process_final_updates(state: BeaconState) -> None:
-    current_epoch = get_current_epoch(state)
-    next_epoch = Epoch(current_epoch + 1)
+def process_eth1_data_reset(state: BeaconState) -> None:
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     # Reset eth1 data votes
     if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
         state.eth1_data_votes = []
+```
+
+#### Effective balances updates
+
+```python
+def process_effective_balance_updates(state: BeaconState) -> None:
     # Update effective balances with hysteresis
     for index, validator in enumerate(state.validators):
         balance = state.balances[index]

@@ -1577,14 +1591,41 @@ def process_final_updates(state: BeaconState) -> None:
             or validator.effective_balance + UPWARD_THRESHOLD < balance
         ):
             validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+```
+
+#### Slashings balances updates
+
+```python
+def process_slashings_reset(state: BeaconState) -> None:
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     # Reset slashings
     state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
+```
+
+#### Randao mixes updates
+
+```python
+def process_randao_mixes_reset(state: BeaconState) -> None:
+    current_epoch = get_current_epoch(state)
+    next_epoch = Epoch(current_epoch + 1)
     # Set randao mix
     state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
+```
+
+#### Historical roots updates
+```python
+def process_historical_roots_update(state: BeaconState) -> None:
     # Set historical root accumulator
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
         historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
         state.historical_roots.append(hash_tree_root(historical_batch))
+```
+
+#### Participation records rotation
+
+```python
+def process_participation_record_updates(state: BeaconState) -> None:
     # Rotate current/previous epoch attestations
     state.previous_epoch_attestations = state.current_epoch_attestations
     state.current_epoch_attestations = []

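Aside (not part of the diff): each split-out function above recomputes `next_epoch` locally instead of relying on a value shared inside the old monolithic `process_final_updates`, which is what lets the new epoch-processing tests exercise them independently. A toy sketch of that pattern for the slashings reset, using a dummy state rather than the spec's `BeaconState`:

```python
from dataclasses import dataclass, field
from typing import List

EPOCHS_PER_SLASHINGS_VECTOR = 8  # illustrative value

@dataclass
class ToyState:
    current_epoch: int = 0
    slashings: List[int] = field(default_factory=lambda: [100] * EPOCHS_PER_SLASHINGS_VECTOR)

def toy_process_slashings_reset(state: ToyState) -> None:
    next_epoch = state.current_epoch + 1  # recomputed locally, as in the diff
    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = 0

state = ToyState(current_epoch=3)
toy_process_slashings_reset(state)
assert state.slashings[4] == 0
assert all(value == 100 for i, value in enumerate(state.slashings) if i != 4)
```
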
@@ -419,7 +419,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
 - _[REJECT]_ The signature of `attestation` is valid.
 - _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
   (via both gossip and non-gossip sources)
-  (a client MAY queue aggregates for processing once block is retrieved).
+  (a client MAY queue attestations for processing once block is retrieved).
 - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
 - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
   `get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`

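Aside (not part of the diff): the `[IGNORE]` rule above lets a client hold back an attestation whose `beacon_block_root` has not been seen yet and process it once the block is retrieved. A self-contained toy sketch of such a queue (all names are illustrative, not client or spec code):

```python
from collections import defaultdict

class AttestationQueue:
    def __init__(self):
        self.pending = defaultdict(list)  # block_root -> attestations waiting for that block

    def on_attestation(self, attestation, known_blocks, process):
        root = attestation["beacon_block_root"]
        if root in known_blocks:
            process(attestation)
        else:
            self.pending[root].append(attestation)  # ignore for now; retry when the block arrives

    def on_block(self, block_root, process):
        for attestation in self.pending.pop(block_root, []):
            process(attestation)

queue = AttestationQueue()
seen = []
queue.on_attestation({"beacon_block_root": "0xabc"}, known_blocks=set(), process=seen.append)
queue.on_block("0xabc", process=seen.append)
assert len(seen) == 1
```
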
@@ -1054,11 +1054,16 @@ def process_epoch(state: BeaconState) -> None:
     process_justification_and_finalization(state)
     process_rewards_and_penalties(state)
     process_registry_updates(state)
-    process_reveal_deadlines(state)
-    process_challenge_deadlines(state)
+    process_reveal_deadlines(state) # Phase 1
+    process_challenge_deadlines(state) # Phase 1
     process_slashings(state)
-    process_final_updates(state) # phase 0 final updates
-    process_phase_1_final_updates(state)
+    process_eth1_data_reset(state)
+    process_effective_balance_updates(state)
+    process_slashings_reset(state)
+    process_randao_mixes_reset(state)
+    process_historical_roots_update(state)
+    process_participation_record_updates(state)
+    process_phase_1_final_updates(state) # Phase 1
 ```
 
 #### Phase 1 final updates

@@ -1,13 +1,22 @@
 
 process_calls = [
+    # PHASE0
     'process_justification_and_finalization',
     'process_rewards_and_penalties',
     'process_registry_updates',
     'process_reveal_deadlines',
     'process_challenge_deadlines',
     'process_slashings',
-    'process_final_updates',
-    'after_process_final_updates',
+    'process_eth1_data_reset',
+    'process_effective_balance_updates',
+    'process_slashings_reset',
+    'process_randao_mixes_reset',
+    'process_historical_roots_update',
+    'process_participation_record_updates',
+    # LIGHTCLIENT_PATCH
+    'process_sync_committee_updates',
+    # PHASE1
+    'process_phase_1_final_updates',
 ]
 
 

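Aside (not part of the diff): the ordering of `process_calls` matters because the epoch-processing test helpers appear to run the listed sub-transitions in order up to the one under test. A hedged sketch of that driving loop, shown as an illustration of the idea rather than the repository's `run_epoch_processing_to` implementation:

```python
def run_up_to(state, process_calls, stop_at, handlers):
    """Run each named sub-transition in order, stopping just before ``stop_at``."""
    for name in process_calls:
        if name == stop_at:
            return
        handler = handlers.get(name)
        if handler is not None:  # some names only exist in later phases
            handler(state)

# Toy usage: only the calls listed before the target are applied.
trace = []
handlers = {name: (lambda s, n=name: trace.append(n)) for name in ('a', 'b', 'c')}
run_up_to(state={}, process_calls=['a', 'b', 'c'], stop_at='c', handlers=handlers)
assert trace == ['a', 'b']
```
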
@@ -1,3 +1,4 @@
+from collections import Counter
 import random
 from eth2spec.test.helpers.block import (
     build_empty_block_for_next_slot,

@@ -12,10 +13,33 @@ from eth2spec.test.helpers.sync_committee import (
 )
 from eth2spec.test.context import (
     PHASE0, PHASE1,
+    MAINNET, MINIMAL,
     expect_assertion_error,
     with_all_phases_except,
+    with_configs,
     spec_state_test,
 )
+from eth2spec.utils.hash_function import hash
 
 
+def get_committee_indices(spec, state, duplicates=False):
+    '''
+    This utility function allows the caller to ensure there are or are not
+    duplicate validator indices in the returned committee based on
+    the boolean ``duplicates``.
+    '''
+    state = state.copy()
+    current_epoch = spec.get_current_epoch(state)
+    randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
+    while True:
+        committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+        if duplicates:
+            if len(committee) != len(set(committee)):
+                return committee
+        else:
+            if len(committee) == len(set(committee)):
+                return committee
+        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
+
+
 @with_all_phases_except([PHASE0, PHASE1])

@@ -72,12 +96,17 @@ def compute_sync_committee_participant_reward(spec, state, participant_index, ac
 
 
 @with_all_phases_except([PHASE0, PHASE1])
+@with_configs([MINIMAL], reason="to create nonduplicate committee")
 @spec_state_test
-def test_sync_committee_rewards(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+def test_sync_committee_rewards_nonduplicate_committee(spec, state):
+    committee = get_committee_indices(spec, state, duplicates=False)
     committee_size = len(committee)
     active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
 
+    # Preconditions of this test case
+    assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
+    assert committee_size == len(set(committee))
+
     yield 'pre', state
 
     pre_balances = state.balances.copy()

@@ -114,6 +143,57 @@ def test_sync_committee_rewards(spec, state):
         assert state.balances[index] == pre_balances[index] + expected_reward
 
 
+@with_all_phases_except([PHASE0, PHASE1])
+@with_configs([MAINNET], reason="to create duplicate committee")
+@spec_state_test
+def test_sync_committee_rewards_duplicate_committee(spec, state):
+    committee = get_committee_indices(spec, state, duplicates=True)
+    committee_size = len(committee)
+    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
+
+    # Preconditions of this test case
+    assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
+    assert committee_size > len(set(committee))
+
+    yield 'pre', state
+
+    pre_balances = state.balances.copy()
+
+    block = build_empty_block_for_next_slot(spec, state)
+    block.body.sync_committee_bits = [True] * committee_size
+    block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
+        spec,
+        state,
+        block.slot - 1,
+        committee,
+    )
+
+    signed_block = state_transition_and_sign_block(spec, state, block)
+
+    yield 'blocks', [signed_block]
+    yield 'post', state
+
+    multiplicities = Counter(committee)
+
+    for index in range(len(state.validators)):
+        expected_reward = 0
+
+        if index == block.proposer_index:
+            expected_reward += sum([spec.get_proposer_reward(state, index) for index in committee])
+
+        if index in committee:
+            reward = compute_sync_committee_participant_reward(
+                spec,
+                state,
+                index,
+                active_validator_count,
+                committee_size,
+            )
+            expected_reward += reward * multiplicities[index]
+
+        assert state.balances[index] == pre_balances[index] + expected_reward
+
+
 @with_all_phases_except([PHASE0, PHASE1])
 @spec_state_test
 def test_invalid_signature_past_block(spec, state):

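Aside (not part of the diff): the duplicate-committee test above multiplies the participant reward by each validator's multiplicity in the committee. A tiny worked example of that accounting with illustrative numbers:

```python
# Worked toy example of the multiplicity accounting used in the test above.
from collections import Counter

committee = [3, 7, 3, 9]          # validator 3 appears twice
participant_reward = 10           # stand-in for compute_sync_committee_participant_reward
multiplicities = Counter(committee)
assert multiplicities[3] == 2
expected = {index: participant_reward * count for index, count in multiplicities.items()}
assert expected[3] == 20 and expected[7] == 10
```
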
@@ -155,6 +235,7 @@ def test_invalid_signature_past_block(spec, state):
 
 
 @with_all_phases_except([PHASE0, PHASE1])
+@with_configs([MINIMAL], reason="to produce different committee sets")
 @spec_state_test
 def test_invalid_signature_previous_committee(spec, state):
     # NOTE: the `state` provided is at genesis and the process to select

@@ -163,17 +244,20 @@ def test_invalid_signature_previous_committee(spec, state):
     # To get a distinct committee so we can generate an "old" signature, we need to advance
     # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
     current_epoch = spec.get_current_epoch(state)
-    previous_committee = state.next_sync_committee
+    old_sync_committee = state.next_sync_committee
 
     epoch_in_future_sync_commitee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
     slot_in_future_sync_committee_period = epoch_in_future_sync_commitee_period * spec.SLOTS_PER_EPOCH
     transition_to(spec, state, slot_in_future_sync_committee_period)
 
-    pubkeys = [validator.pubkey for validator in state.validators]
-    committee = [pubkeys.index(pubkey) for pubkey in previous_committee.pubkeys]
-
     yield 'pre', state
 
+    # Use the previous sync committee to produce the signature.
+    pubkeys = [validator.pubkey for validator in state.validators]
+    # Ensure that the pubkey sets are different.
+    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
+    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
+
     block = build_empty_block_for_next_slot(spec, state)
     block.body.sync_committee_bits = [True] * len(committee)
     block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(

@@ -27,7 +27,7 @@ def test_sync_committees_progress(spec, state):
     assert state.current_sync_committee == first_sync_committee
     assert state.next_sync_committee == second_sync_committee
 
-    yield from run_epoch_processing_with(spec, state, 'process_final_updates')
+    yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')
 
     # Can compute the third committee having computed final balances in the last epoch
     # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`

@@ -2,45 +2,10 @@ from eth2spec.test.context import spec_state_test, with_all_phases
 from eth2spec.test.helpers.epoch_processing import (
     run_epoch_processing_with, run_epoch_processing_to
 )
-from eth2spec.test.helpers.state import transition_to
 
 
-def run_process_final_updates(spec, state):
-    yield from run_epoch_processing_with(spec, state, 'process_final_updates')
-
-
-@with_all_phases
-@spec_state_test
-def test_eth1_vote_no_reset(spec, state):
-    assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
-    # skip ahead to the end of the epoch
-    transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
-
-    for i in range(state.slot + 1): # add a vote for each skipped slot.
-        state.eth1_data_votes.append(
-            spec.Eth1Data(deposit_root=b'\xaa' * 32,
-                          deposit_count=state.eth1_deposit_index,
-                          block_hash=b'\xbb' * 32))
-
-    yield from run_process_final_updates(spec, state)
-
-    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
-
-
-@with_all_phases
-@spec_state_test
-def test_eth1_vote_reset(spec, state):
-    # skip ahead to the end of the voting period
-    state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
-    for i in range(state.slot + 1): # add a vote for each skipped slot.
-        state.eth1_data_votes.append(
-            spec.Eth1Data(deposit_root=b'\xaa' * 32,
-                          deposit_count=state.eth1_deposit_index,
-                          block_hash=b'\xbb' * 32))
-
-    yield from run_process_final_updates(spec, state)
-
-    assert len(state.eth1_data_votes) == 0
+def run_process_effective_balance_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_effective_balance_updates')
 
 
 @with_all_phases

@@ -48,7 +13,7 @@ def test_eth1_vote_reset(spec, state):
 def test_effective_balance_hysteresis(spec, state):
     # Prepare state up to the final-updates.
     # Then overwrite the balances, we only want to focus to be on the hysteresis based changes.
-    run_epoch_processing_to(spec, state, 'process_final_updates')
+    run_epoch_processing_to(spec, state, 'process_effective_balance_updates')
     # Set some edge cases for balances
     max = spec.MAX_EFFECTIVE_BALANCE
     min = spec.EJECTION_BALANCE

@@ -79,21 +44,7 @@ def test_effective_balance_hysteresis(spec, state):
         state.validators[i].effective_balance = pre_eff
         state.balances[i] = bal
 
-    yield 'pre', state
-    spec.process_final_updates(state)
-    yield 'post', state
+    yield from run_process_effective_balance_updates(spec, state)
 
     for i, (_, _, post_eff, name) in enumerate(cases):
         assert state.validators[i].effective_balance == post_eff, name
-
-
-@with_all_phases
-@spec_state_test
-def test_historical_root_accumulator(spec, state):
-    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
-    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
-    history_len = len(state.historical_roots)
-
-    yield from run_process_final_updates(spec, state)
-
-    assert len(state.historical_roots) == history_len + 1

@@ -0,0 +1,43 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.epoch_processing import (
+    run_epoch_processing_with,
+)
+from eth2spec.test.helpers.state import transition_to
+
+
+def run_process_eth1_data_reset(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset')
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_no_reset(spec, state):
+    assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
+    # skip ahead to the end of the epoch
+    transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
+
+    for i in range(state.slot + 1): # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_eth1_data_reset(spec, state)
+
+    assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
+
+
+@with_all_phases
+@spec_state_test
+def test_eth1_vote_reset(spec, state):
+    # skip ahead to the end of the voting period
+    state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
+    for i in range(state.slot + 1): # add a vote for each skipped slot.
+        state.eth1_data_votes.append(
+            spec.Eth1Data(deposit_root=b'\xaa' * 32,
+                          deposit_count=state.eth1_deposit_index,
+                          block_hash=b'\xbb' * 32))
+
+    yield from run_process_eth1_data_reset(spec, state)
+
+    assert len(state.eth1_data_votes) == 0

@@ -0,0 +1,20 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.epoch_processing import (
+    run_epoch_processing_with
+)
+
+
+def run_process_historical_roots_update(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
+
+
+@with_all_phases
+@spec_state_test
+def test_historical_root_accumulator(spec, state):
+    # skip ahead to near the end of the historical roots period (excl block before epoch processing)
+    state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
+    history_len = len(state.historical_roots)
+
+    yield from run_process_historical_roots_update(spec, state)
+
+    assert len(state.historical_roots) == history_len + 1

@@ -0,0 +1,21 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.epoch_processing import (
+    run_epoch_processing_with
+)
+
+
+def run_process_participation_record_updates(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_participation_record_updates')
+
+
+@with_all_phases
+@spec_state_test
+def test_updated_participation_record(spec, state):
+    state.previous_epoch_attestations = [spec.PendingAttestation(proposer_index=100)]
+    current_epoch_attestations = [spec.PendingAttestation(proposer_index=200)]
+    state.current_epoch_attestations = current_epoch_attestations
+
+    yield from run_process_participation_record_updates(spec, state)
+
+    assert state.previous_epoch_attestations == current_epoch_attestations
+    assert state.current_epoch_attestations == []

@@ -0,0 +1,21 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.epoch_processing import (
+    run_epoch_processing_with
+)
+
+
+def run_process_randao_mixes_reset(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_randao_mixes_reset')
+
+
+@with_all_phases
+@spec_state_test
+def test_updated_randao_mixes(spec, state):
+    next_epoch = spec.get_current_epoch(state) + 1
+    state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = b'\x56' * 32
+
+    yield from run_process_randao_mixes_reset(spec, state)
+
+    assert state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == spec.get_randao_mix(
+        state, spec.get_current_epoch(state)
+    )

@@ -0,0 +1,20 @@
+from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.epoch_processing import (
+    run_epoch_processing_with
+)
+
+
+def run_process_slashings_reset(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_slashings_reset')
+
+
+@with_all_phases
+@spec_state_test
+def test_flush_slashings(spec, state):
+    next_epoch = spec.get_current_epoch(state) + 1
+    state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100
+    assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0
+
+    yield from run_process_slashings_reset(spec, state)
+
+    assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0

@@ -37,10 +37,17 @@ The provided pre-state is already transitioned to just before the specific sub-t
 
 Sub-transitions:
 
 - `justification_and_finalization`
-- `rewards_and_penalties` (limited to `minimal` config)
+- `rewards_and_penalties`
 - `registry_updates`
 - `slashings`
-- `final_updates`
+- `eth1_data_reset`
+- `effective_balance_updates`
+- `slashings_reset`
+- `randao_mixes_reset`
+- `historical_roots_update`
+- `participation_record_updates`
 
 The resulting state should match the expected `post` state.

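Aside (not part of the diff): conceptually, a consumer of these vectors applies exactly one named sub-transition to the `pre` state and compares the result against `post`. A toy sketch of that check with a dummy state and handler table (the real file layout is defined by the test format itself and is not reproduced here):

```python
# Hedged sketch of what a sub-transition test case asserts: apply one named
# sub-transition to the pre-state and expect the post-state. Toy state and
# toy handlers only; not the repository's test runner.
def check_case(handlers, sub_transition, pre_state, post_state):
    state = dict(pre_state)
    handlers[sub_transition](state)
    assert state == post_state

handlers = {'eth1_data_reset': lambda s: s.update(eth1_data_votes=[])}
check_case(handlers,
           'eth1_data_reset',
           pre_state={'eth1_data_votes': ['vote'] * 3},
           post_state={'eth1_data_votes': []})
```
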
@@ -184,7 +184,7 @@ def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typin
 
 if __name__ == "__main__":
     gen_runner.run_generator("epoch_processing", [
-        create_provider('final_updates', test_process_final_updates, 'minimal'),
         create_provider('justification_and_finalization', test_process_justification_and_finalization, 'minimal'),
         ...
     ])

@@ -33,16 +33,21 @@ def create_provider(fork_name: str, handler_name: str,
 
 if __name__ == "__main__":
     phase_0_mods = {key: 'eth2spec.test.phase0.epoch_processing.test_process_' + key for key in [
-        'final_updates',
         'justification_and_finalization',
-        'registry_updates',
         'rewards_and_penalties',
+        'registry_updates',
         'slashings',
+        'eth1_data_reset',
+        'effective_balance_updates',
+        'slashings_reset',
+        'randao_mixes_reset',
+        'historical_roots_update',
+        'participation_record_updates',
     ]}
     phase_1_mods = {**{key: 'eth2spec.test.phase1.epoch_processing.test_process_' + key for key in [
-        'reveal_deadlines',
         'challenge_deadlines',
         'custody_final_updates',
+        'reveal_deadlines',
    ]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)
 
     gen_runner.run_generator(f"epoch_processing", [