Merge pull request #2408 from ethereum/translate-participation-test

Altair fork attestation translation tests
Danny Ryan 2021-05-13 13:36:30 -06:00 committed by GitHub
commit 7faeaba778
7 changed files with 345 additions and 153 deletions

View File

@@ -36,6 +36,7 @@
- [`get_base_reward_per_increment`](#get_base_reward_per_increment)
- [`get_base_reward`](#get_base_reward)
- [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
- [`get_attestation_participation_flag_indices`](#get_attestation_participation_flag_indices)
- [`get_flag_index_deltas`](#get_flag_index_deltas)
- [Modified `get_inactivity_penalty_deltas`](#modified-get_inactivity_penalty_deltas)
- [Beacon state mutators](#beacon-state-mutators)
@@ -350,6 +351,37 @@ def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epo
    return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
```

#### `get_attestation_participation_flag_indices`

```python
def get_attestation_participation_flag_indices(state: BeaconState,
                                               data: AttestationData,
                                               inclusion_delay: uint64) -> Sequence[int]:
    """
    Return the flag indices that are satisfied by an attestation.
    """
    if data.target.epoch == get_current_epoch(state):
        justified_checkpoint = state.current_justified_checkpoint
    else:
        justified_checkpoint = state.previous_justified_checkpoint

    # Matching roots
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
    is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    assert is_matching_source

    participation_flag_indices = []
    if is_matching_source and inclusion_delay <= integer_squareroot(SLOTS_PER_EPOCH):
        participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
    if is_matching_target and inclusion_delay <= SLOTS_PER_EPOCH:
        participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
    if is_matching_head and inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:
        participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)

    return participation_flag_indices
```
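
To make the timeliness thresholds above concrete, here is a small standalone sketch (not part of the spec or this diff) that mirrors the three conditions for an attestation whose source, target, and head all match; the constants are assumed mainnet values and `flags_for_delay` is a hypothetical helper.

```python
# Illustrative only: mirrors the timeliness conditions above for an attestation
# whose source, target, and head roots all match. Constants are assumed mainnet values.
from math import isqrt

SLOTS_PER_EPOCH = 32
MIN_ATTESTATION_INCLUSION_DELAY = 1
TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, TIMELY_HEAD_FLAG_INDEX = 0, 1, 2

def flags_for_delay(inclusion_delay: int) -> list:
    flags = []
    if inclusion_delay <= isqrt(SLOTS_PER_EPOCH):            # within sqrt(32) = 5 slots
        flags.append(TIMELY_SOURCE_FLAG_INDEX)
    if inclusion_delay <= SLOTS_PER_EPOCH:                   # within one epoch
        flags.append(TIMELY_TARGET_FLAG_INDEX)
    if inclusion_delay == MIN_ATTESTATION_INCLUSION_DELAY:   # the very next slot
        flags.append(TIMELY_HEAD_FLAG_INDEX)
    return flags

assert flags_for_delay(1) == [0, 1, 2]   # timely source, target, and head
assert flags_for_delay(5) == [0, 1]      # too late for head, still timely source/target
assert flags_for_delay(6) == [1]         # past 5 slots, so target only
assert flags_for_delay(33) == []         # past one epoch, no flags
```

Under these assumed constants, the head flag requires inclusion in the very next slot, the source flag within 5 slots, and the target flag within an epoch.
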
#### `get_flag_index_deltas`

```python
@@ -454,32 +486,18 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    committee = get_beacon_committee(state, data.slot, data.index)
    assert len(attestation.aggregation_bits) == len(committee)

    if data.target.epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
        justified_checkpoint = state.current_justified_checkpoint
    else:
        epoch_participation = state.previous_epoch_participation
        justified_checkpoint = state.previous_justified_checkpoint

    # Matching roots
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
    is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    assert is_matching_source

    # Participation flag indices
    participation_flag_indices = get_attestation_participation_flag_indices(state, data, state.slot - data.slot)

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))

    # Participation flag indices
    participation_flag_indices = []
    if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
        participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
    if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
        participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
    if is_matching_head and state.slot == data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
        participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)

    # Update epoch participation flags
    if data.target.epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
    else:
        epoch_participation = state.previous_epoch_participation

    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, data, attestation.aggregation_bits):
        for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):

View File

@@ -45,6 +45,20 @@ Care must be taken when transitioning through the fork boundary as implementatio
In particular, the outer `state_transition` function defined in the Phase 0 spec will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
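
For illustration only (not part of this diff), one possible shape for that slot-level hook is sketched below; the wrapper name `process_slots_with_altair_fork`, its return-a-state signature, and the exact placement of the check are assumptions rather than the spec's definition.

```python
def process_slots_with_altair_fork(state: BeaconState, slot: Slot) -> BeaconState:
    # Illustrative sketch only: apply the Altair upgrade exactly when the state
    # crosses into ALTAIR_FORK_EPOCH, even if the intervening slots were skipped.
    while state.slot < slot:
        process_slot(state)
        # Process epoch on the start slot of the next epoch
        if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch(state)
        state.slot = Slot(state.slot + 1)
        # If the next slot begins the fork epoch, swap in the upgraded state
        if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH:
            state = upgrade_to_altair(state)
    return state
```

The fork document's actual additions, `translate_participation` and the updated `upgrade_to_altair`, follow below.
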
```python
def translate_participation(state: BeaconState, pending_attestations: Sequence[phase0.PendingAttestation]) -> None:
    for attestation in pending_attestations:
        data = attestation.data
        inclusion_delay = attestation.inclusion_delay
        # Translate attestation inclusion info to flag indices
        participation_flag_indices = get_attestation_participation_flag_indices(state, data, inclusion_delay)

        # Apply flags to all attesting validators
        epoch_participation = state.previous_epoch_participation
        for index in get_attesting_indices(state, data, attestation.aggregation_bits):
            for flag_index in participation_flag_indices:
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)

def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
    epoch = phase0.get_current_epoch(pre)
    post = BeaconState(
@@ -84,6 +98,8 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
        # Inactivity
        inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
    )
    # Fill in previous epoch participation from the pre state's pending attestations
    translate_participation(post, pre.previous_epoch_attestations)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at the fork boundary

View File

@@ -14,47 +14,10 @@ from eth2spec.test.helpers.state import (
    next_epoch,
    next_epoch_via_block,
)
from eth2spec.test.helpers.altair.fork import (
    ALTAIR_FORK_TEST_META_TAGS,
    run_fork_test,
)

ALTAIR_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_altair(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])

View File

@@ -0,0 +1,120 @@
from random import Random

from eth2spec.test.context import (
    with_phases,
    with_custom_state,
    with_configs,
    spec_test, with_state,
    low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
    PHASE0, ALTAIR,
    MINIMAL,
)
from eth2spec.test.helpers.altair.fork import (
    ALTAIR_FORK_TEST_META_TAGS,
    run_fork_test,
)
from eth2spec.test.helpers.random import (
    randomize_state,
    randomize_attestation_participation,
)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_0(spec, phases, state):
    randomize_state(spec, state, rng=Random(1010))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_1(spec, phases, state):
    randomize_state(spec, state, rng=Random(2020))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_2(spec, phases, state):
    randomize_state(spec, state, rng=Random(3030))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_3(spec, phases, state):
    randomize_state(spec, state, rng=Random(4040))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_duplicate_attestations(spec, phases, state):
    randomize_state(spec, state, rng=Random(1111))
    # Note: `run_fork_test` empties `current_epoch_attestations`
    state.previous_epoch_attestations = state.previous_epoch_attestations + state.previous_epoch_attestations
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_mismatched_attestations(spec, phases, state):
    # Create a random state
    randomize_state(spec, state, rng=Random(2222))

    # Now make two copies
    state_0 = state.copy()
    state_1 = state.copy()

    # Randomize attestation participation of both
    randomize_attestation_participation(spec, state_0, rng=Random(3333))
    randomize_attestation_participation(spec, state_1, rng=Random(4444))

    # Note: `run_fork_test` empties `current_epoch_attestations`
    # Use pending attestations from both random states in a single state for testing
    state_0.previous_epoch_attestations = state_0.previous_epoch_attestations + state_1.previous_epoch_attestations

    yield from run_fork_test(phases[ALTAIR], state_0)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_low_balances(spec, phases, state):
    randomize_state(spec, state, rng=Random(5050))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_misc_balances(spec, phases, state):
    randomize_state(spec, state, rng=Random(6060))
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_configs([MINIMAL],
              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@spec_test
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_altair_fork_random_large_validator_set(spec, phases, state):
    randomize_state(spec, state, rng=Random(7070))
    yield from run_fork_test(phases[ALTAIR], state)

View File

@@ -0,0 +1,42 @@
ALTAIR_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    # Clean up state to be more realistic
    pre_state.current_epoch_attestations = []

    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_altair(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state
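
For orientation (not part of the diff): `run_fork_test` is a generator that yields labelled states, so a hypothetical consumer could collect its output as shown below, assuming `post_spec` and `pre_state` come from the eth2spec test framework.

```python
# Hypothetical consumer sketch: collect the labelled outputs of run_fork_test.
# `post_spec` and `pre_state` are assumed to be provided by the eth2spec test framework.
def collect_fork_test_outputs(post_spec, pre_state):
    outputs = {}
    for label, value in run_fork_test(post_spec, pre_state):
        # Labels are 'pre' and 'post'; values are the corresponding BeaconState objects
        outputs[label] = value
    return outputs
```
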

View File

@@ -0,0 +1,113 @@
from random import Random

from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch


def set_some_new_deposits(spec, state, rng):
    num_validators = len(state.validators)
    # Set ~1/10 to just recently deposited
    for index in range(num_validators):
        # If not already active, skip
        if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
            continue
        if rng.randrange(num_validators) < num_validators // 10:
            mock_deposit(spec, state, index)
            # Set ~half of selected to eligible for activation
            if rng.choice([True, False]):
                state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)


def exit_random_validators(spec, state, rng):
    if spec.get_current_epoch(state) < 5:
        # Move epochs forward to allow for some validators already exited/withdrawable
        for _ in range(5):
            next_epoch(spec, state)

    current_epoch = spec.get_current_epoch(state)
    # Exit ~1/2 of validators
    for index in spec.get_active_validator_indices(state, current_epoch):
        if rng.choice([True, False]):
            continue

        validator = state.validators[index]
        validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
        # ~1/2 are withdrawable
        if rng.choice([True, False]):
            validator.withdrawable_epoch = current_epoch
        else:
            validator.withdrawable_epoch = current_epoch + 1


def slash_random_validators(spec, state, rng):
    # Slash ~1/2 of validators
    for index in range(len(state.validators)):
        # slash at least one validator
        if index == 0 or rng.choice([True, False]):
            spec.slash_validator(state, index)


def randomize_epoch_participation(spec, state, epoch, rng):
    assert epoch in (spec.get_current_epoch(state), spec.get_previous_epoch(state))
    if not is_post_altair(spec):
        if epoch == spec.get_current_epoch(state):
            pending_attestations = state.current_epoch_attestations
        else:
            pending_attestations = state.previous_epoch_attestations
        for pending_attestation in pending_attestations:
            # ~1/3 have bad target
            if rng.randint(0, 2) == 0:
                pending_attestation.data.target.root = b'\x55' * 32
            # ~1/3 have bad head
            if rng.randint(0, 2) == 0:
                pending_attestation.data.beacon_block_root = b'\x66' * 32
            # ~50% participation
            pending_attestation.aggregation_bits = [rng.choice([True, False])
                                                    for _ in pending_attestation.aggregation_bits]
            # Random inclusion delay
            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    else:
        if epoch == spec.get_current_epoch(state):
            epoch_participation = state.current_epoch_participation
        else:
            epoch_participation = state.previous_epoch_participation
        for index in range(len(state.validators)):
            # ~1/3 have bad head or bad target or not timely enough
            is_timely_correct_head = rng.randint(0, 2) != 0
            flags = epoch_participation[index]

            def set_flag(index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**index)
                if value:
                    flags |= flag
                else:
                    flags &= 0xff ^ flag

            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
            if is_timely_correct_head:
                # If timely head, then must be timely target
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
                # If timely head, then must be timely source
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
            else:
                # ~50% of remaining have bad target or not timely enough
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
                # ~50% of remaining have bad source or not timely enough
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
            epoch_participation[index] = flags


def randomize_attestation_participation(spec, state, rng=Random(8020)):
    cached_prepare_state_with_attestations(spec, state)
    randomize_epoch_participation(spec, state, spec.get_previous_epoch(state), rng)
    randomize_epoch_participation(spec, state, spec.get_current_epoch(state), rng)


def randomize_state(spec, state, rng=Random(8020)):
    set_some_new_deposits(spec, state, rng)
    exit_random_validators(spec, state, rng)
    slash_random_validators(spec, state, rng)
    randomize_attestation_participation(spec, state, rng)
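
As an aside (not from the diff), the `set_flag` closure above is plain bit arithmetic over a one-byte participation bitfield; a minimal standalone illustration follows, assuming flag indices 0, 1, 2 for source, target, and head.

```python
# Standalone illustration of the bitfield arithmetic used by `set_flag` above.
# Flag indices 0/1/2 are assumed to mirror TIMELY_SOURCE/TARGET/HEAD; `flags` is one byte.
TIMELY_SOURCE, TIMELY_TARGET, TIMELY_HEAD = 0, 1, 2

flags = 0
flags |= 2**TIMELY_HEAD          # set the head flag    -> 0b100
flags |= 2**TIMELY_TARGET        # set the target flag  -> 0b110
flags &= 0xff ^ 2**TIMELY_HEAD   # clear the head flag  -> 0b010
assert flags == 0b010
```
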

View File

@@ -3,9 +3,16 @@ from lru import LRU
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.test.context import is_post_altair

from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.state import next_epoch

from eth2spec.test.helpers.state import (
    next_epoch,
)
from eth2spec.test.helpers.random import (
    set_some_new_deposits, exit_random_validators, slash_random_validators,
    randomize_state,
)
from eth2spec.test.helpers.attestations import (
    cached_prepare_state_with_attestations,
)

from eth2spec.utils.ssz.ssz_typing import Container, uint64, List
@@ -285,49 +292,6 @@ def leaking(epochs=None):
    return deco
def set_some_new_deposits(spec, state, rng):
    num_validators = len(state.validators)
    # Set ~1/10 to just recently deposited
    for index in range(num_validators):
        # If not already active, skip
        if not spec.is_active_validator(state.validators[index], spec.get_current_epoch(state)):
            continue
        if rng.randrange(num_validators) < num_validators // 10:
            mock_deposit(spec, state, index)
            # Set ~half of selected to eligible for activation
            if rng.choice([True, False]):
                state.validators[index].activation_eligibility_epoch = spec.get_current_epoch(state)


def exit_random_validators(spec, state, rng):
    if spec.get_current_epoch(state) < 5:
        # Move epochs forward to allow for some validators already exited/withdrawable
        for _ in range(5):
            next_epoch(spec, state)

    current_epoch = spec.get_current_epoch(state)
    # Exit ~1/2 of validators
    for index in spec.get_active_validator_indices(state, current_epoch):
        if rng.choice([True, False]):
            continue

        validator = state.validators[index]
        validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
        # ~1/2 are withdrawable
        if rng.choice([True, False]):
            validator.withdrawable_epoch = current_epoch
        else:
            validator.withdrawable_epoch = current_epoch + 1


def slash_random_validators(spec, state, rng):
    # Slash ~1/2 of validators
    for index in range(len(state.validators)):
        # slash at least one validator
        if index == 0 or rng.choice([True, False]):
            spec.slash_validator(state, index)


def run_test_empty(spec, state):
    # Do not add any attestations to state
@@ -531,49 +495,5 @@ def run_test_all_balances_too_low_for_reward(spec, state):
def run_test_full_random(spec, state, rng=Random(8020)):
    set_some_new_deposits(spec, state, rng)
    exit_random_validators(spec, state, rng)
    slash_random_validators(spec, state, rng)

    cached_prepare_state_with_attestations(spec, state)

    if not is_post_altair(spec):
        for pending_attestation in state.previous_epoch_attestations:
            # ~1/3 have bad target
            if rng.randint(0, 2) == 0:
                pending_attestation.data.target.root = b'\x55' * 32
            # ~1/3 have bad head
            if rng.randint(0, 2) == 0:
                pending_attestation.data.beacon_block_root = b'\x66' * 32
            # ~50% participation
            pending_attestation.aggregation_bits = [rng.choice([True, False])
                                                    for _ in pending_attestation.aggregation_bits]
            # Random inclusion delay
            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
    else:
        for index in range(len(state.validators)):
            # ~1/3 have bad head or bad target or not timely enough
            is_timely_correct_head = rng.randint(0, 2) != 0
            flags = state.previous_epoch_participation[index]

            def set_flag(index, value):
                nonlocal flags
                flag = spec.ParticipationFlags(2**index)
                if value:
                    flags |= flag
                else:
                    flags &= 0xff ^ flag

            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
            if is_timely_correct_head:
                # If timely head, then must be timely target
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
                # If timely head, then must be timely source
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
            else:
                # ~50% of remaining have bad target or not timely enough
                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
                # ~50% of remaining have bad source or not timely enough
                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
            state.previous_epoch_participation[index] = flags

    randomize_state(spec, state, rng)

    yield from run_deltas(spec, state)