Merge branch 'dev' into set-random-to-prev-randao-mix

commit df657f7c93

Makefile | 2
@@ -173,7 +173,7 @@ define run_generator
echo "generator $(1) finished"
endef

-# The tests dir itself is simply build by creating the directory (recursively creating deeper directories if necessary)
+# The tests dir itself is simply built by creating the directory (recursively creating deeper directories if necessary)
$(TEST_VECTOR_DIR):
	$(info creating test output directory, for generators: ${GENERATOR_TARGETS})
	mkdir -p $@

@@ -45,6 +45,7 @@ The merge is still actively in development. The exact specification has not been
* [Merge fork](specs/merge/fork.md)
* [Fork Choice changes](specs/merge/fork-choice.md)
* [Validator additions](specs/merge/validator.md)
+* [Client settings](specs/merge/client_settings.md)

### Sharding

@@ -58,8 +58,8 @@ INACTIVITY_SCORE_RECOVERY_RATE: 16
EJECTION_BALANCE: 16000000000
# 2**2 (= 4)
MIN_PER_EPOCH_CHURN_LIMIT: 4
-# 2**16 (= 65,536)
-CHURN_LIMIT_QUOTIENT: 65536
+# [customized] scale queue churn at much lower validator counts for testing
+CHURN_LIMIT_QUOTIENT: 32


# Deposit contract
setup.py | 3

@@ -509,8 +509,7 @@ ExecutionState = Any


def get_pow_block(hash: Bytes32) -> PowBlock:
-    return PowBlock(block_hash=hash, parent_hash=Bytes32(), is_valid=True, is_processed=True,
-                    total_difficulty=uint256(0), difficulty=uint256(0))
+    return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0), difficulty=uint256(0))


def get_execution_state(execution_state_root: Bytes32) -> ExecutionState:
@@ -0,0 +1,26 @@
+<!-- START doctoc generated TOC please keep comment here to allow auto update -->
+<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
+**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
+
+- [The Merge -- Client Settings](#the-merge----client-settings)
+  - [Override terminal total difficulty](#override-terminal-total-difficulty)
+
+<!-- END doctoc generated TOC please keep comment here to allow auto update -->
+
+# The Merge -- Client Settings
+
+**Notice**: This document is a work-in-progress for researchers and implementers.
+
+This document specifies configurable settings that clients must implement for the Merge.
+
+### Override terminal total difficulty
+
+To coordinate manual overrides to [`terminal_total_difficulty`](fork-choice.md#transitionstore), clients
+must provide `--terminal-total-difficulty-override` as a configurable setting.
+
+If `TransitionStore` has already [been initialized](./fork.md#initializing-transition-store), this alters the previously initialized value of
+`TransitionStore.terminal_total_difficulty`, otherwise this setting initializes `TransitionStore` with the specified
+`terminal_total_difficulty`, bypassing `compute_terminal_total_difficulty` and the use of an `anchor_pow_block`.
+
+Except under exceptional scenarios, this setting is expected to not be used, and `terminal_total_difficulty` will operate with the
+[default functionality](fork.md#initializing-transition-store). Sufficient warning to the user about this exceptional configurable setting should be provided.
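A minimal sketch of how a client could wire this override, assuming a hypothetical parsed CLI value; `TransitionStore` and `uint256` are the spec's types, everything else here is made up for illustration and is not part of the spec:

```python
def apply_terminal_total_difficulty_override(args, transition_store):
    # `args.terminal_total_difficulty_override` is a hypothetical parsed value of the
    # --terminal-total-difficulty-override setting; None means "not supplied".
    if args.terminal_total_difficulty_override is None:
        return transition_store
    override = uint256(args.terminal_total_difficulty_override)
    if transition_store is not None:
        # Alter the previously initialized value.
        transition_store.terminal_total_difficulty = override
        return transition_store
    # Initialize directly with the override, bypassing compute_terminal_total_difficulty
    # and the use of an anchor_pow_block.
    return TransitionStore(terminal_total_difficulty=override)
```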
@@ -83,8 +83,6 @@ class TransitionStore(object):
class PowBlock(object):
    block_hash: Hash32
    parent_hash: Hash32
-    is_processed: boolean
-    is_valid: boolean
    total_difficulty: uint256
    difficulty: uint256
```
@@ -93,7 +91,7 @@ class PowBlock(object):

Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that given the hash of the PoW block returns its data.

-*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required.
+*Note*: The `eth_getBlockByHash` JSON-RPC method may be used to pull this information from an execution client.

### `is_valid_terminal_pow_block`

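The lookup could be sketched roughly as below; the endpoint URL and the helper name are assumptions, and the spec does not define `get_pow_block` this way, but the standard `eth_getBlockByHash` response does carry the parent hash and (total) difficulty a `PowBlock` needs:

```python
import requests

def fetch_pow_block_data(block_hash_hex, rpc_url="http://localhost:8545"):
    # eth_getBlockByHash takes the block hash and a flag for full transaction bodies.
    payload = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "eth_getBlockByHash",
        "params": [block_hash_hex, False],
    }
    result = requests.post(rpc_url, json=payload).json()["result"]
    return {
        "block_hash": result["hash"],
        "parent_hash": result["parentHash"],
        "total_difficulty": int(result["totalDifficulty"], 16),
        "difficulty": int(result["difficulty"], 16),
    }
```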
@@ -103,7 +101,7 @@ Used by fork-choice handler, `on_block`.
def is_valid_terminal_pow_block(transition_store: TransitionStore, block: PowBlock, parent: PowBlock) -> bool:
    is_total_difficulty_reached = block.total_difficulty >= transition_store.terminal_total_difficulty
    is_parent_total_difficulty_valid = parent.total_difficulty < transition_store.terminal_total_difficulty
-    return block.is_valid and is_total_difficulty_reached and is_parent_total_difficulty_valid
+    return is_total_difficulty_reached and is_parent_total_difficulty_valid
```

## Updated fork-choice handlers
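With the `is_valid` check removed, the terminal-block test reduces to two total-difficulty comparisons. A worked example with made-up numbers:

```python
# Purely illustrative values, not from any network configuration.
terminal_total_difficulty = 1000
parent_total_difficulty = 990
block_total_difficulty = 1005

is_total_difficulty_reached = block_total_difficulty >= terminal_total_difficulty       # True
is_parent_total_difficulty_valid = parent_total_difficulty < terminal_total_difficulty  # True
assert is_total_difficulty_reached and is_parent_total_difficulty_valid  # terminal PoW block

# If the parent had already crossed the threshold (say 1001), the second check
# fails and this block is not the terminal PoW block.
```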
@@ -128,17 +126,16 @@ def on_block(store: Store, signed_block: SignedBeaconBlock, transition_store: Tr
    # Check block is a descendant of the finalized block at the checkpoint finalized slot
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root

-    # [New in Merge]
-    if (transition_store is not None) and is_merge_block(pre_state, block.body):
-        # Delay consideration of block until PoW block is processed by the PoW node
-        pow_block = get_pow_block(block.body.execution_payload.parent_hash)
-        pow_parent = get_pow_block(pow_block.parent_hash)
-        assert pow_block.is_processed
-        assert is_valid_terminal_pow_block(transition_store, pow_block, pow_parent)

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    state_transition(state, signed_block, True)

+    # [New in Merge]
+    if (transition_store is not None) and is_merge_block(pre_state, block.body):
+        pow_block = get_pow_block(block.body.execution_payload.parent_hash)
+        pow_parent = get_pow_block(pow_block.parent_hash)
+        assert is_valid_terminal_pow_block(transition_store, pow_block, pow_parent)

    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    # Add new state for this block to the store
@@ -103,7 +103,7 @@ def upgrade_to_merge(pre: altair.BeaconState) -> BeaconState:

### Initializing transition store

-If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, a transition store is initialized to be further utilized by the transition process of the Merge.
+If `state.slot % SLOTS_PER_EPOCH == 0`, `compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH`, and the transition store has not already been initialized, a transition store is initialized to be further utilized by the transition process of the Merge.

Transition store initialization occurs after the state has been modified by corresponding `upgrade_to_merge` function.

@@ -127,3 +127,6 @@ def initialize_transition_store(state: BeaconState) -> TransitionStore:
    pow_block = get_pow_block(state.eth1_data.block_hash)
    return get_transition_store(pow_block)
```
+
+*Note*: Transition store can also be initialized at client startup by [overriding terminal total
+difficulty](client_settings.md#override-terminal-total-difficulty).
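A rough sketch of the guarded initialization the prose above describes; the wrapper function is hypothetical, while `SLOTS_PER_EPOCH`, `compute_epoch_at_slot`, `MERGE_FORK_EPOCH`, and `initialize_transition_store` are the spec's own names:

```python
def maybe_initialize_transition_store(state, transition_store):
    # Hypothetical client-side wrapper around the spec helpers.
    is_epoch_start = state.slot % SLOTS_PER_EPOCH == 0
    is_merge_fork_epoch = compute_epoch_at_slot(state.slot) == MERGE_FORK_EPOCH
    if is_epoch_start and is_merge_fork_epoch and transition_store is None:
        # Runs after upgrade_to_merge has already modified the state.
        transition_store = initialize_transition_store(state)
    return transition_store
```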
@@ -1 +1 @@
-1.1.0-beta.3
+1.1.0-beta.4
@@ -77,6 +77,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
        required=False,
        help="specify presets to run with. Allows all if no preset names are specified.",
    )
+    parser.add_argument(
+        "-c",
+        "--collect-only",
+        action="store_true",
+        default=False,
+        help="if set only print tests to generate, do not actually run the test and dump the target data",
+    )

    args = parser.parse_args()
    output_dir = args.output_dir
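A tiny standalone check of the new flag's behaviour, using plain `argparse` independently of the generator code (illustrative only):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-c", "--collect-only", action="store_true", default=False)

args = parser.parse_args(["--collect-only"])
assert args.collect_only is True          # only list the tests, don't generate them
assert parser.parse_args([]).collect_only is False
```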
@@ -100,12 +107,15 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    if len(presets) != 0:
        print(f"Filtering test-generator runs to only include presets: {', '.join(presets)}")

+    collect_only = args.collect_only
+    collected_test_count = 0
    generated_test_count = 0
    skipped_test_count = 0
    provider_start = time.time()
    for tprov in test_providers:
-        # runs anything that we don't want to repeat for every test case.
-        tprov.prepare()
+        if not collect_only:
+            # runs anything that we don't want to repeat for every test case.
+            tprov.prepare()

        for test_case in tprov.make_cases():
            case_dir = (
@@ -115,6 +125,11 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
            )
            incomplete_tag_file = case_dir / "INCOMPLETE"

+            collected_test_count += 1
+            if collect_only:
+                print(f"Collected test at: {case_dir}")
+                continue
+
            if case_dir.exists():
                if not args.force and not incomplete_tag_file.exists():
                    skipped_test_count += 1
@@ -193,11 +208,14 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    provider_end = time.time()
    span = round(provider_end - provider_start, 2)

-    summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
-    summary_message += f" ({skipped_test_count} skipped tests)"
-    if span > TIME_THRESHOLD_TO_PRINT:
-        summary_message += f" in {span} seconds"
-    print(summary_message)
+    if collect_only:
+        print(f"Collected {collected_test_count} tests in total")
+    else:
+        summary_message = f"completed generation of {generator_name} with {generated_test_count} tests"
+        summary_message += f" ({skipped_test_count} skipped tests)"
+        if span > TIME_THRESHOLD_TO_PRINT:
+            summary_message += f" in {span} seconds"
+        print(summary_message)


def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):
@@ -1,6 +1,6 @@
from importlib import import_module
from inspect import getmembers, isfunction
-from typing import Any, Callable, Dict, Iterable, Optional
+from typing import Any, Callable, Dict, Iterable, Optional, List, Union

from eth2spec.utils import bls
from eth2spec.test.helpers.constants import ALL_PRESETS, TESTGEN_FORKS
@@ -59,8 +59,10 @@ def generate_from_tests(runner_name: str, handler_name: str, src: Any,
def get_provider(create_provider_fn: Callable[[SpecForkName, PresetBaseName, str, str], TestProvider],
                 fork_name: SpecForkName,
                 preset_name: PresetBaseName,
-                all_mods: Dict[str, Dict[str, str]]) -> Iterable[TestProvider]:
+                all_mods: Dict[str, Dict[str, Union[List[str], str]]]) -> Iterable[TestProvider]:
    for key, mod_name in all_mods[fork_name].items():
+        if not isinstance(mod_name, List):
+            mod_name = [mod_name]
        yield create_provider_fn(
            fork_name=fork_name,
            preset_name=preset_name,
@@ -75,16 +77,17 @@ def get_create_provider_fn(runner_name: str) -> Callable[[SpecForkName, str, str
        return

    def create_provider(fork_name: SpecForkName, preset_name: PresetBaseName,
-                        handler_name: str, tests_src_mod_name: str) -> TestProvider:
+                        handler_name: str, tests_src_mod_name: List[str]) -> TestProvider:
        def cases_fn() -> Iterable[TestCase]:
-            tests_src = import_module(tests_src_mod_name)
-            return generate_from_tests(
-                runner_name=runner_name,
-                handler_name=handler_name,
-                src=tests_src,
-                fork_name=fork_name,
-                preset_name=preset_name,
-            )
+            for mod_name in tests_src_mod_name:
+                tests_src = import_module(mod_name)
+                yield from generate_from_tests(
+                    runner_name=runner_name,
+                    handler_name=handler_name,
+                    src=tests_src,
+                    fork_name=fork_name,
+                    preset_name=preset_name,
+                )

        return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
    return create_provider
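For illustration, the `all_mods` mapping a generator passes in can now mix a single module string and a list of modules per handler; the module paths below are hypothetical and only show the shape the `Union[List[str], str]` type allows:

```python
# Hypothetical module paths, for illustration only.
all_mods = {
    "altair": {
        # Still allowed: one test module given as a plain string.
        "light_client": "eth2spec.test.altair.unittests.test_sync_protocol",
        # New: several test modules grouped under a single handler.
        "sync_aggregate": [
            "eth2spec.test.altair.block_processing.sync_aggregate.test_process_sync_aggregate",
            "eth2spec.test.altair.block_processing.sync_aggregate.test_process_sync_aggregate_random",
        ],
    },
}
```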
@@ -5,6 +5,7 @@ from eth2spec.test.helpers.block import (
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    transition_to,
+    next_epoch_via_block,
)
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,

@@ -12,10 +13,12 @@ from eth2spec.test.helpers.constants import (
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
    compute_committee_indices,
-    get_committee_indices,
    run_sync_committee_processing,
    run_successful_sync_committee_test,
)
+from eth2spec.test.helpers.voluntary_exits import (
+    get_unslashed_exited_validators,
+)
from eth2spec.test.context import (
    with_altair_and_later,
    with_presets,
@@ -28,19 +31,17 @@ from eth2spec.test.context import (
@spec_state_test
@always_bls
def test_invalid_signature_bad_domain(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
-    rng = random.Random(2020)
-    random_participant = rng.choice(committee_indices)
+    committee_indices = compute_committee_indices(spec, state)

    block = build_empty_block_for_next_slot(spec, state)
-    # Exclude one participant whose signature was included.
    block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[index != random_participant for index in committee_indices],
+        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee_indices,  # full committee signs
+            block_root=block.parent_root,
            domain_type=spec.DOMAIN_BEACON_ATTESTER,  # Incorrect domain
        )
    )
@@ -51,7 +52,7 @@ def test_invalid_signature_bad_domain(spec, state):
@spec_state_test
@always_bls
def test_invalid_signature_missing_participant(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_indices = compute_committee_indices(spec, state)
    rng = random.Random(2020)
    random_participant = rng.choice(committee_indices)

|
@ -64,6 +65,7 @@ def test_invalid_signature_missing_participant(spec, state):
|
||||||
state,
|
state,
|
||||||
block.slot - 1,
|
block.slot - 1,
|
||||||
committee_indices, # full committee signs
|
committee_indices, # full committee signs
|
||||||
|
block_root=block.parent_root,
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||||
|
@@ -114,7 +116,7 @@ def test_invalid_signature_infinite_signature_with_single_participant(spec, stat
@spec_state_test
@always_bls
def test_invalid_signature_extra_participant(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_indices = compute_committee_indices(spec, state)
    rng = random.Random(3030)
    random_participant = rng.choice(committee_indices)

@@ -127,6 +129,7 @@ def test_invalid_signature_extra_participant(spec, state):
            state,
            block.slot - 1,
            [index for index in committee_indices if index != random_participant],
+            block_root=block.parent_root,
        )
    )

@@ -137,7 +140,7 @@ def test_invalid_signature_extra_participant(spec, state):
@with_presets([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
-    committee_indices = get_committee_indices(spec, state, duplicates=False)
+    committee_indices = compute_committee_indices(spec, state)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

@@ -153,7 +156,7 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state):
-    committee_indices = get_committee_indices(spec, state, duplicates=True)
+    committee_indices = compute_committee_indices(spec, state)
    committee_size = len(committee_indices)
    committee_bits = [False] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

@@ -169,7 +172,7 @@ def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_half_participation(spec, state):
-    committee_indices = get_committee_indices(spec, state, duplicates=True)
+    committee_indices = compute_committee_indices(spec, state)
    committee_size = len(committee_indices)
    committee_bits = [True] * (committee_size // 2) + [False] * (committee_size // 2)
    assert len(committee_bits) == committee_size

@@ -186,7 +189,7 @@ def test_sync_committee_rewards_duplicate_committee_half_participation(spec, sta
@with_presets([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee_full_participation(spec, state):
-    committee_indices = get_committee_indices(spec, state, duplicates=True)
+    committee_indices = compute_committee_indices(spec, state)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
@@ -202,7 +205,7 @@ def test_sync_committee_rewards_duplicate_committee_full_participation(spec, sta
@spec_state_test
@always_bls
def test_sync_committee_rewards_not_full_participants(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_indices = compute_committee_indices(spec, state)
    rng = random.Random(1010)
    committee_bits = [rng.choice([True, False]) for _ in committee_indices]

@@ -213,7 +216,7 @@ def test_sync_committee_rewards_not_full_participants(spec, state):
@spec_state_test
@always_bls
def test_sync_committee_rewards_empty_participants(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_indices = compute_committee_indices(spec, state)
    committee_bits = [False for _ in committee_indices]

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@@ -223,7 +226,7 @@ def test_sync_committee_rewards_empty_participants(spec, state):
@spec_state_test
@always_bls
def test_invalid_signature_past_block(spec, state):
-    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_indices = compute_committee_indices(spec, state)

    for _ in range(2):
        # NOTE: need to transition twice to move beyond the degenerate case at genesis

@@ -236,6 +239,7 @@ def test_invalid_signature_past_block(spec, state):
            state,
            block.slot - 1,
            committee_indices,
+            block_root=block.parent_root,
        )
    )

@@ -286,6 +290,7 @@ def test_invalid_signature_previous_committee(spec, state):
            state,
            block.slot - 1,
            committee_indices,
+            block_root=block.parent_root,
        )
    )

@@ -327,6 +332,7 @@ def test_valid_signature_future_committee(spec, state):
            state,
            block.slot - 1,
            committee_indices,
+            block_root=block.parent_root,
        )
    )

@@ -360,6 +366,7 @@ def test_proposer_in_committee_without_participation(spec, state):
            state,
            block.slot - 1,
            participants,
+            block_root=block.parent_root,
        )
    )

@@ -396,6 +403,7 @@ def test_proposer_in_committee_with_participation(spec, state):
            state,
            block.slot - 1,
            committee_indices,
+            block_root=block.parent_root,
        )
    )

@@ -406,3 +414,191 @@ def test_proposer_in_committee_with_participation(spec, state):
        else:
            state_transition_and_sign_block(spec, state, block)
    raise AssertionError("failed to find a proposer in the sync committee set; check test setup")
+
+
+def _exit_validator_from_committee_and_transition_state(spec,
+                                                        state,
+                                                        committee_indices,
+                                                        rng,
+                                                        target_epoch_provider,
+                                                        withdrawable_offset=1):
+    exited_validator_index = rng.sample(committee_indices, 1)[0]
+    validator = state.validators[exited_validator_index]
+    current_epoch = spec.get_current_epoch(state)
+    validator.exit_epoch = current_epoch
+    validator.withdrawable_epoch = validator.exit_epoch + withdrawable_offset
+
+    target_epoch = target_epoch_provider(state.validators[exited_validator_index])
+    target_slot = target_epoch * spec.SLOTS_PER_EPOCH
+    transition_to(spec, state, target_slot)
+
+    exited_validator_indices = get_unslashed_exited_validators(spec, state)
+    assert exited_validator_index in exited_validator_indices
+    exited_pubkey = state.validators[exited_validator_index].pubkey
+    assert exited_pubkey in state.current_sync_committee.pubkeys
+
+    return exited_validator_index
+
+
+@with_altair_and_later
+@spec_state_test
+@always_bls
+def test_sync_committee_with_participating_exited_member(spec, state):
+    # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    # move forward via some blocks
+    for _ in range(3):
+        next_epoch_via_block(spec, state)
+
+    committee_indices = compute_committee_indices(spec, state)
+    rng = random.Random(1010)
+
+    exited_index = _exit_validator_from_committee_and_transition_state(
+        spec,
+        state,
+        committee_indices,
+        rng,
+        lambda v: v.exit_epoch,
+    )
+
+    current_epoch = spec.get_current_epoch(state)
+    assert current_epoch < state.validators[exited_index].withdrawable_epoch
+
+    block = build_empty_block_for_next_slot(spec, state)
+    block.body.sync_aggregate = spec.SyncAggregate(
+        sync_committee_bits=[True] * len(committee_indices),
+        sync_committee_signature=compute_aggregate_sync_committee_signature(
+            spec,
+            state,
+            block.slot - 1,
+            committee_indices,  # full committee signs
+            block_root=block.parent_root,
+        )
+    )
+    yield from run_sync_committee_processing(spec, state, block)
+
+
+@with_altair_and_later
+@spec_state_test
+@always_bls
+def test_sync_committee_with_nonparticipating_exited_member(spec, state):
+    # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    # move forward via some blocks
+    for _ in range(3):
+        next_epoch_via_block(spec, state)
+
+    committee_indices = compute_committee_indices(spec, state)
+    rng = random.Random(1010)
+
+    exited_index = _exit_validator_from_committee_and_transition_state(
+        spec,
+        state,
+        committee_indices,
+        rng,
+        lambda v: v.exit_epoch,
+    )
+    exited_pubkey = state.validators[exited_index].pubkey
+
+    current_epoch = spec.get_current_epoch(state)
+    assert current_epoch < state.validators[exited_index].withdrawable_epoch
+
+    exited_committee_index = state.current_sync_committee.pubkeys.index(exited_pubkey)
+    block = build_empty_block_for_next_slot(spec, state)
+    committee_bits = [i != exited_committee_index for i in committee_indices]
+    committee_indices = [index for index in committee_indices if index != exited_committee_index]
+    block.body.sync_aggregate = spec.SyncAggregate(
+        sync_committee_bits=committee_bits,
+        sync_committee_signature=compute_aggregate_sync_committee_signature(
+            spec,
+            state,
+            block.slot - 1,
+            committee_indices,  # with exited validator removed
+            block_root=block.parent_root,
+        )
+    )
+    yield from run_sync_committee_processing(spec, state, block)
+
+
+@with_altair_and_later
+@spec_state_test
+@always_bls
+def test_sync_committee_with_participating_withdrawable_member(spec, state):
+    # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    # move forward via some blocks
+    for _ in range(3):
+        next_epoch_via_block(spec, state)
+
+    committee_indices = compute_committee_indices(spec, state)
+    rng = random.Random(1010)
+
+    exited_index = _exit_validator_from_committee_and_transition_state(
+        spec,
+        state,
+        committee_indices,
+        rng,
+        lambda v: v.withdrawable_epoch + 1,
+    )
+
+    current_epoch = spec.get_current_epoch(state)
+    assert current_epoch > state.validators[exited_index].withdrawable_epoch
+
+    block = build_empty_block_for_next_slot(spec, state)
+    block.body.sync_aggregate = spec.SyncAggregate(
+        sync_committee_bits=[True] * len(committee_indices),
+        sync_committee_signature=compute_aggregate_sync_committee_signature(
+            spec,
+            state,
+            block.slot - 1,
+            committee_indices,  # full committee signs
+            block_root=block.parent_root,
+        )
+    )
+    yield from run_sync_committee_processing(spec, state, block)
+
+
+@with_altair_and_later
+@spec_state_test
+@always_bls
+def test_sync_committee_with_nonparticipating_withdrawable_member(spec, state):
+    # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
+    state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+    # move forward via some blocks
+    for _ in range(3):
+        next_epoch_via_block(spec, state)
+
+    committee_indices = compute_committee_indices(spec, state)
+    rng = random.Random(1010)
+
+    exited_index = _exit_validator_from_committee_and_transition_state(
+        spec,
+        state,
+        committee_indices,
+        rng,
+        lambda v: v.withdrawable_epoch + 1,
+    )
+    exited_pubkey = state.validators[exited_index].pubkey
+
+    current_epoch = spec.get_current_epoch(state)
+    assert current_epoch > state.validators[exited_index].withdrawable_epoch
+
+    target_committee_index = state.current_sync_committee.pubkeys.index(exited_pubkey)
+    block = build_empty_block_for_next_slot(spec, state)
+    committee_bits = [i != target_committee_index for i in committee_indices]
+    committee_indices = [index for index in committee_indices if index != target_committee_index]
+    block.body.sync_aggregate = spec.SyncAggregate(
+        sync_committee_bits=committee_bits,
+        sync_committee_signature=compute_aggregate_sync_committee_signature(
+            spec,
+            state,
+            block.slot - 1,
+            committee_indices,  # with withdrawable validator removed
+            block_root=block.parent_root,
+        )
+    )
+    yield from run_sync_committee_processing(spec, state, block)
@@ -2,10 +2,19 @@ import random
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,
)
+from eth2spec.test.helpers.random import (
+    randomize_state,
+)
+from eth2spec.test.helpers.state import (
+    has_active_balance_differential,
+)
from eth2spec.test.helpers.sync_committee import (
-    get_committee_indices,
+    compute_committee_indices,
    run_successful_sync_committee_test,
)
+from eth2spec.test.helpers.voluntary_exits import (
+    get_unslashed_exited_validators,
+)
from eth2spec.test.context import (
    with_altair_and_later,
    spec_state_test,
@@ -18,8 +27,8 @@ from eth2spec.test.context import (
)


-def _test_harness_for_randomized_test_case(spec, state, duplicates=False, participation_fn=None):
-    committee_indices = get_committee_indices(spec, state, duplicates=duplicates)
+def _test_harness_for_randomized_test_case(spec, state, expect_duplicates=False, participation_fn=None):
+    committee_indices = compute_committee_indices(spec, state)

    if participation_fn:
        participating_indices = participation_fn(committee_indices)
@@ -28,7 +37,7 @@ def _test_harness_for_randomized_test_case(spec, state, duplicates=False, partic

    committee_bits = [index in participating_indices for index in committee_indices]
    committee_size = len(committee_indices)
-    if duplicates:
+    if expect_duplicates:
        assert committee_size > len(set(committee_indices))
    else:
        assert committee_size == len(set(committee_indices))
@@ -44,7 +53,7 @@ def test_random_only_one_participant_with_duplicates(spec, state):
    yield from _test_harness_for_randomized_test_case(
        spec,
        state,
-        duplicates=True,
+        expect_duplicates=True,
        participation_fn=lambda comm: [rng.choice(comm)],
    )

@@ -57,7 +66,7 @@ def test_random_low_participation_with_duplicates(spec, state):
    yield from _test_harness_for_randomized_test_case(
        spec,
        state,
-        duplicates=True,
+        expect_duplicates=True,
        participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.25)),
    )

@@ -70,7 +79,7 @@ def test_random_high_participation_with_duplicates(spec, state):
    yield from _test_harness_for_randomized_test_case(
        spec,
        state,
-        duplicates=True,
+        expect_duplicates=True,
        participation_fn=lambda comm: rng.sample(comm, int(len(comm) * 0.75)),
    )

@@ -83,7 +92,7 @@ def test_random_all_but_one_participating_with_duplicates(spec, state):
    yield from _test_harness_for_randomized_test_case(
        spec,
        state,
-        duplicates=True,
+        expect_duplicates=True,
        participation_fn=lambda comm: rng.sample(comm, len(comm) - 1),
    )

@@ -98,7 +107,25 @@ def test_random_misc_balances_and_half_participation_with_duplicates(spec, state
    yield from _test_harness_for_randomized_test_case(
        spec,
        state,
-        duplicates=True,
+        expect_duplicates=True,
+        participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
+    )
+
+
+@with_altair_and_later
+@with_presets([MAINNET], reason="to create duplicate committee")
+@spec_state_test
+@single_phase
+def test_random_with_exits_with_duplicates(spec, state):
+    rng = random.Random(1402)
+    randomize_state(spec, state, rng=rng, exit_fraction=0.1, slash_fraction=0.0)
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+    assert has_active_balance_differential(spec, state)
+    yield from _test_harness_for_randomized_test_case(
+        spec,
+        state,
+        expect_duplicates=True,
        participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
    )

@@ -163,3 +190,20 @@ def test_random_misc_balances_and_half_participation_without_duplicates(spec, st
        state,
        participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
    )
+
+
+@with_altair_and_later
+@with_presets([MINIMAL], reason="to create nonduplicate committee")
+@spec_state_test
+@single_phase
+def test_random_with_exits_without_duplicates(spec, state):
+    rng = random.Random(1502)
+    randomize_state(spec, state, rng=rng, exit_fraction=0.1, slash_fraction=0.0)
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+    assert has_active_balance_differential(spec, state)
+    yield from _test_harness_for_randomized_test_case(
+        spec,
+        state,
+        participation_fn=lambda comm: rng.sample(comm, len(comm) // 2),
+    )
@@ -52,7 +52,7 @@ def test_process_light_client_update_not_updated(spec, state):
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
-        block.slot,
+        block_header.slot,
        committee,
    )
    next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
@@ -126,6 +126,17 @@ def default_balances(spec):
    return [spec.MAX_EFFECTIVE_BALANCE] * num_validators


+def scaled_churn_balances(spec):
+    """
+    Helper method to create enough validators to scale the churn limit.
+    (This is *firmly* over the churn limit -- thus the +2 instead of just +1)
+    See the second argument of ``max`` in ``get_validator_churn_limit``.
+    Usage: `@with_custom_state(balances_fn=scaled_churn_balances, ...)`
+    """
+    num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (2 + spec.config.MIN_PER_EPOCH_CHURN_LIMIT)
+    return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
+
+
with_state = with_custom_state(default_balances, default_activation_threshold)

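With the customized minimal-preset values from the config change earlier in this diff, the arithmetic works out as follows (the churn-limit formula mirrors the spec's `get_validator_churn_limit`, shown here with plain integers for illustration):

```python
MIN_PER_EPOCH_CHURN_LIMIT = 4
CHURN_LIMIT_QUOTIENT = 32  # [customized] minimal preset value from this diff

num_validators = CHURN_LIMIT_QUOTIENT * (2 + MIN_PER_EPOCH_CHURN_LIMIT)  # 192 validators
churn_limit = max(MIN_PER_EPOCH_CHURN_LIMIT, num_validators // CHURN_LIMIT_QUOTIENT)
assert churn_limit == 6  # firmly above the minimum of 4, so queue churn actually scales
```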
@@ -156,9 +156,18 @@ def add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_a
        'checks': {
            'time': int(store.time),
            'head': get_formatted_head_output(spec, store),
-            'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
-            'finalized_checkpoint_root': encode_hex(store.finalized_checkpoint.root),
-            'best_justified_checkpoint': encode_hex(store.best_justified_checkpoint.root),
+            'justified_checkpoint': {
+                'epoch': int(store.justified_checkpoint.epoch),
+                'root': encode_hex(store.justified_checkpoint.root),
+            },
+            'finalized_checkpoint': {
+                'epoch': int(store.finalized_checkpoint.epoch),
+                'root': encode_hex(store.finalized_checkpoint.root),
+            },
+            'best_justified_checkpoint': {
+                'epoch': int(store.best_justified_checkpoint.epoch),
+                'root': encode_hex(store.best_justified_checkpoint.root),
+            },
        }
    })

@@ -37,8 +37,8 @@ def exit_random_validators(spec, state, rng, fraction=None):
            continue

        validator = state.validators[index]
-        validator.exit_epoch = rng.choice([current_epoch - 1, current_epoch - 2, current_epoch - 3])
-        # ~1/2 are withdrawable
+        validator.exit_epoch = rng.choice([current_epoch, current_epoch - 1, current_epoch - 2, current_epoch - 3])
+        # ~1/2 are withdrawable (note, unnatural span between exit epoch and withdrawable epoch)
        if rng.choice([True, False]):
            validator.withdrawable_epoch = current_epoch
        else:
@@ -128,3 +128,34 @@ def randomize_state(spec, state, rng=Random(8020), exit_fraction=None, slash_fra
    exit_random_validators(spec, state, rng, fraction=exit_fraction)
    slash_random_validators(spec, state, rng, fraction=slash_fraction)
    randomize_attestation_participation(spec, state, rng)
+
+
+def patch_state_to_non_leaking(spec, state):
+    """
+    This function performs an irregular state transition so that:
+    1. the current justified checkpoint references the previous epoch
+    2. the previous justified checkpoint references the epoch before previous
+    3. the finalized checkpoint matches the previous justified checkpoint
+
+    The effects of this function are intended to offset randomization side effects
+    performed by other functionality in this module so that if the ``state`` was leaking,
+    then the ``state`` is not leaking after.
+    """
+    state.justification_bits[0] = True
+    state.justification_bits[1] = True
+    previous_epoch = spec.get_previous_epoch(state)
+    previous_root = spec.get_block_root(state, previous_epoch)
+    previous_previous_epoch = max(spec.GENESIS_EPOCH, spec.Epoch(previous_epoch - 1))
+    previous_previous_root = spec.get_block_root(state, previous_previous_epoch)
+    state.previous_justified_checkpoint = spec.Checkpoint(
+        epoch=previous_previous_epoch,
+        root=previous_previous_root,
+    )
+    state.current_justified_checkpoint = spec.Checkpoint(
+        epoch=previous_epoch,
+        root=previous_root,
+    )
+    state.finalized_checkpoint = spec.Checkpoint(
+        epoch=previous_previous_epoch,
+        root=previous_previous_root,
+    )
@@ -255,7 +255,19 @@ def run_get_inactivity_penalty_deltas(spec, state):
            else:
                assert penalties[index] > base_penalty
        else:
-            assert penalties[index] == 0
+            if not is_post_altair(spec):
+                assert penalties[index] == 0
+                continue
+            else:
+                # post altair, this penalty is derived from the inactivity score
+                # regardless if the state is leaking or not...
+                if index in matching_attesting_indices:
+                    assert penalties[index] == 0
+                else:
+                    # copied from spec:
+                    penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
+                    penalty_denominator = spec.config.INACTIVITY_SCORE_BIAS * spec.INACTIVITY_PENALTY_QUOTIENT_ALTAIR
+                    assert penalties[index] == penalty_numerator // penalty_denominator


def transition_state_to_leak(spec, state, epochs=None):
@@ -1,6 +1,6 @@
from eth2spec.test.context import expect_assertion_error, is_post_altair
from eth2spec.test.helpers.block import apply_empty_block, sign_block, transition_unsigned_block
-from eth2spec.test.helpers.voluntary_exits import get_exited_validators
+from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators


def get_balance(state, index):
@@ -142,7 +142,7 @@ def ensure_state_has_validators_across_lifecycle(spec, state):
    for each of the following lifecycle states:
    1. Pending / deposited
    2. Active
-    3. Exited
+    3. Exited (but not slashed)
    4. Slashed
    """
    has_pending = any(filter(spec.is_eligible_for_activation_queue, state.validators))
@@ -150,8 +150,18 @@ def ensure_state_has_validators_across_lifecycle(spec, state):
    current_epoch = spec.get_current_epoch(state)
    has_active = any(filter(lambda v: spec.is_active_validator(v, current_epoch), state.validators))

-    has_exited = any(get_exited_validators(spec, state))
+    has_exited = any(get_unslashed_exited_validators(spec, state))

    has_slashed = any(filter(lambda v: v.slashed, state.validators))

    return has_pending and has_active and has_exited and has_slashed
+
+
+def has_active_balance_differential(spec, state):
+    """
+    Ensure there is a difference between the total balance of
+    all _active_ validators and _all_ validators.
+    """
+    active_balance = spec.get_total_active_balance(state)
+    total_balance = spec.get_total_balance(state, set(range(len(state.validators))))
+    return active_balance // spec.EFFECTIVE_BALANCE_INCREMENT != total_balance // spec.EFFECTIVE_BALANCE_INCREMENT
@@ -9,7 +9,6 @@ from eth2spec.test.helpers.block import (
)
from eth2spec.test.helpers.block_processing import run_block_processing_to
from eth2spec.utils import bls
-from eth2spec.utils.hash_function import hash


def compute_sync_committee_signature(spec, state, slot, privkey, block_root=None, domain_type=None):
@@ -75,10 +74,12 @@ def compute_sync_committee_proposer_reward(spec, state, committee_indices, commi
    return spec.Gwei(participant_reward * participant_number)


-def compute_committee_indices(spec, state, committee):
+def compute_committee_indices(spec, state, committee=None):
    """
    Given a ``committee``, calculate and return the related indices
    """
+    if committee is None:
+        committee = state.current_sync_committee
    all_pubkeys = [v.pubkey for v in state.validators]
    return [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
@@ -153,6 +154,7 @@ def _build_block_for_next_slot_with_sync_participation(spec, state, committee_in
            state,
            block.slot - 1,
            [index for index, bit in zip(committee_indices, committee_bits) if bit],
+            block_root=block.parent_root,
        )
    )
    return block
@@ -161,23 +163,3 @@ def _build_block_for_next_slot_with_sync_participation(spec, state, committee_in
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
    block = _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits)
    yield from run_sync_committee_processing(spec, state, block)
-
-
-def get_committee_indices(spec, state, duplicates=False):
-    """
-    This utility function allows the caller to ensure there are or are not
-    duplicate validator indices in the returned committee based on
-    the boolean ``duplicates``.
-    """
-    state = state.copy()
-    current_epoch = spec.get_current_epoch(state)
-    randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
-    while True:
-        committee = spec.get_next_sync_committee_indices(state)
-        if duplicates:
-            if len(committee) != len(set(committee)):
-                return committee
-        else:
-            if len(committee) == len(set(committee)):
-                return committee
-        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
@@ -34,6 +34,13 @@ def get_exited_validators(spec, state):
    return [index for (index, validator) in enumerate(state.validators) if validator.exit_epoch <= current_epoch]


+def get_unslashed_exited_validators(spec, state):
+    return [
+        index for index in get_exited_validators(spec, state)
+        if not state.validators[index].slashed
+    ]
+
+
def exit_validators(spec, state, validator_count, rng=None):
    if rng is None:
        rng = Random(1337)
@@ -1,4 +1,10 @@
-from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.context import (
+    spec_state_test, expect_assertion_error,
+    always_bls, with_all_phases, with_presets,
+    spec_test, single_phase,
+    with_custom_state, scaled_churn_balances,
+)
 from eth2spec.test.helpers.keys import pubkey_to_privkey
 from eth2spec.test.helpers.voluntary_exits import sign_voluntary_exit

@@ -68,9 +74,7 @@ def test_invalid_signature(spec, state):
     yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)


-@with_all_phases
-@spec_state_test
-def test_success_exit_queue(spec, state):
+def run_test_success_exit_queue(spec, state):
     # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
     state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -106,10 +110,29 @@ def test_success_exit_queue(spec, state):
     # when processing an additional exit, it results in an exit in a later epoch
     yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit)

-    assert (
-        state.validators[validator_index].exit_epoch ==
-        state.validators[initial_indices[0]].exit_epoch + 1
-    )
+    for index in initial_indices:
+        assert (
+            state.validators[validator_index].exit_epoch ==
+            state.validators[index].exit_epoch + 1
+        )
+
+
+@with_all_phases
+@spec_state_test
+def test_success_exit_queue__min_churn(spec, state):
+    yield from run_test_success_exit_queue(spec, state)
+
+
+@with_all_phases
+@with_presets([MINIMAL],
+              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
+@spec_test
+@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
+@single_phase
+def test_success_exit_queue__scaled_churn(spec, state):
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert churn_limit > spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_success_exit_queue(spec, state)


 @with_all_phases
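The `__scaled_churn` variant only makes sense if `scaled_churn_balances` yields enough active validators that the phase 0 churn formula `max(MIN_PER_EPOCH_CHURN_LIMIT, active_count // CHURN_LIMIT_QUOTIENT)` exceeds the minimum. A rough sketch of that idea; the real helper lives in `eth2spec.test.context` and may differ, and the function below is illustrative only:

```python
# Illustrative only: a balances_fn shape that would guarantee a scaled churn limit.
def scaled_churn_balances_sketch(spec):
    # Enough max-balance validators that active_count // CHURN_LIMIT_QUOTIENT
    # is strictly greater than MIN_PER_EPOCH_CHURN_LIMIT.
    num_validators = spec.config.CHURN_LIMIT_QUOTIENT * (2 + spec.config.MIN_PER_EPOCH_CHURN_LIMIT)
    return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
```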
@@ -1,8 +1,10 @@
+from random import Random
 from eth2spec.test.context import is_post_altair, spec_state_test, with_all_phases
 from eth2spec.test.helpers.epoch_processing import (
     run_epoch_processing_with,
 )
-from eth2spec.test.helpers.state import transition_to
+from eth2spec.test.helpers.state import transition_to, next_epoch_via_block, next_slot
+from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators


 def run_process_just_and_fin(spec, state):

@@ -300,3 +302,76 @@ def test_12_ok_support_messed_target(spec, state):
 @spec_state_test
 def test_12_poor_support(spec, state):
     yield from finalize_on_12(spec, state, 3, False, False)
+
+
+@with_all_phases
+@spec_state_test
+def test_balance_threshold_with_exited_validators(spec, state):
+    """
+    This test exercises a very specific failure mode where
+    exited validators are incorrectly included in the total active balance
+    when weighing justification.
+    """
+    rng = Random(133333)
+    # move past genesis conditions
+    for _ in range(3):
+        next_epoch_via_block(spec, state)
+
+    # mock attestation helper requires last slot of epoch
+    for _ in range(spec.SLOTS_PER_EPOCH - 1):
+        next_slot(spec, state)
+
+    # Step 1: Exit ~1/2 vals in current epoch
+    epoch = spec.get_current_epoch(state)
+    for index in spec.get_active_validator_indices(state, epoch):
+        if rng.choice([True, False]):
+            continue
+
+        validator = state.validators[index]
+        validator.exit_epoch = epoch
+        validator.withdrawable_epoch = epoch + 1
+        validator.withdrawable_epoch = validator.exit_epoch + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+
+    exited_validators = get_unslashed_exited_validators(spec, state)
+    assert len(exited_validators) != 0
+
+    source = state.current_justified_checkpoint
+    target = spec.Checkpoint(
+        epoch=epoch,
+        root=spec.get_block_root(state, epoch)
+    )
+    add_mock_attestations(
+        spec,
+        state,
+        epoch,
+        source,
+        target,
+        sufficient_support=False,
+    )
+
+    if not is_post_altair(spec):
+        current_attestations = spec.get_matching_target_attestations(state, epoch)
+        total_active_balance = spec.get_total_active_balance(state)
+        current_target_balance = spec.get_attesting_balance(state, current_attestations)
+        # Check we will not justify the current checkpoint
+        does_justify = current_target_balance * 3 >= total_active_balance * 2
+        assert not does_justify
+        # Ensure we would have justified the current checkpoint w/ the exited validators
+        current_exited_balance = spec.get_total_balance(state, exited_validators)
+        does_justify = (current_target_balance + current_exited_balance) * 3 >= total_active_balance * 2
+        assert does_justify
+    else:
+        current_indices = spec.get_unslashed_participating_indices(state, spec.TIMELY_TARGET_FLAG_INDEX, epoch)
+        total_active_balance = spec.get_total_active_balance(state)
+        current_target_balance = spec.get_total_balance(state, current_indices)
+        # Check we will not justify the current checkpoint
+        does_justify = current_target_balance * 3 >= total_active_balance * 2
+        assert not does_justify
+        # Ensure we would have justified the current checkpoint w/ the exited validators
+        current_exited_balance = spec.get_total_balance(state, exited_validators)
+        does_justify = (current_target_balance + current_exited_balance) * 3 >= total_active_balance * 2
+        assert does_justify
+
+    yield from run_process_just_and_fin(spec, state)
+
+    assert state.current_justified_checkpoint.epoch != epoch
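The justification check used above is the spec's integer-only supermajority comparison: a target is justified only when the attesting balance times 3 is at least the total active balance times 2. A small worked example of that arithmetic, with illustrative numbers only:

```python
# Supermajority check as used above: balance * 3 >= total * 2  (i.e. at least 2/3).
total_active_balance = 96_000_000_000  # e.g. 3 * 32 ETH expressed in Gwei
attesting_balance = 63_000_000_000     # just below 2/3
assert not (attesting_balance * 3 >= total_active_balance * 2)
attesting_balance = 64_000_000_000     # exactly 2/3
assert attesting_balance * 3 >= total_active_balance * 2
```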
@@ -1,6 +1,12 @@
 from eth2spec.test.helpers.deposits import mock_deposit
 from eth2spec.test.helpers.state import next_epoch, next_slots
-from eth2spec.test.context import spec_state_test, with_all_phases
+from eth2spec.test.helpers.constants import MINIMAL
+from eth2spec.test.context import (
+    spec_test, spec_state_test,
+    with_all_phases, single_phase,
+    with_custom_state, with_presets,
+    scaled_churn_balances,
+)
 from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with

@@ -112,9 +118,7 @@ def test_activation_queue_sorting(spec, state):
     assert state.validators[churn_limit - 1].activation_epoch != spec.FAR_FUTURE_EPOCH


-@with_all_phases
-@spec_state_test
-def test_activation_queue_efficiency(spec, state):
+def run_test_activation_queue_efficiency(spec, state):
     churn_limit = spec.get_validator_churn_limit(state)
     mock_activations = churn_limit * 2

@@ -128,23 +132,45 @@ def test_activation_queue_efficiency(spec, state):

     state.finalized_checkpoint.epoch = epoch + 1

+    # Churn limit could have changed given the active vals removed via `mock_deposit`
+    churn_limit_0 = spec.get_validator_churn_limit(state)
+
     # Run first registry update. Do not yield test vectors
     for _ in run_process_registry_updates(spec, state):
         pass

     # Half should churn in first run of registry update
     for i in range(mock_activations):
-        if i < mock_activations // 2:
+        if i < churn_limit_0:
             assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH
         else:
             assert state.validators[i].activation_epoch == spec.FAR_FUTURE_EPOCH

     # Second half should churn in second run of registry update
+    churn_limit_1 = spec.get_validator_churn_limit(state)
     yield from run_process_registry_updates(spec, state)
-    for i in range(mock_activations):
+    for i in range(churn_limit_0 + churn_limit_1):
         assert state.validators[i].activation_epoch < spec.FAR_FUTURE_EPOCH
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_efficiency_min(spec, state):
+    assert spec.get_validator_churn_limit(state) == spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_efficiency(spec, state)
+
+
+@with_all_phases
+@with_presets([MINIMAL],
+              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
+@spec_test
+@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
+@single_phase
+def test_activation_queue_efficiency_scaled(spec, state):
+    assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_efficiency(spec, state)


 @with_all_phases
 @spec_state_test
 def test_ejection(spec, state):

@@ -165,9 +191,7 @@ def test_ejection(spec, state):
     )


-@with_all_phases
-@spec_state_test
-def test_ejection_past_churn_limit(spec, state):
+def run_test_ejection_past_churn_limit(spec, state):
     churn_limit = spec.get_validator_churn_limit(state)

     # try to eject more than per-epoch churn limit

@@ -184,58 +208,137 @@ def test_ejection_past_churn_limit(spec, state):
         # first third ejected in normal speed
         if i < mock_ejections // 3:
             assert state.validators[i].exit_epoch == expected_ejection_epoch
-        # second thirdgets delayed by 1 epoch
+        # second third gets delayed by 1 epoch
         elif mock_ejections // 3 <= i < mock_ejections * 2 // 3:
             assert state.validators[i].exit_epoch == expected_ejection_epoch + 1
-        # second thirdgets delayed by 2 epochs
+        # final third gets delayed by 2 epochs
         else:
             assert state.validators[i].exit_epoch == expected_ejection_epoch + 2


-@with_all_phases
-@spec_state_test
-def test_activation_queue_activation_and_ejection(spec, state):
+@with_all_phases
+@spec_state_test
+def test_ejection_past_churn_limit_min(spec, state):
+    assert spec.get_validator_churn_limit(state) == spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_ejection_past_churn_limit(spec, state)
+
+
+@with_all_phases
+@with_presets([MINIMAL],
+              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
+@spec_test
+@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
+@single_phase
+def test_ejection_past_churn_limit_scaled(spec, state):
+    assert spec.get_validator_churn_limit(state) > spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_ejection_past_churn_limit(spec, state)
+
+
+def run_test_activation_queue_activation_and_ejection(spec, state, num_per_status):
     # move past first two irregular epochs wrt finality
     next_epoch(spec, state)
     next_epoch(spec, state)

     # ready for entrance into activation queue
-    activation_queue_index = 0
-    mock_deposit(spec, state, activation_queue_index)
+    activation_queue_start_index = 0
+    activation_queue_indices = list(range(activation_queue_start_index, activation_queue_start_index + num_per_status))
+    for validator_index in activation_queue_indices:
+        mock_deposit(spec, state, validator_index)

     # ready for activation
-    activation_index = 1
-    mock_deposit(spec, state, activation_index)
     state.finalized_checkpoint.epoch = spec.get_current_epoch(state) - 1
-    state.validators[activation_index].activation_eligibility_epoch = state.finalized_checkpoint.epoch
+    activation_start_index = num_per_status
+    activation_indices = list(range(activation_start_index, activation_start_index + num_per_status))
+    for validator_index in activation_indices:
+        mock_deposit(spec, state, validator_index)
+        state.validators[validator_index].activation_eligibility_epoch = state.finalized_checkpoint.epoch

     # ready for ejection
-    ejection_index = 2
-    state.validators[ejection_index].effective_balance = spec.config.EJECTION_BALANCE
+    ejection_start_index = num_per_status * 2
+    ejection_indices = list(range(ejection_start_index, ejection_start_index + num_per_status))
+    for validator_index in ejection_indices:
+        state.validators[validator_index].effective_balance = spec.config.EJECTION_BALANCE

+    churn_limit = spec.get_validator_churn_limit(state)
     yield from run_process_registry_updates(spec, state)

-    # validator moved into activation queue
-    validator = state.validators[activation_queue_index]
-    assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
-    assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH
-    assert not spec.is_active_validator(validator, spec.get_current_epoch(state))
+    # all eligible validators moved into activation queue
+    for validator_index in activation_queue_indices:
+        validator = state.validators[validator_index]
+        assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
+        assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH
+        assert not spec.is_active_validator(validator, spec.get_current_epoch(state))

-    # validator activated for future epoch
-    validator = state.validators[activation_index]
-    assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
-    assert validator.activation_epoch != spec.FAR_FUTURE_EPOCH
-    assert not spec.is_active_validator(validator, spec.get_current_epoch(state))
-    assert spec.is_active_validator(
-        validator,
-        spec.compute_activation_exit_epoch(spec.get_current_epoch(state))
-    )
+    # up to churn limit validators get activated for future epoch from the queue
+    for validator_index in activation_indices[:churn_limit]:
+        validator = state.validators[validator_index]
+        assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
+        assert validator.activation_epoch != spec.FAR_FUTURE_EPOCH
+        assert not spec.is_active_validator(validator, spec.get_current_epoch(state))
+        assert spec.is_active_validator(
+            validator,
+            spec.compute_activation_exit_epoch(spec.get_current_epoch(state))
+        )

-    # validator ejected for future epoch
-    validator = state.validators[ejection_index]
-    assert validator.exit_epoch != spec.FAR_FUTURE_EPOCH
-    assert spec.is_active_validator(validator, spec.get_current_epoch(state))
-    assert not spec.is_active_validator(
-        validator,
-        spec.compute_activation_exit_epoch(spec.get_current_epoch(state))
-    )
+    # any remaining validators do not exit the activation queue
+    for validator_index in activation_indices[churn_limit:]:
+        validator = state.validators[validator_index]
+        assert validator.activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
+        assert validator.activation_epoch == spec.FAR_FUTURE_EPOCH
+
+    # all ejection balance validators ejected for a future epoch
+    for i, validator_index in enumerate(ejection_indices):
+        validator = state.validators[validator_index]
+        assert validator.exit_epoch != spec.FAR_FUTURE_EPOCH
+        assert spec.is_active_validator(validator, spec.get_current_epoch(state))
+        queue_offset = i // churn_limit
+        assert not spec.is_active_validator(
+            validator,
+            spec.compute_activation_exit_epoch(spec.get_current_epoch(state)) + queue_offset
+        )
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_activation_and_ejection__1(spec, state):
+    yield from run_test_activation_queue_activation_and_ejection(spec, state, 1)
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_activation_and_ejection__churn_limit(spec, state):
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert churn_limit == spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit)
+
+
+@with_all_phases
+@spec_state_test
+def test_activation_queue_activation_and_ejection__exceed_churn_limit(spec, state):
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert churn_limit == spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit + 1)
+
+
+@with_all_phases
+@with_presets([MINIMAL],
+              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
+@spec_test
+@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
+@single_phase
+def test_activation_queue_activation_and_ejection__scaled_churn_limit(spec, state):
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert churn_limit > spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit)
+
+
+@with_all_phases
+@with_presets([MINIMAL],
+              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
+@spec_test
+@with_custom_state(balances_fn=scaled_churn_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
+@single_phase
+def test_activation_queue_activation_and_ejection__exceed_scaled_churn_limit(spec, state):
+    churn_limit = spec.get_validator_churn_limit(state)
+    assert churn_limit > spec.config.MIN_PER_EPOCH_CHURN_LIMIT
+    yield from run_test_activation_queue_activation_and_ejection(spec, state, churn_limit * 2)
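The `queue_offset = i // churn_limit` assertion encodes that exits are rate-limited per epoch: the first `churn_limit` ejected validators leave at the earliest eligible exit epoch, the next `churn_limit` one epoch later, and so on. A small illustration of the expected distribution, assuming a churn limit of 4, ten ejections, and a stand-in base epoch:

```python
# Illustrative: the earliest exit epoch spreads over ceil(n / churn_limit) epochs.
churn_limit = 4
base_exit_epoch = 10  # stand-in for compute_activation_exit_epoch(current_epoch)
expected = [base_exit_epoch + i // churn_limit for i in range(10)]
assert expected == [10, 10, 10, 10, 11, 11, 11, 11, 12, 12]
```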
@@ -1,7 +1,11 @@
+from random import Random
 from eth2spec.test.context import spec_state_test, with_all_phases, is_post_altair
 from eth2spec.test.helpers.epoch_processing import (
     run_epoch_processing_with, run_epoch_processing_to
 )
+from eth2spec.test.helpers.random import randomize_state
+from eth2spec.test.helpers.state import has_active_balance_differential
+from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators
 from eth2spec.test.helpers.state import next_epoch

@@ -22,6 +26,9 @@ def slash_validators(spec, state, indices, out_epochs):
         spec.get_current_epoch(state) % spec.EPOCHS_PER_SLASHINGS_VECTOR
     ] = total_slashed_balance

+    # verify some slashings happened...
+    assert total_slashed_balance != 0
+

 def get_slashing_multiplier(spec):
     if is_post_altair(spec):

@@ -30,9 +37,7 @@ def get_slashing_multiplier(spec):
     return spec.PROPORTIONAL_SLASHING_MULTIPLIER


-@with_all_phases
-@spec_state_test
-def test_max_penalties(spec, state):
+def _setup_process_slashings_test(spec, state, not_slashable_set=set()):
     # Slashed count to ensure that enough validators are slashed to induce maximum penalties
     slashed_count = min(
         (len(state.validators) // get_slashing_multiplier(spec)) + 1,

@@ -41,14 +46,23 @@ def test_max_penalties(spec, state):
     )
     out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)

-    slashed_indices = list(range(slashed_count))
-    slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
+    eligible_indices = set(range(slashed_count))
+    slashed_indices = eligible_indices.difference(not_slashable_set)
+    slash_validators(spec, state, sorted(slashed_indices), [out_epoch] * slashed_count)

     total_balance = spec.get_total_active_balance(state)
     total_penalties = sum(state.slashings)

     assert total_balance // get_slashing_multiplier(spec) <= total_penalties

+    return slashed_indices
+
+
+@with_all_phases
+@spec_state_test
+def test_max_penalties(spec, state):
+    slashed_indices = _setup_process_slashings_test(spec, state)
+
     yield from run_process_slashings(spec, state)

     for i in slashed_indices:

@@ -171,3 +185,28 @@ def test_scaled_penalties(spec, state):
             * spec.EFFECTIVE_BALANCE_INCREMENT
         )
         assert state.balances[i] == pre_slash_balances[i] - expected_penalty
+
+
+@with_all_phases
+@spec_state_test
+def test_slashings_with_random_state(spec, state):
+    rng = Random(9998)
+    randomize_state(spec, state, rng)
+
+    pre_balances = state.balances.copy()
+
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+    assert has_active_balance_differential(spec, state)
+
+    slashed_indices = _setup_process_slashings_test(spec, state, not_slashable_set=target_validators)
+
+    # ensure no accidental slashings of protected set...
+    current_target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(current_target_validators) != 0
+    assert current_target_validators == target_validators
+
+    yield from run_process_slashings(spec, state)
+
+    for i in slashed_indices:
+        assert state.balances[i] < pre_balances[i]
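The bound asserted in `_setup_process_slashings_test` follows from slashing roughly `1 / multiplier` of the validators: with `slashed_count = count // multiplier + 1` and equal balances, the accumulated slashings reach at least `total_balance // multiplier`. A toy check of that arithmetic with illustrative numbers (not spec values):

```python
# Illustrative: slashing (n // m) + 1 equal-balance validators pushes
# sum(slashings) past total_balance // m.
multiplier = 3
balance = 32
num_validators = 100
slashed_count = num_validators // multiplier + 1  # 34
total_balance = num_validators * balance          # 3200
total_penalties = slashed_count * balance         # 1088
assert total_balance // multiplier <= total_penalties  # 1066 <= 1088
```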
@@ -26,7 +26,6 @@ from eth2spec.test.helpers.state import (
     next_epoch,
     next_slots,
     state_transition_and_sign_block,
-    transition_to,
 )

@@ -191,6 +190,10 @@ def test_on_block_before_finalized(spec, state):
 @spec_state_test
 @with_presets([MINIMAL], reason="too slow")
 def test_on_block_finalized_skip_slots(spec, state):
+    """
+    Test case was originally from https://github.com/ethereum/consensus-specs/pull/1579
+    And then rewrote largely.
+    """
     test_steps = []
     # Initialization
     store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)

@@ -200,21 +203,31 @@ def test_on_block_finalized_skip_slots(spec, state):
     on_tick_and_append_step(spec, store, current_time, test_steps)
     assert store.time == current_time

-    # Create a finalized chain
-    for _ in range(4):
+    # Fill epoch 0 and the first slot of epoch 1
+    state, store, _ = yield from apply_next_slots_with_attestations(
+        spec, state, store, spec.SLOTS_PER_EPOCH, True, False, test_steps)
+
+    # Skip the rest slots of epoch 1 and the first slot of epoch 2
+    next_slots(spec, state, spec.SLOTS_PER_EPOCH)
+
+    # The state after the skipped slots
+    target_state = state.copy()
+
+    # Fill epoch 3 and 4
+    for _ in range(2):
         state, store, _ = yield from apply_next_epoch_with_attestations(
-            spec, state, store, True, False, test_steps=test_steps)
-    assert store.finalized_checkpoint.epoch == 2
+            spec, state, store, True, True, test_steps=test_steps)

-    # Another chain
-    another_state = store.block_states[store.finalized_checkpoint.root].copy()
-    # Build block that includes the skipped slots up to finality in chain
-    block = build_empty_block(spec,
-                              another_state,
-                              spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
-    block.body.graffiti = b'\x12' * 32
-    signed_block = state_transition_and_sign_block(spec, another_state, block)
+    # Now we get finalized epoch 2, where `compute_start_slot_at_epoch(2)` is a skipped slot
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert store.finalized_checkpoint.root == spec.get_block_root(state, 1) == spec.get_block_root(state, 2)
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert store.justified_checkpoint == state.current_justified_checkpoint
+
+    # Now build a block at later slot than finalized *epoch*
+    # Includes finalized block in chain and the skipped slots
+    block = build_empty_block_for_next_slot(spec, target_state)
+    signed_block = state_transition_and_sign_block(spec, target_state, block)
     yield from tick_and_add_block(spec, store, signed_block, test_steps)

     yield 'steps', test_steps

@@ -224,36 +237,43 @@ def test_on_block_finalized_skip_slots(spec, state):
 @spec_state_test
 @with_presets([MINIMAL], reason="too slow")
 def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
+    """
+    Test case was originally from https://github.com/ethereum/consensus-specs/pull/1579
+    And then rewrote largely.
+    """
     test_steps = []
     # Initialization
-    transition_to(spec, state, state.slot + spec.SLOTS_PER_EPOCH - 1)
-    block = build_empty_block_for_next_slot(spec, state)
-    transition_unsigned_block(spec, state, block)
-    block.state_root = state.hash_tree_root()
-    store = spec.get_forkchoice_store(state, block)
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
     yield 'anchor_state', state
-    yield 'anchor_block', block
+    yield 'anchor_block', anchor_block

     current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
     on_tick_and_append_step(spec, store, current_time, test_steps)
     assert store.time == current_time

-    pre_finalized_checkpoint_epoch = store.finalized_checkpoint.epoch
+    # Fill epoch 0 and the first slot of epoch 1
+    state, store, _ = yield from apply_next_slots_with_attestations(
+        spec, state, store, spec.SLOTS_PER_EPOCH, True, False, test_steps)

-    # Finalized
-    for _ in range(3):
+    # Skip the rest slots of epoch 1 and the first slot of epoch 2
+    next_slots(spec, state, spec.SLOTS_PER_EPOCH)
+
+    # Fill epoch 3 and 4
+    for _ in range(2):
         state, store, _ = yield from apply_next_epoch_with_attestations(
-            spec, state, store, True, False, test_steps=test_steps)
-    assert store.finalized_checkpoint.epoch == pre_finalized_checkpoint_epoch + 1
+            spec, state, store, True, True, test_steps=test_steps)

-    # Now build a block at later slot than finalized epoch
-    # Includes finalized block in chain, but not at appropriate skip slot
-    pre_state = store.block_states[block.hash_tree_root()].copy()
-    block = build_empty_block(spec,
-                              state=pre_state,
-                              slot=spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + 2)
-    block.body.graffiti = b'\x12' * 32
-    signed_block = sign_block(spec, pre_state, block)
+    # Now we get finalized epoch 2, where `compute_start_slot_at_epoch(2)` is a skipped slot
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2
+    assert store.finalized_checkpoint.root == spec.get_block_root(state, 1) == spec.get_block_root(state, 2)
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert store.justified_checkpoint == state.current_justified_checkpoint
+
+    # Now build a block after the block of the finalized **root**
+    # Includes finalized block in chain, but does not include finalized skipped slots
+    another_state = store.block_states[store.finalized_checkpoint.root].copy()
+    assert another_state.slot == spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch - 1)
+    block = build_empty_block_for_next_slot(spec, another_state)
+    signed_block = state_transition_and_sign_block(spec, another_state, block)
     yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False)

     yield 'steps', test_steps

@@ -483,7 +503,8 @@ def test_new_justified_is_later_than_store_justified(spec, state):
     assert fork_2_state.finalized_checkpoint.epoch == 0
     assert fork_2_state.current_justified_checkpoint.epoch == 5
     # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED
-    spec.on_tick(store, store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT)
+    time = store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT
+    on_tick_and_append_step(spec, store, time, test_steps)
     assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED
     # Run on_block
     yield from add_block(spec, store, signed_block, test_steps)

@@ -526,7 +547,8 @@ def test_new_justified_is_later_than_store_justified(spec, state):
     # # Apply blocks of `fork_3_state` to `store`
     # for block in all_blocks:
     #     if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
-    #         spec.on_tick(store, store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT)
+    #         time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT
+    #         on_tick_and_append_step(spec, store, time, test_steps)
     #     # valid_attestations=False because the attestations are outdated (older than previous epoch)
     #     yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=False)

@@ -643,7 +665,6 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state):

     # Process state
     next_epoch(spec, state)
-    spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT)

     state, store, _ = yield from apply_next_epoch_with_attestations(
         spec, state, store, False, True, test_steps=test_steps)
@@ -9,6 +9,13 @@ from eth2spec.test.context import (
     low_balances, misc_balances,
 )
 import eth2spec.test.helpers.rewards as rewards_helpers
+from eth2spec.test.helpers.random import (
+    randomize_state,
+    patch_state_to_non_leaking,
+    randomize_attestation_participation,
+)
+from eth2spec.test.helpers.state import has_active_balance_differential, next_epoch
+from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators


 @with_all_phases

@@ -35,6 +42,21 @@ def test_full_random_3(spec, state):
     yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))


+@with_all_phases
+@spec_state_test
+def test_full_random_4(spec, state):
+    """
+    Ensure a rewards test with some exited (but not slashed) validators.
+    """
+    rng = Random(5050)
+    randomize_state(spec, state, rng)
+    assert spec.is_in_inactivity_leak(state)
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+    assert has_active_balance_differential(spec, state)
+    yield from rewards_helpers.run_deltas(spec, state)
+
+
 @with_all_phases
 @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
 @spec_test

@@ -57,3 +79,52 @@ def test_full_random_low_balances_1(spec, state):
 @single_phase
 def test_full_random_misc_balances(spec, state):
     yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(7070))
+
+
+@with_all_phases
+@spec_state_test
+def test_full_random_without_leak_0(spec, state):
+    rng = Random(1010)
+    randomize_state(spec, state, rng)
+    assert spec.is_in_inactivity_leak(state)
+    patch_state_to_non_leaking(spec, state)
+    assert not spec.is_in_inactivity_leak(state)
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+    assert has_active_balance_differential(spec, state)
+    yield from rewards_helpers.run_deltas(spec, state)
+
+
+@with_all_phases
+@spec_state_test
+def test_full_random_without_leak_and_current_exit_0(spec, state):
+    """
+    This test specifically ensures a validator exits in the current epoch
+    to ensure rewards are handled properly in this case.
+    """
+    rng = Random(1011)
+    randomize_state(spec, state, rng)
+    assert spec.is_in_inactivity_leak(state)
+    patch_state_to_non_leaking(spec, state)
+    assert not spec.is_in_inactivity_leak(state)
+    target_validators = get_unslashed_exited_validators(spec, state)
+    assert len(target_validators) != 0
+
+    # move forward some epochs to process attestations added
+    # by ``randomize_state`` before we exit validators in
+    # what will be the current epoch
+    for _ in range(2):
+        next_epoch(spec, state)
+
+    current_epoch = spec.get_current_epoch(state)
+    for index in target_validators:
+        # patch exited validators to exit in the current epoch
+        validator = state.validators[index]
+        validator.exit_epoch = current_epoch
+        validator.withdrawable_epoch = current_epoch + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+
+    # re-randomize attestation participation for the current epoch
+    randomize_attestation_participation(spec, state, rng)
+
+    assert has_active_balance_differential(spec, state)
+    yield from rewards_helpers.run_deltas(spec, state)
@@ -19,6 +19,7 @@ from eth2spec.test.helpers.attester_slashings import (
 from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
 from eth2spec.test.helpers.attestations import get_valid_attestation
 from eth2spec.test.helpers.deposits import prepare_state_and_deposit
+from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
 from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
 from eth2spec.test.helpers.multi_operations import (
     run_slash_and_exit,

@@ -38,6 +39,7 @@ from eth2spec.test.context import (
     with_custom_state,
     large_validator_set,
     is_post_altair,
+    is_post_merge,
 )

@@ -146,6 +148,11 @@ def process_and_sign_block_without_header_validations(spec, state, block):
     spec.process_randao(state, block.body)
     spec.process_eth1_data(state, block.body)
     spec.process_operations(state, block.body)
+    if is_post_altair(spec):
+        spec.process_sync_aggregate(state, block.body.sync_aggregate)
+    if is_post_merge(spec):
+        if spec.is_execution_enabled(state, block.body):
+            spec.process_execution_payload(state, block.body.execution_payload, spec.EXECUTION_ENGINE)

     # Insert post-state rot
     block.state_root = state.hash_tree_root()

@@ -188,6 +195,10 @@ def test_parent_from_same_slot(spec, state):
     child_block = parent_block.copy()
     child_block.parent_root = state.latest_block_header.hash_tree_root()

+    if is_post_merge(spec):
+        randao_mix = spec.compute_randao_mix(state, child_block.body.randao_reveal)
+        child_block.body.execution_payload = build_empty_execution_payload(spec, state, randao_mix)
+
     # Show that normal path through transition fails
     failed_state = state.copy()
     expect_assertion_error(

@@ -974,12 +985,9 @@ def test_historical_batch(spec, state):


 @with_all_phases
+@with_presets([MINIMAL], reason="suffices to test eth1 data voting without long voting period")
 @spec_state_test
 def test_eth1_data_votes_consensus(spec, state):
-    if spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 2:
-        return dump_skipping_message("Skip test if config with longer `EPOCHS_PER_ETH1_VOTING_PERIOD` for saving time."
-                                     " Minimal config suffice to cover the target-of-test.")
-
     voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH

     offset_block = build_empty_block(spec, state, slot=voting_period_slots - 1)

@@ -1018,12 +1026,9 @@ def test_eth1_data_votes_consensus(spec, state):


 @with_all_phases
+@with_presets([MINIMAL], reason="suffices to test eth1 data voting without long voting period")
 @spec_state_test
 def test_eth1_data_votes_no_consensus(spec, state):
-    if spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 2:
-        return dump_skipping_message("Skip test if config with longer `EPOCHS_PER_ETH1_VOTING_PERIOD` for saving time."
-                                     " Minimal config suffice to cover the target-of-test.")
-
     voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH

     pre_eth1_hash = state.eth1_data.block_hash
@@ -17,6 +17,7 @@ from eth2spec.test.helpers.inactivity_scores import (
 )
 from eth2spec.test.helpers.random import (
     randomize_state as randomize_state_helper,
+    patch_state_to_non_leaking,
 )
 from eth2spec.test.helpers.state import (
     next_slot,

@@ -274,23 +275,7 @@ def _randomized_scenario_setup(state_randomizer):
         may not reflect this condition with prior (arbitrary) mutations,
         so this mutator addresses that fact.
         """
-        state.justification_bits = (True, True, True, True)
-        previous_epoch = spec.get_previous_epoch(state)
-        previous_root = spec.get_block_root(state, previous_epoch)
-        previous_previous_epoch = max(spec.GENESIS_EPOCH, spec.Epoch(previous_epoch - 1))
-        previous_previous_root = spec.get_block_root(state, previous_previous_epoch)
-        state.previous_justified_checkpoint = spec.Checkpoint(
-            epoch=previous_previous_epoch,
-            root=previous_previous_root,
-        )
-        state.current_justified_checkpoint = spec.Checkpoint(
-            epoch=previous_epoch,
-            root=previous_root,
-        )
-        state.finalized_checkpoint = spec.Checkpoint(
-            epoch=previous_previous_epoch,
-            root=previous_previous_root,
-        )
+        patch_state_to_non_leaking(spec, state)

     return (
         # NOTE: the block randomization function assumes at least 1 shard committee period
@@ -80,26 +80,34 @@ checks: {<store_attibute>: value} -- the assertions.
 `<store_attibute>` is the field member or property of [`Store`](../../../specs/phase0/fork-choice.md#store) object that maintained by client implementation. Currently, the possible fields included:

 ```yaml
-head: { -- Encoded 32-byte value from get_head(store)
-    slot: slot,
-    root: string,
+head: {
+    slot: int,
+    root: string, -- Encoded 32-byte value from get_head(store)
 }
 time: int -- store.time
 genesis_time: int -- store.genesis_time
-justified_checkpoint_root: string -- Encoded 32-byte value from store.justified_checkpoint.root
-finalized_checkpoint_root: string -- Encoded 32-byte value from store.finalized_checkpoint.root
-best_justified_checkpoint_root: string -- Encoded 32-byte value from store.best_justified_checkpoint.root
+justified_checkpoint: {
+    epoch: int, -- Integer value from store.justified_checkpoint.epoch
+    root: string, -- Encoded 32-byte value from store.justified_checkpoint.root
+}
+finalized_checkpoint: {
+    epoch: int, -- Integer value from store.finalized_checkpoint.epoch
+    root: string, -- Encoded 32-byte value from store.finalized_checkpoint.root
+}
+best_justified_checkpoint: {
+    epoch: int, -- Integer value from store.best_justified_checkpoint.epoch
+    root: string, -- Encoded 32-byte value from store.best_justified_checkpoint.root
+}
 ```

 For example:
 ```yaml
 - checks:
-    time: 144
-    genesis_time: 0
-    head: {slot: 17, root: '0xd2724c86002f7e1f8656ab44a341a409ad80e6e70a5225fd94835566deebb66f'}
-    justified_checkpoint_root: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
-    finalized_checkpoint_root: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
-    best_justified_checkpoint: '0xcea6ecd3d3188e32ebf611f960eebd45b6c6f477a7cff242fa567a42653bfc7c'
+    time: 192
+    head: {slot: 32, root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'}
+    justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
+    finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
+    best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
 ```

 *Note*: Each `checks` step may include one or multiple items. Each item has to be checked against the current store.
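As a rough illustration of how the documented fields could be consumed, a client-side harness might apply a `checks` step along these lines. This is a sketch, not part of the test format: it assumes a local `store` object with the documented attributes and a hypothetical `encode_root` helper that returns the 0x-prefixed hex form of a root.

```python
# Hypothetical checks runner: field names mirror the YAML keys documented above.
def apply_checks(spec, store, checks):
    if 'time' in checks:
        assert store.time == checks['time']
    if 'genesis_time' in checks:
        assert store.genesis_time == checks['genesis_time']
    if 'head' in checks:
        head = spec.get_head(store)
        assert store.blocks[head].slot == checks['head']['slot']
        assert encode_root(head) == checks['head']['root']  # encode_root: assumed hex helper
    for name in ('justified_checkpoint', 'finalized_checkpoint', 'best_justified_checkpoint'):
        if name in checks:
            checkpoint = getattr(store, name)
            assert checkpoint.epoch == checks[name]['epoch']
            assert encode_root(checkpoint.root) == checks[name]['root']
```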
@@ -12,9 +12,9 @@ if __name__ == "__main__":
         'voluntary_exit',
     ]}
     altair_mods = {
-        **{key: 'eth2spec.test.altair.block_processing.sync_aggregate.test_process_' + key for key in [
-            'sync_aggregate',
-            'sync_aggregate_random',
+        **{'sync_aggregate': [
+            'eth2spec.test.altair.block_processing.sync_aggregate.test_process_' + key
+            for key in ['sync_aggregate', 'sync_aggregate_random']
         ]},
         **phase_0_mods,
     }  # also run the previous phase 0 tests
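For clarity, the reshaped entry maps a single operation name to a list of test modules rather than one module per key. Assuming the generator accepts lists of module paths as values, the new `'sync_aggregate'` entry evaluates to the literal below:

```python
# Equivalent literal form of the new 'sync_aggregate' entry:
{
    'sync_aggregate': [
        'eth2spec.test.altair.block_processing.sync_aggregate.test_process_sync_aggregate',
        'eth2spec.test.altair.block_processing.sync_aggregate.test_process_sync_aggregate_random',
    ]
}
```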