Merge pull request #2733 from ethereum/dev
minor Merge and forkchoice updates release
commit c81c27eb48
Makefile (4 changed lines)
@@ -97,12 +97,12 @@ install_test:
 # Testing against `minimal` config by default
 test: pyspec
 	. venv/bin/activate; cd $(PY_SPEC_DIR); \
-	python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+	python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.merge.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
 
 # Testing against `minimal` config by default
 find_test: pyspec
 	. venv/bin/activate; cd $(PY_SPEC_DIR); \
-	python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+	python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.merge.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
 
 citest: pyspec
 	mkdir -p tests/core/pyspec/test-reports/eth2spec;
@@ -70,6 +70,11 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4
 CHURN_LIMIT_QUOTIENT: 65536
 
 
+# Fork choice
+# ---------------------------------------------------------------
+# 70%
+PROPOSER_SCORE_BOOST: 70
+
 # Deposit contract
 # ---------------------------------------------------------------
 # Ethereum PoW Mainnet
@@ -69,6 +69,12 @@ MIN_PER_EPOCH_CHURN_LIMIT: 4
 CHURN_LIMIT_QUOTIENT: 32
 
 
+# Fork choice
+# ---------------------------------------------------------------
+# 70%
+PROPOSER_SCORE_BOOST: 70
+
+
 # Deposit contract
 # ---------------------------------------------------------------
 # Ethereum Goerli testnet
setup.py (2 changed lines)
@@ -511,7 +511,7 @@ ExecutionState = Any
 
 
 def get_pow_block(hash: Bytes32) -> Optional[PowBlock]:
-    return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0), difficulty=uint256(0))
+    return PowBlock(block_hash=hash, parent_hash=Bytes32(), total_difficulty=uint256(0))
 
 
 def get_execution_state(execution_state_root: Bytes32) -> ExecutionState:
@@ -685,7 +685,7 @@ This helper function is only for initializing the state for pure Altair testnets
 *Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.
 
 ```python
-def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                       eth1_timestamp: uint64,
                                       deposits: Sequence[Deposit]) -> BeaconState:
     fork = Fork(
@@ -24,8 +24,8 @@
     - [`ExecutionPayloadHeader`](#executionpayloadheader)
 - [Helper functions](#helper-functions)
   - [Predicates](#predicates)
-    - [`is_merge_complete`](#is_merge_complete)
-    - [`is_merge_block`](#is_merge_block)
+    - [`is_merge_transition_complete`](#is_merge_transition_complete)
+    - [`is_merge_transition_block`](#is_merge_transition_block)
     - [`is_execution_enabled`](#is_execution_enabled)
   - [Misc](#misc)
     - [`compute_timestamp_at_slot`](#compute_timestamp_at_slot)
@@ -167,7 +167,7 @@ class BeaconState(Container):
 class ExecutionPayload(Container):
     # Execution block header fields
     parent_hash: Hash32
-    coinbase: ExecutionAddress  # 'beneficiary' in the yellow paper
+    fee_recipient: ExecutionAddress  # 'beneficiary' in the yellow paper
     state_root: Bytes32
     receipt_root: Bytes32  # 'receipts root' in the yellow paper
     logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
@@ -189,7 +189,7 @@ class ExecutionPayload(Container):
 class ExecutionPayloadHeader(Container):
     # Execution block header fields
     parent_hash: Hash32
-    coinbase: ExecutionAddress
+    fee_recipient: ExecutionAddress
     state_root: Bytes32
     receipt_root: Bytes32
     logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
@@ -209,25 +209,25 @@ class ExecutionPayloadHeader(Container):
 
 ### Predicates
 
-#### `is_merge_complete`
+#### `is_merge_transition_complete`
 
 ```python
-def is_merge_complete(state: BeaconState) -> bool:
+def is_merge_transition_complete(state: BeaconState) -> bool:
     return state.latest_execution_payload_header != ExecutionPayloadHeader()
 ```
 
-#### `is_merge_block`
+#### `is_merge_transition_block`
 
 ```python
-def is_merge_block(state: BeaconState, body: BeaconBlockBody) -> bool:
-    return not is_merge_complete(state) and body.execution_payload != ExecutionPayload()
+def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
+    return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
 ```
 
 #### `is_execution_enabled`
 
 ```python
 def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
-    return is_merge_block(state, body) or is_merge_complete(state)
+    return is_merge_transition_block(state, body) or is_merge_transition_complete(state)
 ```
 
 ### Misc
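As an editor's illustration of how the three renamed predicates relate across the transition (a self-contained toy sketch, not part of the spec diff; the string stand-ins below replace the real SSZ types):

```python
# Toy stand-ins (NOT the spec SSZ containers) just to trace the predicate logic.
EMPTY_HEADER = "empty-header"
EMPTY_PAYLOAD = "empty-payload"

def is_merge_transition_complete(latest_header):
    return latest_header != EMPTY_HEADER

def is_merge_transition_block(latest_header, payload):
    return not is_merge_transition_complete(latest_header) and payload != EMPTY_PAYLOAD

def is_execution_enabled(latest_header, payload):
    return is_merge_transition_block(latest_header, payload) or is_merge_transition_complete(latest_header)

# Before the transition, the block carrying the first real payload is the transition block.
assert is_merge_transition_block(EMPTY_HEADER, "payload-1")
assert is_execution_enabled(EMPTY_HEADER, "payload-1")

# Once a payload header has been cached into the state, the transition is complete:
assert is_merge_transition_complete("header-1")
assert not is_merge_transition_block("header-1", "payload-2")  # later blocks are not transition blocks
assert is_execution_enabled("header-1", "payload-2")           # but execution stays enabled
```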
@@ -346,7 +346,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
 ```python
 def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None:
     # Verify consistency of the parent hash with respect to the previous execution payload header
-    if is_merge_complete(state):
+    if is_merge_transition_complete(state):
         assert payload.parent_hash == state.latest_execution_payload_header.block_hash
     # Verify random
     assert payload.random == get_randao_mix(state, get_current_epoch(state))
@@ -357,7 +357,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
     # Cache execution payload header
     state.latest_execution_payload_header = ExecutionPayloadHeader(
         parent_hash=payload.parent_hash,
-        coinbase=payload.coinbase,
+        fee_recipient=payload.fee_recipient,
         state_root=payload.state_root,
         receipt_root=payload.receipt_root,
         logs_bloom=payload.logs_bloom,
@@ -406,7 +406,7 @@ Modifications include:
 Else, the Merge starts from genesis and the transition is incomplete.
 
 ```python
-def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
+def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                       eth1_timestamp: uint64,
                                       deposits: Sequence[Deposit],
                                       execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
@@ -77,7 +77,7 @@ Used to signal to initiate the payload build process via `notify_forkchoice_upda
 class PayloadAttributes(object):
     timestamp: uint64
     random: Bytes32
-    fee_recipient: ExecutionAddress
+    suggested_fee_recipient: ExecutionAddress
 ```
 
 ### `PowBlock`
@@ -87,7 +87,6 @@ class PowBlock(Container):
     block_hash: Hash32
     parent_hash: Hash32
     total_difficulty: uint256
-    difficulty: uint256
 ```
 
 ### `get_pow_block`
@@ -168,7 +167,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     state_transition(state, signed_block, True)
 
     # [New in Merge]
-    if is_merge_block(pre_state, block.body):
+    if is_merge_transition_block(pre_state, block.body):
         validate_merge_block(block)
 
     # Add new block to the store
@@ -176,6 +175,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     # Add new state for this block to the store
     store.block_states[hash_tree_root(block)] = state
 
+    # Add proposer score boost if the block is timely
+    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
+    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
+    if get_current_slot(store) == block.slot and is_before_attesting_interval:
+        store.proposer_boost_root = hash_tree_root(block)
+
     # Update justified checkpoint
     if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
         if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
@@ -186,17 +191,5 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     # Update finalized checkpoint
     if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
         store.finalized_checkpoint = state.finalized_checkpoint
-
-        # Potentially update justified if different from store
-        if store.justified_checkpoint != state.current_justified_checkpoint:
-            # Update justified if new justified is later than store justified
-            if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-                store.justified_checkpoint = state.current_justified_checkpoint
-                return
-
-            # Update justified if store justified is not in chain with finalized checkpoint
-            finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-            ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
-            if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
-                store.justified_checkpoint = state.current_justified_checkpoint
+        store.justified_checkpoint = state.current_justified_checkpoint
 ```
@@ -43,12 +43,16 @@ Please see related Beacon Chain doc before continuing and use them as a referenc
 ```python
 def get_pow_block_at_terminal_total_difficulty(pow_chain: Dict[Hash32, PowBlock]) -> Optional[PowBlock]:
     # `pow_chain` abstractly represents all blocks in the PoW chain
-    for block in pow_chain:
-        parent = pow_chain[block.parent_hash]
+    for block in pow_chain.values():
         block_reached_ttd = block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
-        parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
-        if block_reached_ttd and not parent_reached_ttd:
-            return block
+        if block_reached_ttd:
+            # If genesis block, no parent exists so reaching TTD alone qualifies as valid terminal block
+            if block.parent_hash == Hash32():
+                return block
+            parent = pow_chain[block.parent_hash]
+            parent_reached_ttd = parent.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY
+            if not parent_reached_ttd:
+                return block
 
     return None
 ```
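As a rough illustration of the new selection logic, here is an editor's toy sketch (the `PowBlock` model, hashes, and the terminal total difficulty value are all made up; `toy_terminal_block` only mirrors the loop shown above):

```python
from collections import namedtuple

# Self-contained toy model (NOT the spec types): just enough to trace the loop.
PowBlock = namedtuple("PowBlock", ["block_hash", "parent_hash", "total_difficulty"])
GENESIS_PARENT = "0x00"          # stands in for Hash32()
TERMINAL_TOTAL_DIFFICULTY = 100  # made-up value for illustration

pow_chain = {
    "0xaa": PowBlock("0xaa", GENESIS_PARENT, 90),   # below TTD
    "0xbb": PowBlock("0xbb", "0xaa", 105),          # first block to reach TTD -> terminal block
    "0xcc": PowBlock("0xcc", "0xbb", 120),          # parent already reached TTD -> skipped
}

def toy_terminal_block(chain):
    for block in chain.values():
        if block.total_difficulty >= TERMINAL_TOTAL_DIFFICULTY:
            if block.parent_hash == GENESIS_PARENT:
                return block                         # genesis special case added by this diff
            parent = chain[block.parent_hash]
            if parent.total_difficulty < TERMINAL_TOTAL_DIFFICULTY:
                return block
    return None

assert toy_terminal_block(pow_chain).block_hash == "0xbb"
```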
@@ -106,22 +110,22 @@ All validator responsibilities remain unchanged other than those noted below. Na
 
 To obtain an execution payload, a block proposer building a block on top of a `state` must take the following actions:
 
-1. Set `payload_id = prepare_execution_payload(state, pow_chain, finalized_block_hash, fee_recipient, execution_engine)`, where:
+1. Set `payload_id = prepare_execution_payload(state, pow_chain, finalized_block_hash, suggested_fee_recipient, execution_engine)`, where:
     * `state` is the state object after applying `process_slots(state, slot)` transition to the resulting state of the parent block processing
     * `pow_chain` is a `Dict[Hash32, PowBlock]` dictionary that abstractly represents all blocks in the PoW chain with block hash as the dictionary key
    * `finalized_block_hash` is the hash of the latest finalized execution payload (`Hash32()` if none yet finalized)
-    * `fee_recipient` is the value suggested to be used for the `coinbase` field of the execution payload
+    * `suggested_fee_recipient` is the value suggested to be used for the `fee_recipient` field of the execution payload
 
 
 ```python
 def prepare_execution_payload(state: BeaconState,
                               pow_chain: Dict[Hash32, PowBlock],
                               finalized_block_hash: Hash32,
-                              fee_recipient: ExecutionAddress,
+                              suggested_fee_recipient: ExecutionAddress,
                               execution_engine: ExecutionEngine) -> Optional[PayloadId]:
-    if not is_merge_complete(state):
+    if not is_merge_transition_complete(state):
         is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32()
-        is_activation_epoch_reached = get_current_epoch(state.slot) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
+        is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH
         if is_terminal_block_hash_set and not is_activation_epoch_reached:
             # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed
             return None
@@ -140,7 +144,7 @@ def prepare_execution_payload(state: BeaconState,
     payload_attributes = PayloadAttributes(
         timestamp=compute_timestamp_at_slot(state, state.slot),
         random=get_randao_mix(state, get_current_epoch(state)),
-        fee_recipient=fee_recipient,
+        suggested_fee_recipient=suggested_fee_recipient,
     )
     return execution_engine.notify_forkchoice_updated(parent_hash, finalized_block_hash, payload_attributes)
 ```
@@ -169,7 +169,6 @@ We define the following Python custom types for type hinting and readability:
 | `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
 | `BLSSignature` | `Bytes96` | a BLS12-381 signature |
 
-
 ## Constants
 
 The following values are (non-configurable) constants used throughout the specification.
|
|||
Proof-of-work blocks must only be considered once they are at least `SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE` seconds old (i.e. `eth1_timestamp + SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE <= current_unix_time`). Due to this constraint, if `GENESIS_DELAY < SECONDS_PER_ETH1_BLOCK * ETH1_FOLLOW_DISTANCE`, then the `genesis_time` can happen before the time/state is first known. Values should be configured to avoid this case.
|
||||
|
||||
```python
|
||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
|
||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
eth1_timestamp: uint64,
|
||||
deposits: Sequence[Deposit]) -> BeaconState:
|
||||
fork = Fork(
|
||||
|
|
|
@@ -7,7 +7,9 @@
 
 - [Introduction](#introduction)
 - [Fork choice](#fork-choice)
+  - [Constant](#constant)
+  - [Preset](#preset)
   - [Configuration](#configuration)
   - [Helpers](#helpers)
     - [`LatestMessage`](#latestmessage)
     - [`Store`](#store)
@@ -22,6 +24,7 @@
     - [`get_head`](#get_head)
     - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint)
     - [`on_attestation` helpers](#on_attestation-helpers)
+      - [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time)
       - [`validate_on_attestation`](#validate_on_attestation)
       - [`store_target_checkpoint_state`](#store_target_checkpoint_state)
       - [`update_latest_messages`](#update_latest_messages)
@@ -55,12 +58,27 @@ Any of the above handlers that trigger an unhandled exception (e.g. a failed ass
 4) **Manual forks**: Manual forks may arbitrarily change the fork choice rule but are expected to be enacted at epoch transitions, with the fork details reflected in `state.fork`.
 5) **Implementation**: The implementation found in this specification is constructed for ease of understanding rather than for optimization in computation, space, or any other resource. A number of optimized alternatives can be found [here](https://github.com/protolambda/lmd-ghost).
 
 
+### Constant
+
+| Name | Value |
+| - | - |
+| `INTERVALS_PER_SLOT` | `uint64(3)` |
+
+### Preset
 
 | Name | Value | Unit |  Duration  |
 | - | - | :-: | :-: |
 | `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds |
 
+### Configuration
+
+| Name | Value |
+| - | - |
+| `PROPOSER_SCORE_BOOST` | `uint64(70)` |
+
+- The proposer score boost is worth `PROPOSER_SCORE_BOOST` percentage of the committee's weight, i.e., for slot with committee weight `committee_weight` the boost weight is equal to `(committee_weight * PROPOSER_SCORE_BOOST) // 100`.
+
 ### Helpers
 
 #### `LatestMessage`
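For a concrete feel of the boost weight arithmetic described above, a small worked example (the validator count and balances are made-up illustration values; only `PROPOSER_SCORE_BOOST = 70` and `SLOTS_PER_EPOCH = 32` correspond to mainnet):

```python
# Illustrative numbers only; this mirrors the committee_weight computation used by the boost.
PROPOSER_SCORE_BOOST = 70          # percent, introduced in this release
SLOTS_PER_EPOCH = 32               # mainnet preset
GWEI_PER_ETH = 10**9

num_validators = 300_000                                  # made-up network size
avg_balance = 32 * GWEI_PER_ETH                           # assume full effective balances
committee_size = num_validators // SLOTS_PER_EPOCH        # validators attesting in one slot
committee_weight = committee_size * avg_balance           # total attesting weight for that slot

proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
print(committee_weight, proposer_score)                   # boost = 70% of one slot's committee weight
```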
@@ -82,6 +100,7 @@ class Store(object):
     justified_checkpoint: Checkpoint
     finalized_checkpoint: Checkpoint
     best_justified_checkpoint: Checkpoint
+    proposer_boost_root: Root
     blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
     block_states: Dict[Root, BeaconState] = field(default_factory=dict)
     checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
@@ -102,12 +121,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
     anchor_epoch = get_current_epoch(anchor_state)
     justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
     finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
+    proposer_boost_root = Root()
     return Store(
         time=uint64(anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot),
         genesis_time=anchor_state.genesis_time,
         justified_checkpoint=justified_checkpoint,
         finalized_checkpoint=finalized_checkpoint,
         best_justified_checkpoint=justified_checkpoint,
+        proposer_boost_root=proposer_boost_root,
         blocks={anchor_root: copy(anchor_block)},
         block_states={anchor_root: copy(anchor_state)},
         checkpoint_states={justified_checkpoint: copy(anchor_state)},
@@ -155,11 +176,22 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root:
 def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
     state = store.checkpoint_states[store.justified_checkpoint]
     active_indices = get_active_validator_indices(state, get_current_epoch(state))
-    return Gwei(sum(
+    attestation_score = Gwei(sum(
         state.validators[i].effective_balance for i in active_indices
         if (i in store.latest_messages
             and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
     ))
+    proposer_score = Gwei(0)
+    if store.proposer_boost_root != Root():
+        block = store.blocks[root]
+        if get_ancestor(store, root, block.slot) == store.proposer_boost_root:
+            num_validators = len(get_active_validator_indices(state, get_current_epoch(state)))
+            avg_balance = get_total_active_balance(state) // num_validators
+            committee_size = num_validators // SLOTS_PER_EPOCH
+            committee_weight = committee_size * avg_balance
+            proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100
+    return attestation_score + proposer_score
 ```
 
 #### `filter_block_tree`
@@ -257,10 +289,11 @@ def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: C
 
 #### `on_attestation` helpers
 
-##### `validate_on_attestation`
+##### `validate_target_epoch_against_current_time`
 
 ```python
-def validate_on_attestation(store: Store, attestation: Attestation) -> None:
+def validate_target_epoch_against_current_time(store: Store, attestation: Attestation) -> None:
     target = attestation.data.target
 
     # Attestations must be from the current or previous epoch
@@ -269,6 +302,19 @@ def validate_on_attestation(store: Store, attestation: Attestation) -> None:
     previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
     # If attestation target is from a future epoch, delay consideration until the epoch arrives
     assert target.epoch in [current_epoch, previous_epoch]
+```
+
+##### `validate_on_attestation`
+
+```python
+def validate_on_attestation(store: Store, attestation: Attestation, is_from_block: bool) -> None:
+    target = attestation.data.target
+
+    # If the given attestation is not from a beacon block message, we have to check the target epoch scope.
+    if not is_from_block:
+        validate_target_epoch_against_current_time(store, attestation)
+
     # Check that the epoch number and slot number are matching
     assert target.epoch == compute_epoch_at_slot(attestation.data.slot)
 
     # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found
@@ -324,6 +370,11 @@ def on_tick(store: Store, time: uint64) -> None:
     store.time = time
 
     current_slot = get_current_slot(store)
+
+    # Reset store.proposer_boost_root if this is a new slot
+    if current_slot > previous_slot:
+        store.proposer_boost_root = Root()
+
     # Not a new epoch, return
     if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0):
         return
@@ -362,6 +413,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     # Add new state for this block to the store
     store.block_states[hash_tree_root(block)] = state
 
+    # Add proposer score boost if the block is timely
+    time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT
+    is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
+    if get_current_slot(store) == block.slot and is_before_attesting_interval:
+        store.proposer_boost_root = hash_tree_root(block)
+
     # Update justified checkpoint
     if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
         if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
@@ -372,32 +429,21 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     # Update finalized checkpoint
     if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
         store.finalized_checkpoint = state.finalized_checkpoint
-
-        # Potentially update justified if different from store
-        if store.justified_checkpoint != state.current_justified_checkpoint:
-            # Update justified if new justified is later than store justified
-            if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
-                store.justified_checkpoint = state.current_justified_checkpoint
-                return
-
-            # Update justified if store justified is not in chain with finalized checkpoint
-            finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-            ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
-            if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
-                store.justified_checkpoint = state.current_justified_checkpoint
+        store.justified_checkpoint = state.current_justified_checkpoint
 ```
 
 #### `on_attestation`
 
 ```python
-def on_attestation(store: Store, attestation: Attestation) -> None:
+def on_attestation(store: Store, attestation: Attestation, is_from_block: bool=False) -> None:
     """
     Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
 
     An ``attestation`` that is asserted as invalid may be valid at a later time,
     consider scheduling it for later processing in such case.
     """
-    validate_on_attestation(store, attestation)
+    validate_on_attestation(store, attestation, is_from_block)
 
     store_target_checkpoint_state(store, attestation.data.target)
 
     # Get state at the `target` to fully validate attestation
@@ -446,7 +446,7 @@ def get_block_signature(state: BeaconState, block: BeaconBlock, privkey: int) ->
 
 A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `index`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`.
 
-A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid block from the expected block proposer for the assigned `slot` or (b) one-third of the `slot` has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.
+A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid block from the expected block proposer for the assigned `slot` or (b) `1 / INTERVALS_PER_SLOT` of the `slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of `slot`) -- whichever comes _first_.
 
 *Note*: Although attestations during `GENESIS_EPOCH` do not count toward FFG finality, these initial attestations do give weight to the fork choice, are rewarded, and should be made.
@@ -569,7 +569,7 @@ def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature
 
 #### Broadcast aggregate
 
-If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
+If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) `2 / INTERVALS_PER_SLOT` of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT` seconds after the start of `slot`.
 
 Selection proofs are provided in `AggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
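Putting mainnet numbers (`SECONDS_PER_SLOT = 12`, `INTERVALS_PER_SLOT = 3`) on the two deadlines referenced in the hunks above, as a quick sanity check:

```python
SECONDS_PER_SLOT = 12      # mainnet
INTERVALS_PER_SLOT = 3     # constant introduced in this release

attestation_deadline = SECONDS_PER_SLOT / INTERVALS_PER_SLOT       # 4.0 s into the slot
aggregate_broadcast = SECONDS_PER_SLOT * 2 / INTERVALS_PER_SLOT    # 8.0 s into the slot

# Same one-third / two-thirds boundaries as before; the wording now derives them
# from INTERVALS_PER_SLOT instead of hard-coded fractions.
print(attestation_deadline, aggregate_broadcast)
```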
@@ -0,0 +1,473 @@
# Getting Started with Consensus Spec Tests

## Getting Started

### Creating the environment

Use an OS that has Python 3.8 or above. For example, Debian 11 (bullseye).

1. Install the packages you need:
   ```sh
   sudo apt install -y make git wget python3-venv gcc python3-dev
   ```
1. Download the latest [consensus specs](https://github.com/ethereum/consensus-specs):
   ```sh
   git clone https://github.com/ethereum/consensus-specs.git
   cd consensus-specs
   ```
1. Create the specifications and tests:
   ```sh
   make install_test
   make pyspec
   ```

To read more about creating the environment, [see here](core/pyspec/README.md).

### Running your first test

1. Enter the virtual Python environment:
   ```sh
   cd ~/consensus-specs
   . venv/bin/activate
   ```
1. Run a sanity check test:
   ```sh
   cd tests/core/pyspec
   python -m pytest -k test_empty_block_transition --fork Merge eth2spec
   ```
1. The output should be similar to:
   ```
   ============================= test session starts ==============================
   platform linux -- Python 3.9.2, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
   rootdir: /home/qbzzt1/consensus-specs
   plugins: cov-2.12.1, forked-1.3.0, xdist-2.3.0
   collected 629 items / 626 deselected / 3 selected

   eth2spec/test/merge/sanity/test_blocks.py .                              [ 33%]
   eth2spec/test/phase0/sanity/test_blocks.py ..                            [100%]

   =============================== warnings summary ===============================
   ../../../venv/lib/python3.9/site-packages/cytoolz/compatibility.py:2
     /home/qbzzt1/consensus-specs/venv/lib/python3.9/site-packages/cytoolz/compatibility.py:2:
     DeprecationWarning: The toolz.compatibility module is no longer needed in Python 3 and has
     been deprecated. Please import these utilities directly from the standard library. This
     module will be removed in a future release.
     warnings.warn("The toolz.compatibility module is no longer "

   -- Docs: https://docs.pytest.org/en/stable/warnings.html
   ================ 3 passed, 626 deselected, 1 warning in 16.81s =================
   ```

## The "Hello, World" of Consensus Spec Tests
|
||||
|
||||
One of the `test_empty_block_transition` tests is implemented by a function with the same
|
||||
name located in
|
||||
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py).
|
||||
To learn how consensus spec tests are written, let's go over the code:
|
||||
|
||||
```python
|
||||
@with_all_phases
|
||||
```
|
||||
|
||||
This [decorator](https://book.pythontips.com/en/latest/decorators.html) specifies that this test
|
||||
is applicable to all the phases of consensus layer development. These phases are similar to forks (Istanbul,
|
||||
Berlin, London, etc.) in the execution blockchain. If you are interested, [you can see the definition of
|
||||
this decorator here](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L331-L335).
|
||||
|
||||
```python
|
||||
@spec_state_test
|
||||
```
|
||||
|
||||
[This decorator](https://github.com/qbzzt/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py#L232-L234) specifies
|
||||
that this test is a state transition test, and that it does not include a transition between different forks.
|
||||
|
||||
```python
|
||||
def test_empty_block_transition(spec, state):
|
||||
```
|
||||
|
||||
This type of test receives two parameters:
|
||||
|
||||
* `specs`: The protocol specifications
|
||||
* `state`: The genesis state before the test
|
||||
|
||||
```python
|
||||
pre_slot = state.slot
|
||||
```
|
||||
|
||||
A slot is a unit of time (every 12 seconds in mainnet), for which a specific validator (selected randomly but in a
|
||||
deterministic manner) is a proposer. The proposer can propose a block during that slot.
|
||||
|
||||
```python
|
||||
pre_eth1_votes = len(state.eth1_data_votes)
|
||||
pre_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))
|
||||
```
|
||||
|
||||
Store some values to check later that certain updates happened.
|
||||
|
||||
```python
|
||||
yield 'pre', state
|
||||
```
|
||||
|
||||
In Python `yield` is used by [generators](https://wiki.python.org/moin/Generators). However, for our purposes
|
||||
we can treat it as a partial return statement that doesn't stop the function's processing, only adds to a list
|
||||
of return values. Here we add two values, the string `'pre'` and the initial state, to the list of return values.
|
||||
|
||||
[You can read more about test generators and how the are used here](generators).
|
||||
|
||||
```python
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
```
|
||||
|
||||
The state contains the last block, which is necessary for building up the next block (every block needs to
|
||||
have the hash of the previous one in a blockchain).
|
||||
|
||||
```python
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
```
|
||||
|
||||
Create a block signed by the appropriate proposer and advance the state.
|
||||
|
||||
```python
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
```
|
||||
|
||||
More `yield` statements. The output of a consensus test is:
|
||||
|
||||
1. `'pre'`
|
||||
2. The state before the test was run
|
||||
3. `'blocks'`
|
||||
4. A list of signed blocks
|
||||
5. `'post'`
|
||||
6. The state after the test
|
||||
|
||||
|
||||
|
||||
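As a rough picture of what happens to those yields (this harness is only an illustration, not the actual pyspec test runner), you can think of the runner as draining the generator into a dictionary:

```python
# Hypothetical sketch of how a runner could gather a pyspec-style test's yields;
# the real pyspec generator tooling is more involved than this.
def collect_test_output(test_generator):
    output = {}
    for name, value in test_generator:   # e.g. ('pre', state), ('blocks', [...]), ('post', state)
        output[name] = value
    return output

def demo_test():
    # stands in for a spec test: yields the same (name, value) pairs
    yield 'pre', {'slot': 0}
    yield 'blocks', [{'slot': 1}]
    yield 'post', {'slot': 1}

vector = collect_test_output(demo_test())
assert set(vector) == {'pre', 'blocks', 'post'}
```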
```python
# One vote for the eth1
assert len(state.eth1_data_votes) == pre_eth1_votes + 1

# Check that the new parent root is correct
assert spec.get_block_root_at_slot(state, pre_slot) == signed_block.message.parent_root

# Random data changed
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != pre_mix
```

Finally, we have assertions that check the transition was legitimate. In this case there are three assertions:

1. One item was added to `eth1_data_votes`
2. The new block's `parent_root` is the root of the block at the previous slot
3. The random data that every block includes was changed


## New Tests

The easiest way to write a new test is to copy and modify an existing one. For example,
let's write a test where the first slot of the beacon chain is empty (because the assigned
proposer is offline, for example), and then there's an empty block in the second slot.

We already know how to accomplish most of what we need for this test, but the only way we know
to advance the state is `state_transition_and_sign_block`, a function that also puts a block
into the slot. So let's see if the function's definition tells us how to advance the state without
a block.

First, we need to find out where the function is located. Run:

```sh
find . -name '*.py' -exec grep 'def state_transition_and_sign_block' {} \; -print
```

And you'll find that the function is defined in
[`eth2spec/test/helpers/state.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/state.py). Looking
in that file, we see that the second function is:

```python
def next_slot(spec, state):
    """
    Transition to the next slot.
    """
    spec.process_slots(state, state.slot + 1)
```

This looks like exactly what we need. So we add this call before we create the empty block:

```python
.
.
.
yield 'pre', state

next_slot(spec, state)

block = build_empty_block_for_next_slot(spec, state)
.
.
.
```

That's it. Our new test works (copy `test_empty_block_transition`, rename it, add the `next_slot` call, and then run it to
verify this).


## Tests Designed to Fail

It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol
is supposed to reject something. To see such a test, look at `test_prev_slot_block_transition` (in the same
file we used previously,
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py)).

```python
@with_all_phases
@spec_state_test
def test_prev_slot_block_transition(spec, state):
    spec.process_slots(state, state.slot + 1)
    block = build_empty_block(spec, state, slot=state.slot)
```

Build an empty block for the current slot.

```python
proposer_index = spec.get_beacon_proposer_index(state)
```

Get the identity of the current proposer, the one for *this* slot.

```python
spec.process_slots(state, state.slot + 1)
```

Transition to the new slot, which naturally has a different proposer.

```python
yield 'pre', state
expect_assertion_error(lambda: transition_unsigned_block(spec, state, block))
```

Specify that the function `transition_unsigned_block` will cause an assertion error.
You can see this function in
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/block.py),
and one of the tests is that the block must be for this slot:

> ```python
> assert state.slot == block.slot
> ```

Because we use [lambda notation](https://www.w3schools.com/python/python_lambda.asp), the test
does not call `transition_unsigned_block` here. Instead, this is a function parameter that can
be called later.

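Conceptually, `expect_assertion_error` just runs the zero-argument callable it is given and succeeds only if an `AssertionError` escapes. A minimal sketch of that idea (the real helper lives in the pyspec test utilities and may differ in detail):

```python
# Minimal sketch of the idea behind expect_assertion_error; not the pyspec implementation.
def expect_assertion_error_sketch(fn):
    try:
        fn()
    except AssertionError:
        return                          # the failure we wanted to see
    raise AssertionError("expected the callable to raise, but it succeeded")

def always_fails():
    assert False, "demo failure"

# Passing a lambda defers the call so it happens inside the helper's try block.
expect_assertion_error_sketch(lambda: always_fails())
```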
```python
block.state_root = state.hash_tree_root()
```

Set the block's state root to the current state hash tree root, which identifies this block as
belonging to this slot (even though it was created for the previous slot).

```python
signed_block = sign_block(spec, state, block, proposer_index=proposer_index)
```

Notice that `proposer_index` is the variable we set earlier, *before* we advanced
the slot with `spec.process_slots(state, state.slot + 1)`. It is not the proposer
for the current state.

```python
yield 'blocks', [signed_block]
yield 'post', None  # No post state, signifying it errors out
```

This is the way we specify that a test is designed to fail - failed tests have no post state,
because the processing mechanism errors out before creating it.


## Attestation Tests

The consensus layer doesn't provide any direct functionality to end users. It does
not execute EVM programs or store user data. It exists to provide a secure source of
information about the latest verified block hash of the execution layer.

For every slot a validator is randomly selected as the proposer. The proposer proposes a block
for the current head of the consensus layer chain (built on the previous block). That block
includes the hash of the proposed new head of the execution layer.

For every slot there is also a randomly selected committee of validators that needs to vote whether
the new consensus layer block is valid, which requires the proposed head of the execution chain to
also be a valid block. These votes are called [attestations](https://notes.ethereum.org/@hww/aggregation#112-Attestation),
and they are sent as independent messages. The proposer for a block is able to include attestations from previous slots,
which is how they get on chain to form consensus, reward honest validators, etc.

[You can see a simple successful attestation test here](https://github.com/ethereum/consensus-specs/blob/926e5a3d722df973b9a12f12c015783de35cafa9/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py#L26-L30):
Let's go over it line by line.

```python
@with_all_phases
@spec_state_test
def test_success(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)
```

[This function](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L88-L120)
creates a valid attestation (which can then be modified to make it invalid if needed).
To see an attestation "from the inside" we need to follow it.

> ```python
> def get_valid_attestation(spec,
>                           state,
>                           slot=None,
>                           index=None,
>                           filter_participant_set=None,
>                           signed=False):
> ```
>
> Only two parameters, `spec` and `state`, are required. However, there are four other parameters that can affect
> the attestation created by this function.
>
> ```python
>     # If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
>     # Thus strictly speaking invalid when no participant is added later.
>     if slot is None:
>         slot = state.slot
>     if index is None:
>         index = 0
> ```
>
> Default values. Normally we want to choose the current slot, and out of the proposers and committees that it can have,
> we want the first one.
>
> ```python
>     attestation_data = build_attestation_data(
>         spec, state, slot=slot, index=index
>     )
> ```
>
> Build the actual attestation. You can see this function
> [here](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L53-L85)
> to see the exact data in an attestation.
>
> ```python
>     beacon_committee = spec.get_beacon_committee(
>         state,
>         attestation_data.slot,
>         attestation_data.index,
>     )
> ```
>
> This is the committee that is supposed to approve or reject the proposed block.
>
> ```python
>     committee_size = len(beacon_committee)
>     aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
> ```
>
> There's a bit for every committee member to indicate whether it approves or not.
>
> ```python
>     attestation = spec.Attestation(
>         aggregation_bits=aggregation_bits,
>         data=attestation_data,
>     )
>     # fill the attestation with (optionally filtered) participants, and optionally sign it
>     fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
>
>     return attestation
> ```

```python
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
```

Attestations have to appear after the block they attest for, so we advance
`spec.MIN_ATTESTATION_INCLUSION_DELAY` slots before creating the block that includes the attestation.
Currently a single slot is sufficient, but that may change in the future.

```python
yield from run_attestation_processing(spec, state, attestation)
```

[This function](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L13-L50)
processes the attestation and returns the result.


### Adding an Attestation Test

Attestations can't happen in the same block as the one about which they are attesting, or in a block that is
after the block is finalized. This is specified as part of the specs, in the `process_attestation` function
(which is created from the spec by the `make pyspec` command you ran earlier). Here is the relevant code
fragment:

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    data = attestation.data
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
    assert data.target.epoch == compute_epoch_at_slot(data.slot)
    assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
    ...
```

In the last line you can see two conditions being asserted:

1. `data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot`, which verifies that the attestation doesn't
   arrive too early.
1. `state.slot <= data.slot + SLOTS_PER_EPOCH`, which verifies that the attestation doesn't
   arrive too late.

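To put concrete numbers on those two bounds (using the mainnet preset values `MIN_ATTESTATION_INCLUSION_DELAY = 1` and `SLOTS_PER_EPOCH = 32`; the example slot is made up):

```python
MIN_ATTESTATION_INCLUSION_DELAY = 1   # mainnet preset
SLOTS_PER_EPOCH = 32                  # mainnet preset

attestation_slot = 100                # made-up example slot
earliest_inclusion = attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY   # 101
latest_inclusion = attestation_slot + SLOTS_PER_EPOCH                     # 132

# process_attestation accepts the attestation only while
# earliest_inclusion <= state.slot <= latest_inclusion.
print(earliest_inclusion, latest_inclusion)
```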
This is how the consensus layer tests deal with edge cases, by asserting the conditions required for the
values to be legitimate. In the case of these particular conditions, they are tested
[here](https://github.com/ethereum/consensus-specs/blob/926e5a3d722df973b9a12f12c015783de35cafa9/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py#L87-L104).
One test checks what happens if the attestation is too early, and another if it is too late.

However, it is not enough to ensure we reject invalid blocks. It is also necessary to ensure we accept all valid blocks. You saw earlier
a test (`test_success`) that checked that being `MIN_ATTESTATION_INCLUSION_DELAY` after the data for which we attest is enough.
Now we'll write a similar test that verifies that being `SLOTS_PER_EPOCH` away is still valid. To do this, we modify the
`test_after_epoch_slots` function. We need two changes:

1. Call `transition_to_slot_via_block` with one less slot to advance
1. Don't tell `run_attestation_processing` to expect an empty post state (i.e. failure).

The modified function is:

```python
@with_all_phases
@spec_state_test
def test_almost_after_epoch_slots(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)

    # increment to latest inclusion slot (not beyond it)
    transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)

    yield from run_attestation_processing(spec, state, attestation)
```

Add this function to the file `consensus-specs/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py`,
and run the test:

```sh
cd ~/consensus-specs
. venv/bin/activate
cd tests/core/pyspec
python -m pytest -k almost_after --fork Merge eth2spec
```

You should see that it ran successfully (you might get a warning; you can ignore it).

## How are These Tests Used?

So far we've run tests against the formal specifications. This is a way to check that the specifications
are what we expect, but it doesn't actually check the beacon chain clients. The way these tests get applied
by clients is that every few weeks
[new test specifications are released](https://github.com/ethereum/consensus-spec-tests/releases),
in a format [documented here](https://github.com/ethereum/consensus-specs/tree/dev/tests/formats).
All the consensus layer clients implement test-runners that consume the test vectors in this standard format.

---

Original version by [Ori Pomerantz](mailto:qbzzt1@gmail.com)
@@ -1 +1 @@
-1.1.5
+1.1.6
@@ -1,12 +1,16 @@
 import random
 from eth2spec.test.context import (
-    MINIMAL,
-    fork_transition_test,
+    ForkMeta,
-    ALTAIR,
+    with_presets,
+    with_fork_metas,
 )
+from eth2spec.test.helpers.constants import (
+    ALL_PRE_POST_FORKS,
+    MINIMAL,
+)
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR
 from eth2spec.test.helpers.fork_transition import (
-    do_altair_fork,
+    do_fork,
     transition_until_fork,
     transition_to_next_epoch_and_append_blocks,
 )
@@ -21,7 +25,7 @@ from eth2spec.test.helpers.random import (
 # Exit
 #
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 @with_presets([MINIMAL],
               reason="only test with enough validators such that at least one exited index is not in sync committee")
 def test_transition_with_one_fourth_exiting_validators_exit_post_fork(state,
@@ -59,7 +63,7 @@ def test_transition_with_one_fourth_exiting_validators_exit_post_fork(state,
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # ensure that some of the current sync committee members are exiting
@@ -81,7 +85,7 @@ def test_transition_with_one_fourth_exiting_validators_exit_post_fork(state,
     yield "post", state
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 def test_transition_with_one_fourth_exiting_validators_exit_at_fork(state,
                                                                     fork_epoch,
                                                                     spec,
@@ -117,7 +121,7 @@ def test_transition_with_one_fourth_exiting_validators_exit_at_fork(state,
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # check post transition state
@@ -127,9 +131,13 @@ def test_transition_with_one_fourth_exiting_validators_exit_at_fork(state,
         assert not post_spec.is_active_validator(validator, post_spec.get_current_epoch(state))
     assert not post_spec.is_in_inactivity_leak(state)
 
-    # ensure that none of the current sync committee members are exited validators
     exited_pubkeys = [state.validators[index].pubkey for index in exited_indices]
-    assert not any(set(exited_pubkeys).intersection(list(state.current_sync_committee.pubkeys)))
+    some_sync_committee_exited = any(set(exited_pubkeys).intersection(list(state.current_sync_committee.pubkeys)))
+    if post_spec.fork == ALTAIR:
+        # in Altair fork, the sync committee members would be set with only active validators
+        assert not some_sync_committee_exited
+    else:
+        assert some_sync_committee_exited
 
     # continue regular state transition with new spec into next epoch
     transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)
@@ -143,7 +151,7 @@ def test_transition_with_one_fourth_exiting_validators_exit_at_fork(state,
 #
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 def test_transition_with_non_empty_activation_queue(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
     Create some deposits before the transition
@@ -161,7 +169,7 @@ def test_transition_with_non_empty_activation_queue(state, fork_epoch, spec, pos
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # continue regular state transition with new spec into next epoch
@@ -171,7 +179,7 @@ def test_transition_with_non_empty_activation_queue(state, fork_epoch, spec, pos
     yield "post", state
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 def test_transition_with_activation_at_fork_epoch(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
     Create some deposits before the transition
@@ -191,7 +199,7 @@ def test_transition_with_activation_at_fork_epoch(state, fork_epoch, spec, post_
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # continue regular state transition with new spec into next epoch
@@ -1,13 +1,18 @@
-from eth2spec.test.context import fork_transition_test
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR
+from eth2spec.test.context import (
+    ForkMeta,
+    with_fork_metas,
+)
+from eth2spec.test.helpers.constants import (
+    ALL_PRE_POST_FORKS,
+)
 from eth2spec.test.helpers.fork_transition import (
-    do_altair_fork,
+    do_fork,
     transition_until_fork,
     transition_to_next_epoch_and_append_blocks,
 )
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=7)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=7) for pre, post in ALL_PRE_POST_FORKS])
 def test_transition_with_leaking_pre_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
     Leaking starts at epoch 6 (MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2).
@@ -22,7 +27,7 @@ def test_transition_with_leaking_pre_fork(state, fork_epoch, spec, post_spec, pr
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # check post transition state
@@ -35,7 +40,7 @@ def test_transition_with_leaking_pre_fork(state, fork_epoch, spec, post_spec, pr
     yield "post", state
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=6)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=6) for pre, post in ALL_PRE_POST_FORKS])
 def test_transition_with_leaking_at_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
     Leaking starts at epoch 6 (MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2).
@@ -50,7 +55,7 @@ def test_transition_with_leaking_at_fork(state, fork_epoch, spec, post_spec, pre
 
     # irregular state transition to handle fork:
     blocks = []
-    state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
+    state, block = do_fork(state, spec, post_spec, fork_epoch)
     blocks.append(post_tag(block))
 
     # check post transition state
@@ -1,8 +1,13 @@
 from eth2spec.test.context import (
+    ForkMeta,
     always_bls,
-    fork_transition_test,
+    with_fork_metas,
+    with_presets,
 )
+from eth2spec.test.helpers.constants import (
+    ALL_PRE_POST_FORKS,
+    MINIMAL,
+)
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR
 from eth2spec.test.helpers.fork_transition import (
     OperationType,
     run_transition_with_operation,
@@ -13,7 +18,7 @@ from eth2spec.test.helpers.fork_transition import (
 # PROPOSER_SLASHING
 #
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 @always_bls
 def test_transition_with_proposer_slashing_right_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
@@ -31,7 +36,7 @@ def test_transition_with_proposer_slashing_right_after_fork(state, fork_epoch, s
     )
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 @always_bls
 def test_transition_with_proposer_slashing_right_before_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
@@ -54,7 +59,7 @@ def test_transition_with_proposer_slashing_right_before_fork(state, fork_epoch,
 #
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 @always_bls
 def test_transition_with_attester_slashing_right_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
@@ -72,7 +77,7 @@ def test_transition_with_attester_slashing_right_after_fork(state, fork_epoch, s
     )
 
 
-@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
 @always_bls
 def test_transition_with_attester_slashing_right_before_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
     """
@ -95,7 +100,7 @@ def test_transition_with_attester_slashing_right_before_fork(state, fork_epoch,
|
|||
#
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_deposit_right_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a deposit right *after* the transition
|
||||
|
@ -112,7 +117,7 @@ def test_transition_with_deposit_right_after_fork(state, fork_epoch, spec, post_
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_deposit_right_before_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a deposit right *before* the transition
|
||||
|
@ -134,11 +139,12 @@ def test_transition_with_deposit_right_before_fork(state, fork_epoch, spec, post
|
|||
#
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=260)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=66) for pre, post in ALL_PRE_POST_FORKS])
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_transition_with_voluntary_exit_right_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a voluntary exit right *after* the transition.
|
||||
fork_epoch=260 because mainnet `SHARD_COMMITTEE_PERIOD` is 256 epochs.
|
||||
fork_epoch=66 because minimal preset `SHARD_COMMITTEE_PERIOD` is 64 epochs.
|
||||
"""
|
||||
# Fast forward to the future epoch so that validator can do voluntary exit
|
||||
state.slot = spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
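    # (Illustrative arithmetic, assuming the minimal preset values SHARD_COMMITTEE_PERIOD=64 and
    # SLOTS_PER_EPOCH=8: this fast-forwards to slot 512, i.e. epoch 64, so the exit is already
    # permitted before the fork at epoch 66.)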
|
||||
|
@ -155,11 +161,12 @@ def test_transition_with_voluntary_exit_right_after_fork(state, fork_epoch, spec
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=260)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=66) for pre, post in ALL_PRE_POST_FORKS])
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_transition_with_voluntary_exit_right_before_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Create a voluntary exit right *before* the transition.
|
||||
fork_epoch=260 because mainnet `SHARD_COMMITTEE_PERIOD` is 256 epochs.
|
||||
fork_epoch=66 because minimal preset `SHARD_COMMITTEE_PERIOD` is 64 epochs.
|
||||
"""
|
||||
# Fast forward to the future epoch so that validator can do voluntary exit
|
||||
state.slot = spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
@ -1,12 +1,15 @@
|
|||
import random
|
||||
from eth2spec.test.context import (
|
||||
MINIMAL,
|
||||
fork_transition_test,
|
||||
ForkMeta,
|
||||
with_fork_metas,
|
||||
with_presets,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALL_PRE_POST_FORKS,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.fork_transition import (
|
||||
do_altair_fork,
|
||||
do_fork,
|
||||
transition_to_next_epoch_and_append_blocks,
|
||||
transition_until_fork,
|
||||
)
|
||||
|
@ -15,7 +18,7 @@ from eth2spec.test.helpers.random import (
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=1)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=1) for pre, post in ALL_PRE_POST_FORKS])
|
||||
@with_presets([MINIMAL],
|
||||
reason="only test with enough validators such that at least one exited index is not in sync committee")
|
||||
def test_transition_with_one_fourth_slashed_active_validators_pre_fork(state,
|
||||
|
@ -45,7 +48,7 @@ def test_transition_with_one_fourth_slashed_active_validators_pre_fork(state,
|
|||
yield "pre", state
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, _ = do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
state, _ = do_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
|
||||
# ensure that some of the current sync committee members are slashed
|
||||
slashed_pubkeys = [state.validators[index].pubkey for index in slashed_indices]
|
||||
@ -1,12 +1,17 @@
|
|||
import random
|
||||
from eth2spec.test.context import fork_transition_test
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
|
||||
from eth2spec.test.context import (
|
||||
ForkMeta,
|
||||
with_fork_metas,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALL_PRE_POST_FORKS,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch_via_signed_block,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import next_slots_with_attestations
|
||||
from eth2spec.test.helpers.fork_transition import (
|
||||
do_altair_fork,
|
||||
do_fork,
|
||||
no_blocks,
|
||||
only_at,
|
||||
skip_slots,
|
||||
|
@ -15,7 +20,7 @@ from eth2spec.test.helpers.fork_transition import (
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -34,7 +39,7 @@ def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag
|
|||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
state, block = do_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
|
@ -51,7 +56,7 @@ def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag
|
|||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -71,7 +76,7 @@ def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec,
|
|||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, _ = do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
state, _ = do_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks)
|
||||
|
@ -88,7 +93,7 @@ def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec,
|
|||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -109,7 +114,7 @@ def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_sp
|
|||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
state, block = do_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
|
@ -127,7 +132,7 @@ def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_sp
|
|||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -148,7 +153,7 @@ def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pr
|
|||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, _ = do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
state, _ = do_fork(state, spec, post_spec, fork_epoch, with_block=False)
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
|
||||
|
@ -215,7 +220,7 @@ def _run_transition_test_with_attestations(state,
|
|||
assert (state.slot + 1) % spec.SLOTS_PER_EPOCH == 0
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
state, block = do_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition with new spec into next epoch
|
||||
|
@ -253,7 +258,7 @@ def _run_transition_test_with_attestations(state,
|
|||
yield "post", state
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=3)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=3) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_finality(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -262,7 +267,7 @@ def test_transition_with_finality(state, fork_epoch, spec, post_spec, pre_tag, p
|
|||
yield from _run_transition_test_with_attestations(state, fork_epoch, spec, post_spec, pre_tag, post_tag)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=3)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=3) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_random_three_quarters_participation(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
|
||||
|
@ -289,7 +294,7 @@ def test_transition_with_random_three_quarters_participation(state, fork_epoch,
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=3)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=3) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_random_half_participation(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
rng = random.Random(2020)
|
||||
|
||||
|
@ -313,7 +318,7 @@ def test_transition_with_random_half_participation(state, fork_epoch, spec, post
|
|||
)
|
||||
|
||||
|
||||
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
|
||||
@with_fork_metas([ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=3) for pre, post in ALL_PRE_POST_FORKS])
|
||||
def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
|
||||
"""
|
||||
Transition from the initial ``state`` to the ``fork_epoch`` with no attestations,
|
||||
|
@ -332,7 +337,7 @@ def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spe
|
|||
])
|
||||
|
||||
# irregular state transition to handle fork:
|
||||
state, block = do_altair_fork(state, spec, post_spec, fork_epoch)
|
||||
state, block = do_fork(state, spec, post_spec, fork_epoch)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
# continue regular state transition but add attestations
|
||||
@ -1,5 +1,7 @@
|
|||
import pytest
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
|
||||
from eth2spec.phase0 import mainnet as spec_phase0_mainnet, minimal as spec_phase0_minimal
|
||||
from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal
|
||||
from eth2spec.merge import mainnet as spec_merge_mainnet, minimal as spec_merge_minimal
|
||||
|
@ -7,12 +9,16 @@ from eth2spec.utils import bls
|
|||
|
||||
from .exceptions import SkippedTest
|
||||
from .helpers.constants import (
|
||||
SpecForkName, PresetBaseName,
|
||||
PHASE0, ALTAIR, MERGE, MINIMAL, MAINNET,
|
||||
ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
|
||||
ALL_FORK_UPGRADES,
|
||||
)
|
||||
from .helpers.typing import SpecForkName, PresetBaseName
|
||||
from .helpers.genesis import create_genesis_state
|
||||
from .utils import vector_test, with_meta_tags, build_transition_test
|
||||
from .utils import (
|
||||
vector_test,
|
||||
with_meta_tags,
|
||||
)
|
||||
|
||||
from random import Random
|
||||
from typing import Any, Callable, Sequence, TypedDict, Protocol, Dict
|
||||
|
@ -50,6 +56,13 @@ class SpecMerge(Spec):
|
|||
...
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ForkMeta:
|
||||
pre_fork_name: str
|
||||
post_fork_name: str
|
||||
fork_epoch: int
|
||||
|
||||
|
||||
spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = {
|
||||
MINIMAL: {
|
||||
PHASE0: spec_phase0_minimal,
|
||||
|
@ -86,7 +99,6 @@ _custom_state_cache_dict = LRU(size=10)
|
|||
def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
||||
threshold_fn: Callable[[Any], int]):
|
||||
def deco(fn):
|
||||
|
||||
def entry(*args, spec: Spec, phases: SpecForks, **kw):
|
||||
# make a key for the state, unique to the fork + config (incl preset choice) and balances/activations
|
||||
key = (spec.fork, spec.config.__hash__(), spec.__file__, balances_fn, threshold_fn)
|
||||
|
@ -104,7 +116,7 @@ def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
|
|||
return deco
|
||||
|
||||
|
||||
def default_activation_threshold(spec):
|
||||
def default_activation_threshold(spec: Spec):
|
||||
"""
|
||||
Helper method to use the default balance activation threshold for state creation for tests.
|
||||
Usage: `@with_custom_state(threshold_fn=default_activation_threshold, ...)`
|
||||
|
@ -112,7 +124,7 @@ def default_activation_threshold(spec):
|
|||
return spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
|
||||
def zero_activation_threshold(spec):
|
||||
def zero_activation_threshold(spec: Spec):
|
||||
"""
|
||||
Helper method to use 0 gwei as the activation threshold for state creation for tests.
|
||||
Usage: `@with_custom_state(threshold_fn=zero_activation_threshold, ...)`
|
||||
|
@ -120,7 +132,7 @@ def zero_activation_threshold(spec):
|
|||
return 0
|
||||
|
||||
|
||||
def default_balances(spec):
|
||||
def default_balances(spec: Spec):
|
||||
"""
|
||||
Helper method to create a series of default balances.
|
||||
Usage: `@with_custom_state(balances_fn=default_balances, ...)`
|
||||
|
@ -129,7 +141,7 @@ def default_balances(spec):
|
|||
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
|
||||
|
||||
|
||||
def scaled_churn_balances(spec):
|
||||
def scaled_churn_balances(spec: Spec):
|
||||
"""
|
||||
Helper method to create enough validators to scale the churn limit.
|
||||
(This is *firmly* over the churn limit -- thus the +2 instead of just +1)
|
||||
|
@ -143,7 +155,7 @@ def scaled_churn_balances(spec):
|
|||
with_state = with_custom_state(default_balances, default_activation_threshold)
|
||||
|
||||
|
||||
def low_balances(spec):
|
||||
def low_balances(spec: Spec):
|
||||
"""
|
||||
Helper method to create a series of low balances.
|
||||
Usage: `@with_custom_state(balances_fn=low_balances, ...)`
|
||||
|
@ -154,7 +166,7 @@ def low_balances(spec):
|
|||
return [low_balance] * num_validators
|
||||
|
||||
|
||||
def misc_balances(spec):
|
||||
def misc_balances(spec: Spec):
|
||||
"""
|
||||
Helper method to create a series of balances that includes some misc. balances.
|
||||
Usage: `@with_custom_state(balances_fn=misc_balances, ...)`
|
||||
|
@ -166,7 +178,7 @@ def misc_balances(spec):
|
|||
return balances
|
||||
|
||||
|
||||
def misc_balances_in_default_range_with_many_validators(spec):
|
||||
def misc_balances_in_default_range_with_many_validators(spec: Spec):
|
||||
"""
|
||||
Helper method to create a series of balances that includes some misc. balances but
|
||||
none that are below the ``EJECTION_BALANCE``.
|
||||
|
@ -182,7 +194,7 @@ def misc_balances_in_default_range_with_many_validators(spec):
|
|||
return balances
|
||||
|
||||
|
||||
def low_single_balance(spec):
|
||||
def low_single_balance(spec: Spec):
|
||||
"""
|
||||
Helper method to create a single balance of 1 Gwei.
|
||||
Usage: `@with_custom_state(balances_fn=low_single_balance, ...)`
|
||||
|
@ -190,7 +202,7 @@ def low_single_balance(spec):
|
|||
return [1]
|
||||
|
||||
|
||||
def large_validator_set(spec):
|
||||
def large_validator_set(spec: Spec):
|
||||
"""
|
||||
Helper method to create a large series of default balances.
|
||||
Usage: `@with_custom_state(balances_fn=large_validator_set, ...)`
|
||||
|
@ -347,6 +359,66 @@ def with_all_phases_except(exclusion_phases):
|
|||
return decorator
|
||||
|
||||
|
||||
def _get_preset_targets(kw):
|
||||
preset_name = DEFAULT_TEST_PRESET
|
||||
if 'preset' in kw:
|
||||
preset_name = kw.pop('preset')
|
||||
return spec_targets[preset_name]
|
||||
|
||||
|
||||
def _get_run_phases(phases, kw):
|
||||
"""
|
||||
Return the fork names for the base `spec` in test cases
|
||||
"""
|
||||
if 'phase' in kw:
|
||||
# Limit phases if one explicitly specified
|
||||
phase = kw.pop('phase')
|
||||
if phase not in phases:
|
||||
dump_skipping_message(f"doesn't support this fork: {phase}")
|
||||
return None
|
||||
run_phases = [phase]
|
||||
else:
|
||||
# If pytest `--fork` flag is set, filter out the rest of the forks
|
||||
run_phases = set(phases).intersection(DEFAULT_PYTEST_FORKS)
|
||||
|
||||
return run_phases
|
||||
|
||||
|
||||
def _get_available_phases(run_phases, other_phases):
|
||||
"""
|
||||
Return the available fork names for multi-phase tests
|
||||
"""
|
||||
available_phases = set(run_phases)
|
||||
if other_phases is not None:
|
||||
available_phases |= set(other_phases)
|
||||
return available_phases
|
||||
|
||||
|
||||
def _run_test_case_with_phases(fn, phases, other_phases, kw, args, is_fork_transition=False):
|
||||
run_phases = _get_run_phases(phases, kw)
|
||||
|
||||
if len(run_phases) == 0:
|
||||
if not is_fork_transition:
|
||||
dump_skipping_message("none of the recognized phases are executable, skipping test.")
|
||||
return None
|
||||
|
||||
available_phases = _get_available_phases(run_phases, other_phases)
|
||||
|
||||
targets = _get_preset_targets(kw)
|
||||
|
||||
# Populate all phases for multi-phase tests
|
||||
phase_dir = {}
|
||||
for phase in available_phases:
|
||||
phase_dir[phase] = targets[phase]
|
||||
|
||||
# Return is ignored whenever multiple phases are run.
|
||||
# This return is for test generators to emit python generators (yielding test vector outputs)
|
||||
for phase in run_phases:
|
||||
ret = fn(spec=targets[phase], phases=phase_dir, *args, **kw)
|
||||
|
||||
return ret
|
||||
|
||||
|
||||
def with_phases(phases, other_phases=None):
|
||||
"""
|
||||
Decorator factory that returns a decorator that runs a test for the appropriate phases.
|
||||
|
@ -354,49 +426,22 @@ def with_phases(phases, other_phases=None):
|
|||
"""
|
||||
def decorator(fn):
|
||||
def wrapper(*args, **kw):
|
||||
run_phases = set(phases).intersection(DEFAULT_PYTEST_FORKS)
|
||||
|
||||
# limit phases if one explicitly specified
|
||||
if 'phase' in kw:
|
||||
phase = kw.pop('phase')
|
||||
if phase not in phases:
|
||||
dump_skipping_message(f"doesn't support this fork: {phase}")
|
||||
return None
|
||||
run_phases = [phase]
|
||||
|
||||
if PHASE0 not in run_phases and ALTAIR not in run_phases and MERGE not in run_phases:
|
||||
dump_skipping_message("none of the recognized phases are executable, skipping test.")
|
||||
return None
|
||||
|
||||
available_phases = set(run_phases)
|
||||
if other_phases is not None:
|
||||
available_phases |= set(other_phases)
|
||||
|
||||
preset_name = DEFAULT_TEST_PRESET
|
||||
if 'preset' in kw:
|
||||
preset_name = kw.pop('preset')
|
||||
targets = spec_targets[preset_name]
|
||||
|
||||
# Populate all phases for multi-phase tests
|
||||
phase_dir = {}
|
||||
if PHASE0 in available_phases:
|
||||
phase_dir[PHASE0] = targets[PHASE0]
|
||||
if ALTAIR in available_phases:
|
||||
phase_dir[ALTAIR] = targets[ALTAIR]
|
||||
if MERGE in available_phases:
|
||||
phase_dir[MERGE] = targets[MERGE]
|
||||
|
||||
# return is ignored whenever multiple phases are ran.
|
||||
# This return is for test generators to emit python generators (yielding test vector outputs)
|
||||
if PHASE0 in run_phases:
|
||||
ret = fn(spec=targets[PHASE0], phases=phase_dir, *args, **kw)
|
||||
if ALTAIR in run_phases:
|
||||
ret = fn(spec=targets[ALTAIR], phases=phase_dir, *args, **kw)
|
||||
if MERGE in run_phases:
|
||||
ret = fn(spec=targets[MERGE], phases=phase_dir, *args, **kw)
|
||||
|
||||
# TODO: merge, sharding, custody_game and das are not executable yet.
|
||||
# Tests that specify these features will not run, and get ignored for these specific phases.
|
||||
if 'fork_metas' in kw:
|
||||
fork_metas = kw.pop('fork_metas')
|
||||
if 'phase' in kw:
|
||||
# When running test generator, it sets specific `phase`
|
||||
phase = kw['phase']
|
||||
_phases = [phase]
|
||||
_other_phases = [ALL_FORK_UPGRADES[phase]]
|
||||
ret = _run_test_case_with_phases(fn, _phases, _other_phases, kw, args, is_fork_transition=True)
|
||||
else:
|
||||
# When running pytest, go through `fork_metas` instead of using `phases`
|
||||
for fork_meta in fork_metas:
|
||||
_phases = [fork_meta.pre_fork_name]
|
||||
_other_phases = [fork_meta.post_fork_name]
|
||||
ret = _run_test_case_with_phases(fn, _phases, _other_phases, kw, args, is_fork_transition=True)
|
||||
else:
|
||||
ret = _run_test_case_with_phases(fn, phases, other_phases, kw, args)
|
||||
return ret
|
||||
return wrapper
|
||||
return decorator
|
||||
|
@ -481,10 +526,25 @@ def only_generator(reason):
|
|||
return _decorator
|
||||
|
||||
|
||||
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
|
||||
#
|
||||
# Fork transition state tests
|
||||
#
|
||||
|
||||
|
||||
def set_fork_metas(fork_metas: Sequence[ForkMeta]):
|
||||
def decorator(fn):
|
||||
def wrapper(*args, **kwargs):
|
||||
return fn(*args, fork_metas=fork_metas, **kwargs)
|
||||
return wrapper
|
||||
return decorator
|
||||
|
||||
|
||||
def with_fork_metas(fork_metas: Sequence[ForkMeta]):
|
||||
"""
|
||||
A decorator to construct a "transition" test from one fork of the eth2 spec
|
||||
to another.
|
||||
A decorator to construct a "transition" test from one fork to another.
|
||||
|
||||
Decorator takes a list of `ForkMeta` and each item defines `pre_fork_name`,
|
||||
`post_fork_name`, and `fork_epoch`.
|
||||
|
||||
Decorator assumes a transition from the `pre_fork_name` fork to the
|
||||
`post_fork_name` fork. The user can supply a `fork_epoch` at which the
|
||||
|
@ -502,15 +562,65 @@ def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
|
|||
`post_tag`: a function to tag data as belonging to `post_fork_name` fork.
|
||||
Used to discriminate data during consumption of the generated spec tests.
|
||||
"""
|
||||
def _wrapper(fn):
|
||||
@with_phases([pre_fork_name], other_phases=[post_fork_name])
|
||||
@spec_test
|
||||
@with_state
|
||||
def _adapter(*args, **kwargs):
|
||||
wrapped = build_transition_test(fn,
|
||||
pre_fork_name,
|
||||
post_fork_name,
|
||||
fork_epoch=fork_epoch)
|
||||
return wrapped(*args, **kwargs)
|
||||
return _adapter
|
||||
return _wrapper
|
||||
run_yield_fork_meta = yield_fork_meta(fork_metas)
|
||||
run_with_phases = with_phases(ALL_PHASES)
|
||||
run_set_fork_metas = set_fork_metas(fork_metas)
|
||||
|
||||
def decorator(fn):
|
||||
return run_set_fork_metas(run_with_phases(spec_test(with_state(run_yield_fork_meta(fn)))))
|
||||
return decorator
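# Illustrative usage (a sketch mirroring the transition tests above; `test_example` is a
# hypothetical name, and `PHASE0`/`ALTAIR` come from `eth2spec.test.helpers.constants`):
#
#     @with_fork_metas([ForkMeta(pre_fork_name=PHASE0, post_fork_name=ALTAIR, fork_epoch=2)])
#     def test_example(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
#         ...
#
# The decorated test receives the pre-fork `spec`, the upgraded `post_spec`, and the
# `pre_tag`/`post_tag` helpers described in the docstring above.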
|
||||
|
||||
|
||||
def yield_fork_meta(fork_metas: Sequence[ForkMeta]):
|
||||
"""
|
||||
Yield meta fields to `meta.yaml` and pass post spec and meta fields to `fn`.
|
||||
"""
|
||||
def decorator(fn):
|
||||
def wrapper(*args, **kw):
|
||||
phases = kw.pop('phases')
|
||||
spec = kw["spec"]
|
||||
try:
|
||||
fork_meta = next(filter(lambda m: m.pre_fork_name == spec.fork, fork_metas))
|
||||
except StopIteration:
|
||||
dump_skipping_message(f"doesn't support this fork: {spec.fork}")
|
||||
|
||||
post_spec = phases[fork_meta.post_fork_name]
|
||||
|
||||
# Reset counter
|
||||
pre_fork_counter = 0
|
||||
|
||||
def pre_tag(obj):
|
||||
nonlocal pre_fork_counter
|
||||
pre_fork_counter += 1
|
||||
return obj
|
||||
|
||||
def post_tag(obj):
|
||||
return obj
|
||||
|
||||
yield "post_fork", "meta", fork_meta.post_fork_name
|
||||
|
||||
has_fork_epoch = False
|
||||
if fork_meta.fork_epoch:
|
||||
kw["fork_epoch"] = fork_meta.fork_epoch
|
||||
has_fork_epoch = True
|
||||
yield "fork_epoch", "meta", fork_meta.fork_epoch
|
||||
|
||||
result = fn(
|
||||
*args,
|
||||
post_spec=post_spec,
|
||||
pre_tag=pre_tag,
|
||||
post_tag=post_tag,
|
||||
**kw,
|
||||
)
|
||||
if result is not None:
|
||||
for part in result:
|
||||
if part[0] == "fork_epoch":
|
||||
has_fork_epoch = True
|
||||
yield part
|
||||
assert has_fork_epoch
|
||||
|
||||
if pre_fork_counter > 0:
|
||||
yield "fork_block", "meta", pre_fork_counter - 1
|
||||
|
||||
return wrapper
|
||||
return decorator
|
||||
@ -21,6 +21,14 @@ TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
|
|||
|
||||
FORKS_BEFORE_ALTAIR = (PHASE0,)
|
||||
FORKS_BEFORE_MERGE = (PHASE0, ALTAIR)
|
||||
ALL_FORK_UPGRADES = {
|
||||
# pre_fork_name: post_fork_name
|
||||
PHASE0: ALTAIR,
|
||||
ALTAIR: MERGE,
|
||||
}
|
||||
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
|
||||
AFTER_MERGE_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key not in FORKS_BEFORE_ALTAIR}
|
||||
AFTER_MERGE_PRE_POST_FORKS = AFTER_MERGE_UPGRADES.items()
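# For reference, with the mapping above these expand to:
#   ALL_PRE_POST_FORKS         -> (PHASE0, ALTAIR), (ALTAIR, MERGE)
#   AFTER_MERGE_PRE_POST_FORKS -> (ALTAIR, MERGE)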
|
||||
|
||||
#
|
||||
# Config
|
||||
@ -11,7 +11,7 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
|
|||
|
||||
payload = spec.ExecutionPayload(
|
||||
parent_hash=latest.block_hash,
|
||||
coinbase=spec.ExecutionAddress(),
|
||||
fee_recipient=spec.ExecutionAddress(),
|
||||
state_root=latest.state_root, # no changes to the state
|
||||
receipt_root=b"no receipts here" + b"\x00" * 16, # TODO: root of empty MPT may be better.
|
||||
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
|
||||
|
@ -34,7 +34,7 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
|
|||
def get_execution_payload_header(spec, execution_payload):
|
||||
return spec.ExecutionPayloadHeader(
|
||||
parent_hash=execution_payload.parent_hash,
|
||||
coinbase=execution_payload.coinbase,
|
||||
fee_recipient=execution_payload.fee_recipient,
|
||||
state_root=execution_payload.state_root,
|
||||
receipt_root=execution_payload.receipt_root,
|
||||
logs_bloom=execution_payload.logs_bloom,
|
||||
@ -23,12 +23,12 @@ def add_block_to_store(spec, store, signed_block):
|
|||
spec.on_block(store, signed_block)
|
||||
|
||||
|
||||
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, allow_invalid_attestations=False,
|
||||
def tick_and_add_block(spec, store, signed_block, test_steps, valid=True,
|
||||
merge_block=False, block_not_found=False):
|
||||
pre_state = store.block_states[signed_block.message.parent_root]
|
||||
block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||
if merge_block:
|
||||
assert spec.is_merge_block(pre_state, signed_block.message.body)
|
||||
assert spec.is_merge_transition_block(pre_state, signed_block.message.body)
|
||||
|
||||
if store.time < block_time:
|
||||
on_tick_and_append_step(spec, store, block_time, test_steps)
|
||||
|
@ -36,14 +36,13 @@ def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, allow_
|
|||
post_state = yield from add_block(
|
||||
spec, store, signed_block, test_steps,
|
||||
valid=valid,
|
||||
allow_invalid_attestations=allow_invalid_attestations,
|
||||
block_not_found=block_not_found,
|
||||
)
|
||||
|
||||
return post_state
|
||||
|
||||
|
||||
def tick_and_run_on_attestation(spec, store, attestation, test_steps):
|
||||
def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False):
|
||||
parent_block = store.blocks[attestation.data.beacon_block_root]
|
||||
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
||||
block_time = pre_state.genesis_time + parent_block.slot * spec.config.SECONDS_PER_SLOT
|
||||
|
@ -53,40 +52,21 @@ def tick_and_run_on_attestation(spec, store, attestation, test_steps):
|
|||
spec.on_tick(store, next_epoch_time)
|
||||
test_steps.append({'tick': int(next_epoch_time)})
|
||||
|
||||
spec.on_attestation(store, attestation)
|
||||
spec.on_attestation(store, attestation, is_from_block=is_from_block)
|
||||
yield get_attestation_file_name(attestation), attestation
|
||||
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||
|
||||
|
||||
def add_attestation(spec, store, attestation, test_steps, valid=True):
|
||||
yield get_attestation_file_name(attestation), attestation
|
||||
|
||||
def run_on_attestation(spec, store, attestation, is_from_block=False, valid=True):
|
||||
if not valid:
|
||||
try:
|
||||
run_on_attestation(spec, store, attestation, valid=True)
|
||||
except AssertionError:
|
||||
test_steps.append({
|
||||
'attestation': get_attestation_file_name(attestation),
|
||||
'valid': False,
|
||||
})
|
||||
return
|
||||
else:
|
||||
assert False
|
||||
|
||||
run_on_attestation(spec, store, attestation, valid=True)
|
||||
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||
|
||||
|
||||
def run_on_attestation(spec, store, attestation, valid=True):
|
||||
if not valid:
|
||||
try:
|
||||
spec.on_attestation(store, attestation)
|
||||
spec.on_attestation(store, attestation, is_from_block=is_from_block)
|
||||
except AssertionError:
|
||||
return
|
||||
else:
|
||||
assert False
|
||||
|
||||
spec.on_attestation(store, attestation)
|
||||
spec.on_attestation(store, attestation, is_from_block=is_from_block)
|
||||
|
||||
|
||||
def get_genesis_forkchoice_store(spec, genesis_state):
|
||||
|
@ -131,7 +111,6 @@ def add_block(spec,
|
|||
signed_block,
|
||||
test_steps,
|
||||
valid=True,
|
||||
allow_invalid_attestations=False,
|
||||
block_not_found=False):
|
||||
"""
|
||||
Run on_block and on_attestation
|
||||
|
@ -156,14 +135,8 @@ def add_block(spec,
|
|||
test_steps.append({'block': get_block_file_name(signed_block)})
|
||||
|
||||
# An on_block step implies receiving block's attestations
|
||||
try:
|
||||
for attestation in signed_block.message.body.attestations:
|
||||
run_on_attestation(spec, store, attestation, valid=True)
|
||||
except AssertionError:
|
||||
if allow_invalid_attestations:
|
||||
pass
|
||||
else:
|
||||
raise
|
||||
for attestation in signed_block.message.body.attestations:
|
||||
run_on_attestation(spec, store, attestation, is_from_block=True, valid=True)
|
||||
|
||||
block_root = signed_block.message.hash_tree_root()
|
||||
assert store.blocks[block_root] == signed_block.message
|
||||
|
@ -184,6 +157,7 @@ def add_block(spec,
|
|||
'epoch': int(store.best_justified_checkpoint.epoch),
|
||||
'root': encode_hex(store.best_justified_checkpoint.root),
|
||||
},
|
||||
'proposer_boost_root': encode_hex(store.proposer_boost_root),
|
||||
}
|
||||
})
|
||||
|
||||
@ -9,6 +9,10 @@ from eth2spec.test.helpers.block import (
|
|||
build_empty_block,
|
||||
sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALTAIR,
|
||||
MERGE,
|
||||
)
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_state_and_deposit,
|
||||
)
|
||||
|
@ -133,17 +137,25 @@ def state_transition_across_slots_with_ignoring_proposers(spec,
|
|||
next_slot(spec, state)
|
||||
|
||||
|
||||
def do_altair_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None):
|
||||
def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None):
|
||||
spec.process_slots(state, state.slot + 1)
|
||||
|
||||
assert state.slot % spec.SLOTS_PER_EPOCH == 0
|
||||
assert spec.get_current_epoch(state) == fork_epoch
|
||||
|
||||
state = post_spec.upgrade_to_altair(state)
|
||||
if post_spec.fork == ALTAIR:
|
||||
state = post_spec.upgrade_to_altair(state)
|
||||
elif post_spec.fork == MERGE:
|
||||
state = post_spec.upgrade_to_merge(state)
|
||||
|
||||
assert state.fork.epoch == fork_epoch
|
||||
assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
|
||||
|
||||
if post_spec.fork == ALTAIR:
|
||||
assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION
|
||||
elif post_spec.fork == MERGE:
|
||||
assert state.fork.previous_version == post_spec.config.ALTAIR_FORK_VERSION
|
||||
assert state.fork.current_version == post_spec.config.MERGE_FORK_VERSION
|
||||
|
||||
if with_block:
|
||||
return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
|
||||
|
@ -280,7 +292,7 @@ def run_transition_with_operation(state,
|
|||
|
||||
# irregular state transition to handle fork:
|
||||
_operation_at_slot = operation_dict if is_at_fork else None
|
||||
state, block = do_altair_fork(state, spec, post_spec, fork_epoch, operation_dict=_operation_at_slot)
|
||||
state, block = do_fork(state, spec, post_spec, fork_epoch, operation_dict=_operation_at_slot)
|
||||
blocks.append(post_tag(block))
|
||||
|
||||
if is_at_fork:
|
||||
@ -26,7 +26,7 @@ def get_sample_genesis_execution_payload_header(spec,
|
|||
eth1_block_hash = b'\x55' * 32
|
||||
return spec.ExecutionPayloadHeader(
|
||||
parent_hash=b'\x30' * 32,
|
||||
coinbase=b'\x42' * 20,
|
||||
fee_recipient=b'\x42' * 20,
|
||||
state_root=b'\x20' * 32,
|
||||
receipt_root=b'\x20' * 32,
|
||||
logs_bloom=b'\x35' * spec.BYTES_PER_LOGS_BLOOM,
|
||||
@ -15,13 +15,18 @@ class PowChain:
|
|||
assert offset <= 0
|
||||
return self.blocks[offset - 1]
|
||||
|
||||
def to_dict(self):
|
||||
return {
|
||||
block.block_hash: block
|
||||
for block in self.blocks
|
||||
}
|
||||
|
||||
|
||||
def prepare_random_pow_block(spec, rng=Random(3131)):
|
||||
return spec.PowBlock(
|
||||
block_hash=spec.Hash32(spec.hash(bytearray(rng.getrandbits(8) for _ in range(32)))),
|
||||
parent_hash=spec.Hash32(spec.hash(bytearray(rng.getrandbits(8) for _ in range(32)))),
|
||||
total_difficulty=uint256(0),
|
||||
difficulty=uint256(0)
|
||||
)
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ def test_initialize_pre_transition_no_param(spec):
|
|||
yield 'execution_payload_header', 'meta', False
|
||||
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
||||
|
||||
assert not spec.is_merge_complete(state)
|
||||
assert not spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'state', state
|
||||
|
||||
|
@ -79,7 +79,7 @@ def test_initialize_pre_transition_empty_payload(spec):
|
|||
execution_payload_header=execution_payload_header,
|
||||
)
|
||||
|
||||
assert not spec.is_merge_complete(state)
|
||||
assert not spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'execution_payload_header', execution_payload_header
|
||||
|
||||
|
@ -117,6 +117,6 @@ def test_initialize_post_transition(spec):
|
|||
|
||||
yield 'execution_payload_header', genesis_execution_payload_header
|
||||
|
||||
assert spec.is_merge_complete(state)
|
||||
assert spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'state', state
|
||||
@ -23,3 +23,22 @@ def test_empty_block_transition_no_tx(spec, state):
|
|||
yield 'post', state
|
||||
|
||||
# TODO: tests with EVM, mock or replacement?
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_is_execution_enabled_false(spec, state):
|
||||
# Set `latest_execution_payload_header` to empty
|
||||
state.latest_execution_payload_header = spec.ExecutionPayloadHeader()
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
||||
# Set `execution_payload` to empty
|
||||
block.body.execution_payload = spec.ExecutionPayload()
|
||||
assert len(block.body.execution_payload.transactions) == 0
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
@ -0,0 +1,35 @@
|
|||
from eth2spec.test.context import (
    ForkMeta,
    with_fork_metas,
)
from eth2spec.test.helpers.constants import (
    AFTER_MERGE_PRE_POST_FORKS,
)
from eth2spec.test.helpers.fork_transition import (
    do_fork,
    transition_to_next_epoch_and_append_blocks,
    transition_until_fork,
)


@with_fork_metas([
    ForkMeta(pre_fork_name=pre, post_fork_name=post, fork_epoch=2) for pre, post in AFTER_MERGE_PRE_POST_FORKS
])
def test_sample_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    transition_until_fork(spec, state, fork_epoch)

    # check pre state
    assert spec.get_current_epoch(state) < fork_epoch

    yield "pre", state

    # irregular state transition to handle fork:
    blocks = []
    state, block = do_fork(state, spec, post_spec, fork_epoch)
    blocks.append(post_tag(block))

    # continue regular state transition with new spec into next epoch
    transition_to_next_epoch_and_append_blocks(post_spec, state, post_tag, blocks, only_last_block=True)

    yield "blocks", blocks
    yield "post", state
@ -13,17 +13,17 @@ from eth2spec.test.context import (
|
|||
@spec_state_test
|
||||
def test_fail_merge_complete(spec, state):
|
||||
state = build_state_with_incomplete_transition(spec, state)
|
||||
assert not spec.is_merge_complete(state)
|
||||
assert not spec.is_merge_transition_complete(state)
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_success_merge_complete(spec, state):
|
||||
state = build_state_with_complete_transition(spec, state)
|
||||
assert spec.is_merge_complete(state)
|
||||
assert spec.is_merge_transition_complete(state)
|
||||
|
||||
|
||||
# 'with_complete_transition', 'with_execution_payload', 'is_merge_block', 'is_execution_enabled'
# 'with_complete_transition', 'with_execution_payload', 'is_merge_transition_block', 'is_execution_enabled'
|
||||
expected_results = [
|
||||
(True, True, False, True),
|
||||
(True, False, False, True),
|
||||
|
@ -39,7 +39,7 @@ def test_is_merge_block_and_is_execution_enabled(spec, state):
|
|||
(
|
||||
with_complete_transition,
|
||||
with_execution_payload,
|
||||
is_merge_block,
|
||||
is_merge_transition_block,
|
||||
is_execution_enabled
|
||||
) = result
|
||||
if with_complete_transition:
|
||||
|
@ -51,5 +51,5 @@ def test_is_merge_block_and_is_execution_enabled(spec, state):
|
|||
if with_execution_payload:
|
||||
body.execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
assert spec.is_merge_block(state, body) == is_merge_block
|
||||
assert spec.is_merge_transition_block(state, body) == is_merge_transition_block
|
||||
assert spec.is_execution_enabled(state, body) == is_execution_enabled
|
||||
@ -0,0 +1,162 @@
|
|||
from copy import deepcopy
|
||||
|
||||
from eth2spec.test.helpers.pow_block import (
|
||||
prepare_random_pow_chain,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_merge_and_later,
|
||||
)
|
||||
|
||||
|
||||
# For test_get_pow_block_at_terminal_total_difficulty
|
||||
IS_HEAD_BLOCK = 'is_head_block'
|
||||
IS_HEAD_PARENT_BLOCK = 'is_head_parent_block'
|
||||
|
||||
# NOTE: The following parameter names are in the view of the head block (the second block)
|
||||
# 'block_reached_ttd', 'block_parent_hash_is_empty', 'parent_reached_ttd', 'return_block'
|
||||
expected_results = [
|
||||
(False, False, False, None),
|
||||
(False, False, True, IS_HEAD_PARENT_BLOCK),
|
||||
(False, True, False, None),
|
||||
(False, True, True, IS_HEAD_PARENT_BLOCK),
|
||||
(True, False, False, IS_HEAD_BLOCK),
|
||||
(True, False, True, IS_HEAD_PARENT_BLOCK),
|
||||
(True, True, False, IS_HEAD_BLOCK),
|
||||
(True, True, True, IS_HEAD_PARENT_BLOCK),
|
||||
]
|
||||
# NOTE: since the first block's `parent_hash` is set to `Hash32()` in the test, whenever
# `parent_reached_ttd` is True the function returns the first block (IS_HEAD_PARENT_BLOCK).
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_get_pow_block_at_terminal_total_difficulty(spec, state):
|
||||
for result in expected_results:
|
||||
(
|
||||
block_reached_ttd,
|
||||
block_parent_hash_is_empty,
|
||||
parent_reached_ttd,
|
||||
return_block
|
||||
) = result
|
||||
pow_chain = prepare_random_pow_chain(spec, 2)
|
||||
pow_chain.head(-1).parent_hash = spec.Hash32()
|
||||
|
||||
if block_reached_ttd:
|
||||
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
|
||||
else:
|
||||
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - 1
|
||||
|
||||
if parent_reached_ttd:
|
||||
pow_chain.head(-1).total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
|
||||
else:
|
||||
pow_chain.head(-1).total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - 1
|
||||
|
||||
if block_parent_hash_is_empty:
|
||||
pow_chain.head().parent_hash = spec.Hash32()
|
||||
|
||||
pow_block = spec.get_pow_block_at_terminal_total_difficulty(pow_chain.to_dict())
|
||||
if return_block == IS_HEAD_BLOCK:
|
||||
assert pow_block == pow_chain.head()
|
||||
elif return_block == IS_HEAD_PARENT_BLOCK:
|
||||
assert pow_block == pow_chain.head(-1)
|
||||
elif return_block is None:
|
||||
assert pow_block is None
|
||||
else:
|
||||
raise Exception('Something is wrong')
|
||||
|
||||
|
||||
SAMPLE_PAYLOAD_ID = b'\x12' * 8
|
||||
# ('is_merge_complete', 'is_terminal_block_hash_set', 'is_activation_epoch_reached',
|
||||
# 'terminal_pow_block_is_none', 'result_payload_id')
|
||||
prepare_execution_payload_expected_results = [
|
||||
(False, False, False, False, SAMPLE_PAYLOAD_ID),
|
||||
(False, False, False, True, None),
|
||||
(False, False, True, False, SAMPLE_PAYLOAD_ID),
|
||||
(False, False, True, True, None),
|
||||
(False, True, False, False, None),
|
||||
(False, True, False, True, None),
|
||||
(False, True, True, False, SAMPLE_PAYLOAD_ID),
|
||||
(False, True, True, True, None),
|
||||
(True, False, False, False, SAMPLE_PAYLOAD_ID),
|
||||
(True, False, False, True, SAMPLE_PAYLOAD_ID),
|
||||
(True, False, True, False, SAMPLE_PAYLOAD_ID),
|
||||
(True, False, True, True, SAMPLE_PAYLOAD_ID),
|
||||
(True, True, False, False, SAMPLE_PAYLOAD_ID),
|
||||
(True, True, False, True, SAMPLE_PAYLOAD_ID),
|
||||
(True, True, True, False, SAMPLE_PAYLOAD_ID),
|
||||
(True, True, True, True, SAMPLE_PAYLOAD_ID),
|
||||
]
|
||||
|
||||
|
||||
@with_merge_and_later
|
||||
@spec_state_test
|
||||
def test_prepare_execution_payload(spec, state):
|
||||
for result in prepare_execution_payload_expected_results:
|
||||
(
|
||||
is_merge_complete,
|
||||
is_terminal_block_hash_set,
|
||||
is_activation_epoch_reached,
|
||||
terminal_pow_block_is_none,
|
||||
result_payload_id,
|
||||
) = result
|
||||
|
||||
# 1. Handle `is_merge_complete`
|
||||
if is_merge_complete:
|
||||
state.latest_execution_payload_header = spec.ExecutionPayloadHeader(random=b'\x12' * 32)
|
||||
else:
|
||||
state.latest_execution_payload_header = spec.ExecutionPayloadHeader()
|
||||
|
||||
# 2. `is_terminal_block_hash_set` and `is_activation_epoch_reached` require mocking configs in runtime
|
||||
config_overrides = {}
|
||||
_mock_terminal_block_hash = b'\x34' * 32
|
||||
if is_terminal_block_hash_set:
|
||||
config_overrides['TERMINAL_BLOCK_HASH'] = _mock_terminal_block_hash
|
||||
else:
|
||||
config_overrides['TERMINAL_BLOCK_HASH'] = spec.Hash32()
|
||||
|
||||
# Default `TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH` is too big and too close to overflow
|
||||
_mock_terminal_block_hash_activation_epoch = 3
|
||||
config_overrides['TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH'] = _mock_terminal_block_hash_activation_epoch
|
||||
if is_activation_epoch_reached:
|
||||
state.slot = _mock_terminal_block_hash_activation_epoch * spec.SLOTS_PER_EPOCH
|
||||
else:
|
||||
state.slot = (_mock_terminal_block_hash_activation_epoch - 1) * spec.SLOTS_PER_EPOCH
|
||||
|
||||
# Logic from `with_config_overrides`
|
||||
old_config = spec.config
|
||||
tmp_config = deepcopy(old_config._asdict())
|
||||
tmp_config.update(config_overrides)
|
||||
config_types = spec.Configuration.__annotations__
|
||||
test_config = {k: config_types[k](v) for k, v in tmp_config.items()}
|
||||
spec.config = spec.Configuration(**test_config)
|
||||
|
||||
# 3. Handle `terminal_pow_block_is_none`
|
||||
pow_chain = prepare_random_pow_chain(spec, 2)
|
||||
if terminal_pow_block_is_none:
|
||||
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY - 1
|
||||
else:
|
||||
if is_terminal_block_hash_set:
|
||||
pow_chain.head().block_hash = _mock_terminal_block_hash
|
||||
pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
|
||||
|
||||
# Dummy arguments
|
||||
finalized_block_hash = b'\x56' * 32
|
||||
suggested_fee_recipient = b'\x78' * 20
|
||||
|
||||
# Mock execution_engine
|
||||
class TestEngine(spec.NoopExecutionEngine):
|
||||
def notify_forkchoice_updated(self, parent_hash, finalized_block_hash, payload_attributes) -> bool:
|
||||
return SAMPLE_PAYLOAD_ID
|
||||
|
||||
payload_id = spec.prepare_execution_payload(
|
||||
state=state,
|
||||
pow_chain=pow_chain.to_dict(),
|
||||
finalized_block_hash=finalized_block_hash,
|
||||
suggested_fee_recipient=suggested_fee_recipient,
|
||||
execution_engine=TestEngine(),
|
||||
)
|
||||
assert payload_id == result_payload_id
|
||||
|
||||
# Restore config
|
||||
spec.config = old_config
@ -1,3 +1,4 @@
|
|||
import random
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.test.context import (
|
||||
|
@ -19,6 +20,7 @@ from eth2spec.test.helpers.fork_choice import (
|
|||
add_block,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_slots,
|
||||
next_epoch,
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
|
@ -103,18 +105,23 @@ def test_split_tie_breaker_no_attestations(spec, state):
|
|||
}
|
||||
})
|
||||
|
||||
# block at slot 1
|
||||
# Create block at slot 1
|
||||
block_1_state = genesis_state.copy()
|
||||
block_1 = build_empty_block_for_next_slot(spec, block_1_state)
|
||||
signed_block_1 = state_transition_and_sign_block(spec, block_1_state, block_1)
|
||||
yield from tick_and_add_block(spec, store, signed_block_1, test_steps)
|
||||
|
||||
# additional block at slot 1
|
||||
# Create additional block at slot 1
|
||||
block_2_state = genesis_state.copy()
|
||||
block_2 = build_empty_block_for_next_slot(spec, block_2_state)
|
||||
block_2.body.graffiti = b'\x42' * 32
|
||||
signed_block_2 = state_transition_and_sign_block(spec, block_2_state, block_2)
|
||||
yield from tick_and_add_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
# Tick time past slot 1 so proposer score boost does not apply
|
||||
time = store.genesis_time + (block_2.slot + 1) * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
|
||||
yield from add_block(spec, store, signed_block_1, test_steps)
|
||||
yield from add_block(spec, store, signed_block_2, test_steps)
|
||||
|
||||
highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2))
|
||||
assert spec.get_head(store) == highest_root
|
||||
|
@ -261,3 +268,67 @@ def test_filtered_block_tree(spec, state):
|
|||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_proposer_boost_correct_head(spec, state):
|
||||
test_steps = []
|
||||
genesis_state = state.copy()
|
||||
|
||||
# Initialization
|
||||
store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
|
||||
yield 'anchor_state', state
|
||||
yield 'anchor_block', anchor_block
|
||||
anchor_root = get_anchor_root(spec, state)
|
||||
assert spec.get_head(store) == anchor_root
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
# Build block that serves as head ONLY on timely arrival, and ONLY in that slot
|
||||
state_1 = genesis_state.copy()
|
||||
next_slots(spec, state_1, 3)
|
||||
block_1 = build_empty_block_for_next_slot(spec, state_1)
|
||||
signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1)
|
||||
|
||||
# Build block that serves as current head, and remains the head after block_1.slot
|
||||
state_2 = genesis_state.copy()
|
||||
next_slots(spec, state_2, 2)
|
||||
block_2 = build_empty_block_for_next_slot(spec, state_2)
|
||||
signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2)
|
||||
while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2):
|
||||
block_2.body.graffiti = spec.Bytes32(hex(random.getrandbits(8 * 32))[2:].zfill(64))
|
||||
signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2)
|
||||
assert spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2)
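    # (The graffiti grinding above forces block_2 to win the lexicographic tie-break on block
    # roots, as in `test_split_tie_breaker_no_attestations`, so block_1 can only become head
    # via the proposer boost.)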
|
||||
|
||||
# Tick to block_1 slot time
|
||||
time = store.genesis_time + block_1.slot * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
|
||||
# Process block_2
|
||||
yield from add_block(spec, store, signed_block_2, test_steps)
|
||||
assert store.proposer_boost_root == spec.Root()
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
|
||||
# Process block_1 on timely arrival
|
||||
# The head should temporarily change to block_1
|
||||
yield from add_block(spec, store, signed_block_1, test_steps)
|
||||
assert store.proposer_boost_root == spec.hash_tree_root(block_1)
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_1)
|
||||
|
||||
# After block_1.slot, the head should revert to block_2
|
||||
time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
assert store.proposer_boost_root == spec.Root()
|
||||
assert spec.get_head(store) == spec.hash_tree_root(block_2)
|
||||
|
||||
test_steps.append({
|
||||
'checks': {
|
||||
'head': get_formatted_head_output(spec, store),
|
||||
}
|
||||
})
|
||||
|
||||
yield 'steps', test_steps
|
||||
@ -1,4 +1,5 @@
|
|||
import random
|
||||
from eth_utils import encode_hex
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets
|
||||
|
@ -543,21 +544,17 @@ def test_new_justified_is_later_than_store_justified(spec, state):
|
|||
assert fork_3_state.finalized_checkpoint.epoch == 3
|
||||
assert fork_3_state.current_justified_checkpoint.epoch == 4
|
||||
|
||||
# FIXME: pending on the `on_block`, `on_attestation` fix
|
||||
# # Apply blocks of `fork_3_state` to `store`
|
||||
# for block in all_blocks:
|
||||
# if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
|
||||
# time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||
# on_tick_and_append_step(spec, store, time, test_steps)
|
||||
# # valid_attestations=False because the attestations are outdated (older than previous epoch)
|
||||
# yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=False)
|
||||
# Apply blocks of `fork_3_state` to `store`
|
||||
for block in all_blocks:
|
||||
if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot):
|
||||
time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT
|
||||
on_tick_and_append_step(spec, store, time, test_steps)
|
||||
yield from add_block(spec, store, block, test_steps)
|
||||
|
||||
# assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint
|
||||
# assert (store.justified_checkpoint
|
||||
# == fork_3_state.current_justified_checkpoint
|
||||
# != store.best_justified_checkpoint)
|
||||
# assert (store.best_justified_checkpoint
|
||||
# == fork_2_state.current_justified_checkpoint)
|
||||
assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint
|
||||
assert store.justified_checkpoint == fork_3_state.current_justified_checkpoint
|
||||
assert store.justified_checkpoint != store.best_justified_checkpoint
|
||||
assert store.best_justified_checkpoint == fork_2_state.current_justified_checkpoint
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
@ -622,20 +619,19 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state):
|
|||
assert state.finalized_checkpoint != another_state.finalized_checkpoint
|
||||
assert state.current_justified_checkpoint != another_state.current_justified_checkpoint
|
||||
|
||||
# pre_store_justified_checkpoint_root = store.justified_checkpoint.root
|
||||
pre_store_justified_checkpoint_root = store.justified_checkpoint.root
|
||||
|
||||
# FIXME: pending on the `on_block`, `on_attestation` fix
|
||||
# # Apply blocks of `another_state` to `store`
|
||||
# for block in all_blocks:
|
||||
# # NOTE: Do not call `on_tick` here
|
||||
# yield from add_block(spec, store, block, test_steps, allow_invalid_attestations=True)
|
||||
# Apply blocks of `another_state` to `store`
|
||||
for block in all_blocks:
|
||||
# NOTE: Do not call `on_tick` here
|
||||
yield from add_block(spec, store, block, test_steps)
|
||||
|
||||
# finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
# ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot)
|
||||
# assert ancestor_at_finalized_slot != store.finalized_checkpoint.root
|
||||
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot)
|
||||
assert ancestor_at_finalized_slot != store.finalized_checkpoint.root
|
||||
|
||||
# assert store.finalized_checkpoint == another_state.finalized_checkpoint
|
||||
# assert store.justified_checkpoint == another_state.current_justified_checkpoint
|
||||
assert store.finalized_checkpoint == another_state.finalized_checkpoint
|
||||
assert store.justified_checkpoint == another_state.current_justified_checkpoint
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
@ -698,15 +694,106 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state):
|
|||
|
||||
pre_store_justified_checkpoint_root = store.justified_checkpoint.root
|
||||
for block in all_blocks:
|
||||
# FIXME: Once `on_block` and `on_attestation` logic is fixed,
|
||||
# fix test case and remove allow_invalid_attestations flag
|
||||
yield from tick_and_add_block(spec, store, block, test_steps, allow_invalid_attestations=True)
|
||||
yield from tick_and_add_block(spec, store, block, test_steps)
|
||||
|
||||
finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
|
||||
ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot)
|
||||
assert ancestor_at_finalized_slot == store.finalized_checkpoint.root
|
||||
|
||||
assert store.finalized_checkpoint == another_state.finalized_checkpoint
|
||||
assert store.justified_checkpoint != another_state.current_justified_checkpoint
|
||||
assert store.justified_checkpoint == another_state.current_justified_checkpoint
|
||||
|
||||
yield 'steps', test_steps
|
||||
|
||||
|
||||
@with_all_phases
@spec_state_test
def test_proposer_boost(spec, state):
    test_steps = []
    genesis_state = state.copy()

    # Initialization
    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block

    # Build block that serves as head ONLY on timely arrival, and ONLY in that slot
    state = genesis_state.copy()
    next_slots(spec, state, 3)
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)

    # Process block on timely arrival just before end of boost interval
    time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
            spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT - 1)
    on_tick_and_append_step(spec, store, time, test_steps)
    yield from add_block(spec, store, signed_block, test_steps)
    assert store.proposer_boost_root == spec.hash_tree_root(block)
    assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0

    # Ensure that boost is removed after slot is over
    time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
            spec.config.SECONDS_PER_SLOT)
    on_tick_and_append_step(spec, store, time, test_steps)
    assert store.proposer_boost_root == spec.Root()
    assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0

    next_slots(spec, state, 3)
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)

    # Process block on timely arrival at start of boost interval
    time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT)
    on_tick_and_append_step(spec, store, time, test_steps)
    yield from add_block(spec, store, signed_block, test_steps)
    assert store.proposer_boost_root == spec.hash_tree_root(block)
    assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0

    # Ensure that boost is removed after slot is over
    time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
            spec.config.SECONDS_PER_SLOT)
    on_tick_and_append_step(spec, store, time, test_steps)
    assert store.proposer_boost_root == spec.Root()
    assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0

    test_steps.append({
        'checks': {
            'proposer_boost_root': encode_hex(store.proposer_boost_root),
        }
    })

    yield 'steps', test_steps


@with_all_phases
@spec_state_test
def test_proposer_boost_root_same_slot_untimely_block(spec, state):
    test_steps = []
    genesis_state = state.copy()

    # Initialization
    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block

    # Build block that serves as head ONLY on timely arrival, and ONLY in that slot
    state = genesis_state.copy()
    next_slots(spec, state, 3)
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)

    # Process block on untimely arrival in the same slot
    time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT +
            spec.config.SECONDS_PER_SLOT // spec.INTERVALS_PER_SLOT)
    on_tick_and_append_step(spec, store, time, test_steps)
    yield from add_block(spec, store, signed_block, test_steps)

    assert store.proposer_boost_root == spec.Root()

    test_steps.append({
        'checks': {
            'proposer_boost_root': encode_hex(store.proposer_boost_root),
        }
    })

    yield 'steps', test_steps

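The two tests above sit on either side of the proposer-boost timeliness boundary: a block only earns the boost if it is processed strictly before `SECONDS_PER_SLOT // INTERVALS_PER_SLOT` seconds into its own slot. Below is a minimal, self-contained sketch of that condition as exercised by the arrival times above; the constant values and the function name are assumptions for illustration, not the spec's `on_block` itself.

```python
# Sketch of the timeliness condition targeted by the arrival times in the tests above.
# SECONDS_PER_SLOT / INTERVALS_PER_SLOT values are assumed (mainnet-like) for illustration.
SECONDS_PER_SLOT = 12
INTERVALS_PER_SLOT = 3


def is_timely_arrival(store_time: int, genesis_time: int, block_slot: int) -> bool:
    # Seconds elapsed inside the block's own slot at processing time.
    time_into_slot = (store_time - genesis_time) - block_slot * SECONDS_PER_SLOT
    return 0 <= time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT


# One second before the interval boundary -> boosted; exactly at the boundary -> no boost.
assert is_timely_arrival(3 * SECONDS_PER_SLOT + SECONDS_PER_SLOT // INTERVALS_PER_SLOT - 1, 0, 3)
assert not is_timely_arrival(3 * SECONDS_PER_SLOT + SECONDS_PER_SLOT // INTERVALS_PER_SLOT, 0, 3)
```
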
@ -74,3 +74,10 @@ def test_time(spec, state):
@spec_state_test
def test_networking(spec, state):
    assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT


@with_all_phases
@spec_state_test
def test_fork_choice(spec, state):
    assert spec.INTERVALS_PER_SLOT < spec.config.SECONDS_PER_SLOT
    assert spec.config.PROPOSER_SCORE_BOOST <= 100

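The new invariant above reflects that `PROPOSER_SCORE_BOOST` is expressed as a percentage of a slot's committee weight, so any value above 100 would be meaningless. A rough sketch of how such a percentage boost scales a committee weight is shown below; the function name is invented for illustration and this is not the spec's `get_latest_attesting_balance` itself.

```python
# Illustrative only: scale a per-slot committee weight by a percentage boost.
def proposer_boost_weight(committee_weight: int, proposer_score_boost: int) -> int:
    assert 0 <= proposer_score_boost <= 100  # mirrors the invariant checked above
    return committee_weight * proposer_score_boost // 100


# With the configured 70% boost, a committee weight of 32 units adds 22 units
# of extra fork choice weight to the timely proposer's block.
assert proposer_boost_weight(32, 70) == 22
```
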
@ -1,12 +1,10 @@
from .utils import (
    vector_test,
    with_meta_tags,
    build_transition_test,
)


__all__ = [  # avoid "unused import" lint error
    "vector_test",
    "with_meta_tags",
    "build_transition_test",
]

@ -1,4 +1,3 @@
import inspect
from typing import Dict, Any
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize

@ -94,50 +93,3 @@ def with_meta_tags(tags: Dict[str, Any]):
                    yield k, 'meta', v
        return entry
    return runner


def build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=None):
    """
    Handles the inner plumbing to generate `transition_test`s.
    See that decorator in `context.py` for more information.
    """
    def _adapter(*args, **kwargs):
        post_spec = kwargs["phases"][post_fork_name]

        pre_fork_counter = 0

        def pre_tag(obj):
            nonlocal pre_fork_counter
            pre_fork_counter += 1
            return obj

        def post_tag(obj):
            return obj

        yield "post_fork", "meta", post_fork_name

        has_fork_epoch = False
        if fork_epoch:
            kwargs["fork_epoch"] = fork_epoch
            has_fork_epoch = True
            yield "fork_epoch", "meta", fork_epoch

        # massage args to handle an optional custom state using
        # `with_custom_state` decorator
        expected_args = inspect.getfullargspec(fn)
        if "phases" not in expected_args.kwonlyargs:
            kwargs.pop("phases", None)

        for part in fn(*args,
                       post_spec=post_spec,
                       pre_tag=pre_tag,
                       post_tag=post_tag,
                       **kwargs):
            if part[0] == "fork_epoch":
                has_fork_epoch = True
            yield part
        assert has_fork_epoch

        if pre_fork_counter > 0:
            yield "fork_block", "meta", pre_fork_counter - 1
    return _adapter

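`build_transition_test` (relocated in this change) is essentially an adapter that turns a transition test function into a flat stream of `(key, kind, value)` parts for the test-vector writer. The toy sketch below illustrates that part-stream pattern in isolation; the names and values are invented for the example and are not part of the repo.

```python
# Toy illustration of the (key, kind, value) part-stream pattern used by the adapter above.
def toy_adapter(post_fork_name, fork_epoch=None):
    yield "post_fork", "meta", post_fork_name
    if fork_epoch is not None:
        yield "fork_epoch", "meta", fork_epoch
    # A real adapter would also forward the parts yielded by the wrapped test function.
    yield "blocks_count", "meta", 0


for key, kind, value in toy_adapter("altair", fork_epoch=2):
    print(f"{key} ({kind}): {value}")
```
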
@ -110,6 +110,7 @@ best_justified_checkpoint: {
    epoch: int,                 -- Integer value from store.best_justified_checkpoint.epoch
    root: string,               -- Encoded 32-byte value from store.best_justified_checkpoint.root
}
proposer_boost_root: string     -- Encoded 32-byte value from store.proposer_boost_root
```

For example:

@ -120,6 +121,7 @@ For example:
justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'}
best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'}
proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'
```

*Note*: Each `checks` step may include one or multiple items. Each item has to be checked against the current store.

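As a hedged illustration of how a consuming test runner might evaluate the new field, the sketch below applies a `checks` step to a fork choice store; `apply_checks` and the exact store interface are assumptions for the example, and only the field names and encodings come from the format above.

```python
from eth_utils import encode_hex  # 0x-prefixed hex encoding, as used by the pyspec helpers


def apply_checks(store, checks: dict) -> None:
    # Every item present in a `checks` step must match the current store.
    if 'proposer_boost_root' in checks:
        assert encode_hex(store.proposer_boost_root) == checks['proposer_boost_root']
    if 'best_justified_checkpoint' in checks:
        expected = checks['best_justified_checkpoint']
        assert store.best_justified_checkpoint.epoch == expected['epoch']
        assert encode_hex(store.best_justified_checkpoint.root) == expected['root']
```
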
@ -1,6 +1,14 @@
from typing import Iterable

from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.test.helpers.constants import (
    MINIMAL,
    MAINNET,
    ALL_PRE_POST_FORKS,
)
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import (
    generate_from_tests,
)
from eth2spec.test.altair.transition import (
    test_transition as test_altair_transition,
    test_activations_and_exits as test_altair_activations_and_exits,

@ -8,9 +16,9 @@ from eth2spec.test.altair.transition import (
    test_slashing as test_altair_slashing,
    test_operations as test_altair_operations,
)

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests
from eth2spec.test.merge.transition import (
    test_transition as test_merge_transition,
)


def create_provider(tests_src, preset_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:

@ -31,18 +39,21 @@ def create_provider(tests_src, preset_name: str, pre_fork_name: str, post_fork_n
    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


TRANSITION_TESTS = (
    (PHASE0, ALTAIR, test_altair_transition),
    (PHASE0, ALTAIR, test_altair_activations_and_exits),
    (PHASE0, ALTAIR, test_altair_leaking),
    (PHASE0, ALTAIR, test_altair_slashing),
    (PHASE0, ALTAIR, test_altair_operations),
)


if __name__ == "__main__":
    for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
        gen_runner.run_generator("transition", [
            create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
            create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
        ])
    altair_tests = (
        test_altair_transition,
        test_altair_activations_and_exits,
        test_altair_leaking,
        test_altair_slashing,
        test_altair_operations,
    )
    merge_tests = (
        test_merge_transition,
    )
    all_tests = altair_tests + merge_tests
    for transition_test_module in all_tests:
        for pre_fork, post_fork in ALL_PRE_POST_FORKS:
            gen_runner.run_generator("transition", [
                create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
                create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
            ])
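The rewritten `__main__` block now parameterizes every transition test module over all configured pre/post fork pairs instead of hard-coding phase0-to-Altair triples. A rough sketch of the shape such a pairs table takes is shown below; the concrete contents are defined in `eth2spec.test.helpers.constants`, and the values here are assumptions for illustration only.

```python
# Illustration only: plausible (pre_fork, post_fork) pairs for the generator loop above.
PHASE0, ALTAIR, MERGE = 'phase0', 'altair', 'merge'
ALL_PRE_POST_FORKS = (
    (PHASE0, ALTAIR),  # phase0 state upgraded to Altair
    (ALTAIR, MERGE),   # Altair state upgraded to the Merge
)

for pre_fork, post_fork in ALL_PRE_POST_FORKS:
    print(f"generating transition tests: {pre_fork} -> {post_fork}")
```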