Mirror of https://github.com/status-im/eth2.0-specs.git (synced 2025-01-24 17:39:05 +00:00)

Commit a2c8e0e6c6: Merge branch 'dev' into translate-participation-test
README.md

@@ -23,6 +23,7 @@ The current features are:

 * [Deposit Contract](specs/phase0/deposit-contract.md)
 * [Honest Validator](specs/phase0/validator.md)
 * [P2P Networking](specs/phase0/p2p-interface.md)
+* [Weak Subjectivity](specs/phase0/weak-subjectivity.md)

 ### Altair
configs/mainnet/altair.yaml

@@ -10,22 +10,20 @@ MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
 PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2


+# Sync committee
+# ---------------------------------------------------------------
+# 2**9 (= 512)
+SYNC_COMMITTEE_SIZE: 512
+# 2**9 (= 512)
+EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512
+
+
 # Misc
 # ---------------------------------------------------------------
-# 2**10 (= 1,024)
-SYNC_COMMITTEE_SIZE: 1024
-# 2**6 (= 64)
-SYNC_PUBKEYS_PER_AGGREGATE: 64
 # 2**2 (= 4)
 INACTIVITY_SCORE_BIAS: 4


-# Time parameters
-# ---------------------------------------------------------------
-# 2**8 (= 256)
-EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
-
-
 # Signature domains
 # ---------------------------------------------------------------
 DOMAIN_SYNC_COMMITTEE: 0x07000000

@@ -45,10 +43,6 @@ ALTAIR_FORK_EPOCH: 18446744073709551615
 # ---------------------------------------------------------------
 # 1
 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
-# 2**13
-MAX_VALID_LIGHT_CLIENT_UPDATES: 8192
-# 2**13 (=8192)
-LIGHT_CLIENT_UPDATE_TIMEOUT: 8192


 # Validator
configs/minimal/altair.yaml

@@ -10,22 +10,20 @@ MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
 PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2


-# Misc
+# Sync committee
 # ---------------------------------------------------------------
 # [customized]
 SYNC_COMMITTEE_SIZE: 32
 # [customized]
-SYNC_PUBKEYS_PER_AGGREGATE: 16
+EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
+
+
+# Misc
+# ---------------------------------------------------------------
 # 2**2 (= 4)
 INACTIVITY_SCORE_BIAS: 4


-# Time parameters
-# ---------------------------------------------------------------
-# [customized]
-EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
-
-
 # Signature domains
 # ---------------------------------------------------------------
 DOMAIN_SYNC_COMMITTEE: 0x07000000

@@ -45,10 +43,8 @@ ALTAIR_FORK_EPOCH: 18446744073709551615
 # ---------------------------------------------------------------
 # 1
 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
-# [customized]
-MAX_VALID_LIGHT_CLIENT_UPDATES: 32
 # [customized]
 LIGHT_CLIENT_UPDATE_TIMEOUT: 32


 # Validator
 # ---------------------------------------------------------------
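For intuition, a quick check of the new period length in wall-clock time (an illustrative sketch, assuming the phase 0 mainnet timing constants):

```python
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 512  # new mainnet value above

period_seconds = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
print(period_seconds / 3600)  # ~54.6 hours, the "~54 hours" quoted in the Altair spec tables
```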
setup.py

@@ -167,7 +167,7 @@ def get_spec(file_name: str) -> SpecObject:
             comment = _get_eth2_spec_comment(child)
             if comment == "skip":
                 should_skip = True


     return SpecObject(
         functions=functions,
         custom_types=custom_types,

@@ -441,7 +441,7 @@ ExecutionState = Any


 def get_pow_block(hash: Bytes32) -> PowBlock:
-    pass
+    return PowBlock(block_hash=hash, is_valid=True, is_processed=True, total_difficulty=TRANSITION_TOTAL_DIFFICULTY)


 def get_execution_state(execution_state_root: Bytes32) -> ExecutionState:

@@ -548,7 +548,7 @@ ignored_dependencies = [
     'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
     'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
     'bytes', 'byte', 'ByteList', 'ByteVector',
-    'Dict', 'dict', 'field', 'ceillog2', 'floorlog2',
+    'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
 ]
specs/altair/beacon-chain.md

@@ -14,8 +14,8 @@
   - [Misc](#misc)
 - [Configuration](#configuration)
   - [Updated penalty values](#updated-penalty-values)
+  - [Sync committee](#sync-committee)
   - [Misc](#misc-1)
-  - [Time parameters](#time-parameters)
   - [Domain types](#domain-types)
 - [Containers](#containers)
   - [Modified containers](#modified-containers)

@@ -28,12 +28,11 @@
 - [Predicates](#predicates)
   - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
 - [Misc](#misc-2)
-  - [`get_flag_indices_and_weights`](#get_flag_indices_and_weights)
   - [`add_flag`](#add_flag)
   - [`has_flag`](#has_flag)
 - [Beacon state accessors](#beacon-state-accessors)
-  - [`get_sync_committee_indices`](#get_sync_committee_indices)
-  - [`get_sync_committee`](#get_sync_committee)
+  - [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
+  - [`get_next_sync_committee`](#get_next_sync_committee)
   - [`get_base_reward_per_increment`](#get_base_reward_per_increment)
   - [`get_base_reward`](#get_base_reward)
   - [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)

@@ -100,6 +99,7 @@ Altair is the first beacon chain hard fork. Its main features are:
 | Name | Value |
 | - | - |
 | `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
+| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_HEAD_WEIGHT, TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT]` |

 ## Configuration
@@ -115,20 +115,19 @@ This patch updates a few configuration values to move penalty parameters closer
 | `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` | `uint64(2**6)` (= 64) |
 | `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR` | `uint64(2)` |

+### Sync committee
+
+| Name | Value | Unit | Duration |
+| - | - | - | - |
+| `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | |
+| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours |
+
 ### Misc

 | Name | Value |
 | - | - |
-| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1,024) |
-| `SYNC_PUBKEYS_PER_AGGREGATE` | `uint64(2**6)` (= 64) |
 | `INACTIVITY_SCORE_BIAS` | `uint64(4)` |

-### Time parameters
-
-| Name | Value | Unit | Duration |
-| - | - | :-: | :-: |
-| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
-
 ### Domain types

 | Name | Value |
@@ -213,7 +212,7 @@ class SyncAggregate(Container):
 ```python
 class SyncCommittee(Container):
     pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
-    pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_PUBKEYS_PER_AGGREGATE]
+    aggregate_pubkey: BLSPubkey
 ```

 ## Helper functions
@@ -234,20 +233,6 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s
 ### Misc

-#### `get_flag_indices_and_weights`
-
-```python
-def get_flag_indices_and_weights() -> Sequence[Tuple[int, uint64]]:
-    """
-    Return paired tuples of participation flag indices along with associated incentivization weights.
-    """
-    return (
-        (TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT),
-        (TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT),
-        (TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_WEIGHT),
-    )
-```
-
 #### `add_flag`

 ```python
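For reference, the flag helpers this section keeps (`add_flag`, `has_flag`) are plain bit operations on the packed participation flags. A minimal self-contained sketch, with ints standing in for `ParticipationFlags` and illustrative flag indices:

```python
TIMELY_HEAD_FLAG_INDEX = 0    # illustrative index values
TIMELY_SOURCE_FLAG_INDEX = 1
TIMELY_TARGET_FLAG_INDEX = 2

def add_flag(flags: int, flag_index: int) -> int:
    # Set the bit for ``flag_index``
    return flags | (2 ** flag_index)

def has_flag(flags: int, flag_index: int) -> bool:
    # Check the bit for ``flag_index``
    flag = 2 ** flag_index
    return flags & flag == flag

flags = add_flag(0, TIMELY_SOURCE_FLAG_INDEX)
assert has_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
assert not has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
```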
@@ -272,19 +257,22 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:

 ### Beacon state accessors

-#### `get_sync_committee_indices`
+#### `get_next_sync_committee_indices`

 ```python
-def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
+def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
     """
     Return the sequence of sync committee indices (which may include duplicate indices)
-    for a given ``state`` and ``epoch``.
+    for the next sync committee, given a ``state`` at a sync committee period boundary.
+
+    Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128)
     """
+    epoch = Epoch(get_current_epoch(state) + 1)
+
     MAX_RANDOM_BYTE = 2**8 - 1
-    base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
-    active_validator_indices = get_active_validator_indices(state, base_epoch)
+    active_validator_indices = get_active_validator_indices(state, epoch)
     active_validator_count = uint64(len(active_validator_indices))
-    seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
+    seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
     i = 0
     sync_committee_indices: List[ValidatorIndex] = []
     while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
@@ -292,24 +280,34 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
         candidate_index = active_validator_indices[shuffled_index]
         random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
         effective_balance = state.validators[candidate_index].effective_balance
-        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:  # Sample with replacement
+        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            sync_committee_indices.append(candidate_index)
         i += 1
     return sync_committee_indices
 ```

-#### `get_sync_committee`
+#### `get_next_sync_committee`

 ```python
-def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
+def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
     """
-    Return the sync committee for a given ``state`` and ``epoch``.
+    Return the *next* sync committee for a given ``state``.
+
+    ``SyncCommittee`` contains an aggregate pubkey that enables
+    resource-constrained clients to save some computation when verifying
+    the sync committee's signature.
+
+    ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices``
+    returns duplicate indices. Implementations must take care when handling
+    optimizations relating to aggregation and verification in the presence of duplicates.
+
+    Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates``
+    as ``get_next_sync_committee_indices`` is not stable within a given period.
     """
-    indices = get_sync_committee_indices(state, epoch)
+    indices = get_next_sync_committee_indices(state)
     pubkeys = [state.validators[index].pubkey for index in indices]
-    partition = [pubkeys[i:i + SYNC_PUBKEYS_PER_AGGREGATE] for i in range(0, len(pubkeys), SYNC_PUBKEYS_PER_AGGREGATE)]
-    pubkey_aggregates = [bls.AggregatePKs(preaggregate) for preaggregate in partition]
-    return SyncCommittee(pubkeys=pubkeys, pubkey_aggregates=pubkey_aggregates)
+    aggregate_pubkey = bls.AggregatePKs(pubkeys)
+    return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
 ```

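The sampling loop above keeps each candidate with probability proportional to its effective balance, sampling with replacement until the committee is full. A self-contained toy model of just the acceptance rule (the spec's `hash` is SHA-256, modeled with `hashlib`; the registry, seed, and cyclic candidate order are stand-ins for the real shuffling):

```python
import hashlib

MAX_RANDOM_BYTE = 2**8 - 1
MAX_EFFECTIVE_BALANCE = 32 * 10**9  # Gwei
SYNC_COMMITTEE_SIZE = 4             # toy size

seed = b"\x42" * 32
effective_balances = {0: 32 * 10**9, 1: 16 * 10**9, 2: 32 * 10**9}
candidates = [0, 1, 2]              # stand-in for shuffled active indices

committee, i = [], 0
while len(committee) < SYNC_COMMITTEE_SIZE:
    candidate = candidates[i % len(candidates)]  # stand-in for compute_shuffled_index
    random_byte = hashlib.sha256(seed + (i // 32).to_bytes(8, "little")).digest()[i % 32]
    if effective_balances[candidate] * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
        committee.append(candidate)  # sample with replacement: duplicates are possible
    i += 1

print(committee)  # validator 1 (half the max balance) is accepted roughly half as often
```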

 #### `get_base_reward_per_increment`

@@ -387,35 +385,31 @@ def get_attestation_participation_flag_indices(state: BeaconState,
 #### `get_flag_index_deltas`

 ```python
-def get_flag_index_deltas(state: BeaconState, flag_index: int, weight: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
+def get_flag_index_deltas(state: BeaconState, flag_index: int) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
     """
     Return the deltas for a given ``flag_index`` scaled by ``weight`` by scanning through the participation flags.
     """
     rewards = [Gwei(0)] * len(state.validators)
     penalties = [Gwei(0)] * len(state.validators)
-    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
-    increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balances to avoid uint64 overflow
-    unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
-    active_increments = get_total_active_balance(state) // increment
+    previous_epoch = get_previous_epoch(state)
+    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, previous_epoch)
+    weight = PARTICIPATION_FLAG_WEIGHTS[flag_index]
+    unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices)
+    unslashed_participating_increments = unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT
+    active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
     for index in get_eligible_validator_indices(state):
         base_reward = get_base_reward(state, index)
         if index in unslashed_participating_indices:
-            if is_in_inactivity_leak(state):
-                # This flag reward cancels the inactivity penalty corresponding to the flag index
-                rewards[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
-            else:
+            if not is_in_inactivity_leak(state):
                 reward_numerator = base_reward * weight * unslashed_participating_increments
                 rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR))
-        else:
+        elif flag_index != TIMELY_HEAD_FLAG_INDEX:
             penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
     return rewards, penalties
 ```

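To get a feel for the non-leak reward branch above, a worked toy computation (`WEIGHT_DENOMINATOR` is the real constant; the other numbers are illustrative):

```python
WEIGHT_DENOMINATOR = 64
weight = 24                              # illustrative flag weight
base_reward = 10_000                     # Gwei, toy value
unslashed_participating_increments = 900
active_increments = 1000

reward_numerator = base_reward * weight * unslashed_participating_increments
reward = reward_numerator // (active_increments * WEIGHT_DENOMINATOR)
print(reward)  # 3375 Gwei: the full reward (10000 * 24 // 64 = 3750) scaled by the 90% participating fraction
```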

 #### Modified `get_inactivity_penalty_deltas`

 *Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
 and the removal of `BASE_REWARDS_PER_EPOCH`.

 ```python
 def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
     """

@@ -427,9 +421,6 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
     previous_epoch = get_previous_epoch(state)
     matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
     for index in get_eligible_validator_indices(state):
-        for (_, weight) in get_flag_indices_and_weights():
-            # This inactivity penalty cancels the flag reward corresponding to the flag index
-            penalties[index] += Gwei(get_base_reward(state, index) * weight // WEIGHT_DENOMINATOR)
         if index not in matching_target_indices:
             penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
             penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR

@@ -441,7 +432,7 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S

 #### Modified `slash_validator`

-*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR`
+*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR`
 and use `PROPOSER_WEIGHT` when calculating the proposer reward.

 ```python

@@ -509,7 +500,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:

     proposer_reward_numerator = 0
     for index in get_attesting_indices(state, data, attestation.aggregation_bits):
-        for flag_index, weight in get_flag_indices_and_weights():
+        for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
             if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                 epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                 proposer_reward_numerator += get_base_reward(state, index) * weight

@@ -583,7 +574,8 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None
     proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))

     # Apply participant and proposer rewards
-    committee_indices = get_sync_committee_indices(state, get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
     participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit]
     for participant_index in participant_indices:
         increase_balance(state, participant_index, participant_reward)
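The committee indices are now recovered by a reverse pubkey lookup against the registry. A toy illustration (strings stand in for `BLSPubkey` values) showing that duplicate committee entries map back to the same validator index:

```python
all_pubkeys = ["a", "b", "c", "d"]                # stand-in for [v.pubkey for v in state.validators]
current_sync_committee_pubkeys = ["c", "a", "c"]  # committees may contain duplicates

committee_indices = [all_pubkeys.index(pubkey) for pubkey in current_sync_committee_pubkeys]
print(committee_indices)  # [2, 0, 2]: both "c" entries resolve to validator 2
```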
@@ -650,8 +642,7 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
     if get_current_epoch(state) == GENESIS_EPOCH:
         return

-    flag_indices_and_numerators = get_flag_indices_and_weights()
-    flag_deltas = [get_flag_index_deltas(state, index, numerator) for (index, numerator) in flag_indices_and_numerators]
+    flag_deltas = [get_flag_index_deltas(state, flag_index) for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS))]
     deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
     for (rewards, penalties) in deltas:
         for index in range(len(state.validators)):

@@ -695,7 +686,7 @@ def process_sync_committee_updates(state: BeaconState) -> None:
     next_epoch = get_current_epoch(state) + Epoch(1)
     if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
         state.current_sync_committee = state.next_sync_committee
-        state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+        state.next_sync_committee = get_next_sync_committee(state)
 ```

 ## Initialize state for pure Altair testnets and test vectors

@@ -740,8 +731,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     state.genesis_validators_root = hash_tree_root(state.validators)

     # [New in Altair] Fill in sync committees
-    state.current_sync_committee = get_sync_committee(state, get_current_epoch(state))
-    state.next_sync_committee = get_sync_committee(state, get_current_epoch(state) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    # Note: A duplicate committee is assigned for the current and next committee at genesis
+    state.current_sync_committee = get_next_sync_committee(state)
+    state.next_sync_committee = get_next_sync_committee(state)

     return state
 ```
specs/altair/fork.md

@@ -38,7 +38,11 @@ Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since

 ### Upgrading the state

-After `process_slots` of Phase 0 finishes, if `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
+
+The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`.
+Care must be taken when transitioning through the fork boundary as implementations will need a modified state transition function that deviates from the Phase 0 spec.
+In particular, the outer `state_transition` function defined in the Phase 0 spec will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.

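To make the skipped-slot hazard concrete, a small runnable sketch (with a toy `ALTAIR_FORK_EPOCH`) of why only a slot-by-slot loop ever observes the exact fork slot:

```python
SLOTS_PER_EPOCH = 32
ALTAIR_FORK_EPOCH = 10  # assumed toy value; the real value is set per network

def compute_epoch_at_slot(slot: int) -> int:
    return slot // SLOTS_PER_EPOCH

# Suppose the last pre-fork block sits at slot 318 and the next block at slot 325.
# The outer state transition only sees slot 325; the inner per-slot loop is the
# only place where state.slot passes through the fork slot 320.
for slot in range(318, 326):
    if slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(slot) == ALTAIR_FORK_EPOCH:
        print("upgrade fires at slot", slot)  # slot 320 only
```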
 ```python
 def translate_participation(state: BeaconState, pending_attestations: Sequence[PendingAttestation]) -> None:

@@ -51,7 +55,7 @@ def translate_participation(state: BeaconState, pending_attestations: Sequence[P
         # Apply flags to all attesting validators
         epoch_participation = state.previous_epoch_participation
         for index in get_attesting_indices(state, data, attestation.aggregation_bits):
-            for flag_index, weight in get_flag_indices_and_weights():
+            for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
                 if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                     epoch_participation[index] = add_flag(epoch_participation[index], flag_index)

@@ -97,8 +101,10 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
     )
     # Fill in previous epoch participation from the pre state's pending attestations
     translate_participation(post, pre.previous_epoch_attestations)

     # Fill in sync committees
-    post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
-    post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
+    post.current_sync_committee = get_next_sync_committee(post)
+    post.next_sync_committee = get_next_sync_committee(post)
     return post
 ```
specs/altair/p2p-interface.md

@@ -106,9 +106,18 @@ The following validations MUST pass before forwarding the `signed_contribution_a
 ```python
 def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
+    # Committees assigned to `slot` sign for `slot - 1`
+    # This creates the exceptional logic below when transitioning between sync committee periods
+    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
+    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
+        sync_committee = state.current_sync_committee
+    else:
+        sync_committee = state.next_sync_committee
+
+    # Return pubkeys for the subcommittee index
     sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
     i = subcommittee_index * sync_subcommittee_size
-    return state.current_sync_committee.pubkeys[i:i + sync_subcommittee_size]
+    return sync_committee.pubkeys[i:i + sync_subcommittee_size]
 ```

 - _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
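A runnable sketch (illustrative mainnet-style constants) of the boundary rule in the comments above: because the committee assigned to a slot signs for `slot - 1`, the last slot of a period is signed for by the *next* committee:

```python
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256  # illustrative

def compute_epoch_at_slot(slot: int) -> int:
    return slot // SLOTS_PER_EPOCH

def compute_sync_committee_period(epoch: int) -> int:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD

last_slot_of_period = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1  # 8191
for slot in (last_slot_of_period - 1, last_slot_of_period):
    current_period = compute_sync_committee_period(compute_epoch_at_slot(slot))
    next_slot_period = compute_sync_committee_period(compute_epoch_at_slot(slot + 1))
    print(slot, "->", "current" if current_period == next_slot_period else "next")
# 8190 -> current, 8191 -> next
```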
specs/altair/sync-protocol.md

@@ -12,7 +12,6 @@
 - [Constants](#constants)
 - [Configuration](#configuration)
   - [Misc](#misc)
-  - [Time parameters](#time-parameters)
 - [Containers](#containers)
   - [`LightClientSnapshot`](#lightclientsnapshot)
   - [`LightClientUpdate`](#lightclientupdate)

@@ -51,13 +50,6 @@ uses sync committees introduced in [this beacon chain extension](./beacon-chain.
 | Name | Value |
 | - | - |
 | `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` |
-| `MAX_VALID_LIGHT_CLIENT_UPDATES` | `uint64(2**64 - 1)` |
-
-### Time parameters
-
-| Name | Value | Unit | Duration |
-| - | - | :-: | :-: |
-| `LIGHT_CLIENT_UPDATE_TIMEOUT` | `Slot(2**13)` | slots | ~27 hours |

 ## Containers

@@ -94,9 +86,10 @@ class LightClientUpdate(Container):
 ### `LightClientStore`

 ```python
-class LightClientStore(Container):
+@dataclass
+class LightClientStore(object):
     snapshot: LightClientSnapshot
-    valid_updates: List[LightClientUpdate, MAX_VALID_LIGHT_CLIENT_UPDATES]
+    valid_updates: Set[LightClientUpdate]
 ```

 ## Helper functions

@@ -182,20 +175,21 @@ def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClient
 def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot,
                                 genesis_validators_root: Root) -> None:
     validate_light_client_update(store.snapshot, update, genesis_validators_root)
-    store.valid_updates.append(update)
+    store.valid_updates.add(update)

+    update_timeout = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
     if (
-        sum(update.sync_committee_bits) * 3 > len(update.sync_committee_bits) * 2
+        sum(update.sync_committee_bits) * 3 >= len(update.sync_committee_bits) * 2
         and update.finality_header != BeaconBlockHeader()
     ):
         # Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
         # Note that (2) means that the current light client design needs finality.
         # It may be changed to re-organizable light client design. See the on-going issue eth2.0-specs#2182.
         apply_light_client_update(store.snapshot, update)
-        store.valid_updates = []
-    elif current_slot > store.snapshot.header.slot + LIGHT_CLIENT_UPDATE_TIMEOUT:
+        store.valid_updates = set()
+    elif current_slot > store.snapshot.header.slot + update_timeout:
         # Forced best update when the update timeout has elapsed
         apply_light_client_update(store.snapshot,
                                   max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
-        store.valid_updates = []
+        store.valid_updates = set()
 ```
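The quorum comparison changed from strict `>` to `>=`. The two differ exactly when participation is exactly two-thirds and `len(bits) * 2` is divisible by 3, as a tiny sketch shows:

```python
def quorum(bits, strict):
    # Old rule used `>`, new rule uses `>=`
    return sum(bits) * 3 > len(bits) * 2 if strict else sum(bits) * 3 >= len(bits) * 2

bits = [True, True, False]         # exactly 2/3 of a toy 3-member committee
print(quorum(bits, strict=True))   # False: old rule rejects exact two-thirds
print(quorum(bits, strict=False))  # True:  new rule accepts it
```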
specs/altair/validator.md

@@ -74,7 +74,7 @@ This document is currently illustrative for early Altair testnets and some parts
 | Name | Value | Unit |
 | - | - | :-: |
 | `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**2` (= 4) | validators |
-| `SYNC_COMMITTEE_SUBNET_COUNT` | `8` | The number of sync committee subnets used in the gossipsub aggregation protocol. |
+| `SYNC_COMMITTEE_SUBNET_COUNT` | `4` | The number of sync committee subnets used in the gossipsub aggregation protocol. |

 ## Containers

@@ -143,6 +143,11 @@ A validator determines beacon committee assignments and beacon block proposal du
 To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
 This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.

+*Note*: Being assigned to a sync committee for a given `slot` means that the validator produces and broadcasts signatures for `slot - 1` for inclusion in `slot`.
+This means that when assigned to an `epoch` sync committee, signatures must be produced and broadcast for slots on the range `[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)`
+rather than for the range `[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`.
+To reduce complexity during the Altair fork, sync committees are not expected to produce signatures for `compute_start_slot_at_epoch(ALTAIR_FORK_EPOCH) - 1`.
+
 ```python
 def compute_sync_committee_period(epoch: Epoch) -> uint64:
     return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD

@@ -172,7 +177,6 @@ At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and
 Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.

 *Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
-This means that calling `get_sync_committee()` in a given `epoch` can return a different result than what was computed during the relevant epoch transition.
 For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.

 A validator should plan for future sync committee assignments by noting which sync committee periods they are selected for participation.

@@ -224,12 +228,12 @@ def process_sync_committee_contributions(block: BeaconBlock,
                                          contributions: Set[SyncCommitteeContribution]) -> None:
     sync_aggregate = SyncAggregate()
     signatures = []
+    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT

     for contribution in contributions:
         subcommittee_index = contribution.subcommittee_index
         for index, participated in enumerate(contribution.aggregation_bits):
             if participated:
-                sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
                 participant_index = sync_subcommittee_size * subcommittee_index + index
                 sync_aggregate.sync_committee_bits[participant_index] = True
                 signatures.append(contribution.signature)

@@ -261,12 +265,12 @@ This process occurs each slot.

 ##### Prepare sync committee signature

-If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every slot in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of the current slot.
+If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`.

 This logic is triggered upon the same conditions as when producing an attestation.
 Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.

-`get_sync_committee_signature()` assumes `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
+`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.

 ```python
 def get_sync_committee_signature(state: BeaconState,

@@ -286,17 +290,20 @@ def get_sync_committee_signature(state: BeaconState,
 The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.

 The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
-`subnet_id` can be computed via `compute_subnets_for_sync_committee()` where `state` is a `BeaconState` during the matching sync committee period.
+`subnet_id` can be computed via `compute_subnets_for_sync_committee(state, validator_index)` where `state` is a `BeaconState` during the matching sync committee period.
+
+*Note*: This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.

 ```python
 def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
+    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
+    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
+        sync_committee = state.current_sync_committee
+    else:
+        sync_committee = state.next_sync_committee
+
     target_pubkey = state.validators[validator_index].pubkey
-    sync_committee_indices = [
-        index for index, pubkey in enumerate(state.current_sync_committee.pubkeys)
-        if pubkey == target_pubkey
-    ]
+    sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
     return [
         uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
         for index in sync_committee_indices
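A toy illustration (small assumed sizes) of the duplicate case noted above: a validator sampled into the committee several times may land in several subcommittees, and therefore must publish on several subnets:

```python
SYNC_COMMITTEE_SIZE = 8
SYNC_COMMITTEE_SUBNET_COUNT = 4     # toy values: subcommittees of size 2

committee_pubkeys = ["k0", "k1", "k2", "k1", "k4", "k5", "k1", "k7"]  # "k1" sampled three times
target_pubkey = "k1"

indices = [i for i, pk in enumerate(committee_pubkeys) if pk == target_pubkey]
subnets = sorted({i // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT) for i in indices})
print(indices, subnets)  # [1, 3, 6] -> subnets [0, 1, 3]
```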
@@ -359,7 +366,7 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co
 ###### Aggregation bits

 Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
-An aggregator finds the index in the sync committee (as returned by `get_sync_committee_indices()`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.
+An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.

 For example, a validator with index `2044` might be pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.

specs/merge/beacon-chain.md

@@ -13,8 +13,8 @@
 - [Introduction](#introduction)
 - [Custom types](#custom-types)
 - [Constants](#constants)
-  - [Transition](#transition)
   - [Execution](#execution)
+- [Configuration](#configuration)
 - [Containers](#containers)
   - [Extended containers](#extended-containers)
     - [`BeaconBlockBody`](#beaconblockbody)

@@ -24,6 +24,7 @@
     - [`ExecutionPayloadHeader`](#executionpayloadheader)
 - [Helper functions](#helper-functions)
   - [Misc](#misc)
+    - [`is_execution_enabled`](#is_execution_enabled)
     - [`is_transition_completed`](#is_transition_completed)
     - [`is_transition_block`](#is_transition_block)
     - [`compute_time_at_slot`](#compute_time_at_slot)

@@ -50,12 +51,6 @@ We define the following Python custom types for type hinting and readability:

 ## Constants

-### Transition
-
-| Name | Value |
-| - | - |
-| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** |
-
 ### Execution

 | Name | Value |

@@ -64,6 +59,16 @@ We define the following Python custom types for type hinting and readability:
 | `MAX_EXECUTION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) |
 | `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |

+## Configuration
+
+Warning: this configuration is not definitive.
+
+| Name | Value |
+| - | - |
+| `MERGE_FORK_VERSION` | `Version('0x02000000')` |
+| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
+| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** |
+
 ## Containers

 ### Extended containers

@@ -136,6 +141,13 @@ class ExecutionPayloadHeader(Container):

 ### Misc

+#### `is_execution_enabled`
+
+```python
+def is_execution_enabled(state: BeaconState, block: BeaconBlock) -> bool:
+    return is_transition_completed(state) or is_transition_block(state, block)
+```
+
 #### `is_transition_completed`

 ```python

@@ -146,8 +158,8 @@ def is_transition_completed(state: BeaconState) -> bool:
 #### `is_transition_block`

 ```python
-def is_transition_block(state: BeaconState, block_body: BeaconBlockBody) -> bool:
-    return not is_transition_completed(state) and block_body.execution_payload != ExecutionPayload()
+def is_transition_block(state: BeaconState, block: BeaconBlock) -> bool:
+    return not is_transition_completed(state) and block.body.execution_payload != ExecutionPayload()
 ```

 #### `compute_time_at_slot`

@@ -168,7 +180,9 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
     process_randao(state, block.body)
     process_eth1_data(state, block.body)
     process_operations(state, block.body)
-    process_execution_payload(state, block.body)  # [New in Merge]
+    # Pre-merge, skip execution payload processing
+    if is_execution_enabled(state, block):
+        process_execution_payload(state, block.body.execution_payload)  # [New in Merge]
 ```

 #### Execution payload processing

@@ -181,16 +195,10 @@ The body of the function is implementation dependent.
 ##### `process_execution_payload`

 ```python
-def process_execution_payload(state: BeaconState, body: BeaconBlockBody) -> None:
+def process_execution_payload(state: BeaconState, execution_payload: ExecutionPayload) -> None:
     """
     Note: This function is designed to be able to be run in parallel with the other `process_block` sub-functions
     """
-    # Pre-merge, skip processing
-    if not is_transition_completed(state) and not is_transition_block(state, body):
-        return
-
-    execution_payload = body.execution_payload
-
     if is_transition_completed(state):
         assert execution_payload.parent_hash == state.latest_execution_payload_header.block_hash
         assert execution_payload.number == state.latest_execution_payload_header.number + 1
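The new gate reduces to a small truth table. An illustrative sketch, with booleans standing in for `is_transition_completed(state)` and for a non-default `execution_payload`:

```python
def is_execution_enabled(transition_completed: bool, payload_is_nonempty: bool) -> bool:
    # Mirrors: is_transition_completed(state) or is_transition_block(state, block)
    is_transition_block = not transition_completed and payload_is_nonempty
    return transition_completed or is_transition_block

for completed in (False, True):
    for nonempty in (False, True):
        print(completed, nonempty, "->", is_execution_enabled(completed, nonempty))
# Only (False, False), i.e. pre-transition with an empty payload, skips payload processing.
```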
specs/merge/fork-choice.md

@@ -75,7 +75,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
     assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root

     # [New in Merge]
-    if is_transition_block(pre_state, block.body):
+    if is_transition_block(pre_state, block):
         # Delay consideration of block until PoW block is processed by the PoW node
         pow_block = get_pow_block(block.body.execution_payload.parent_hash)
         assert pow_block.is_processed
specs/phase0/p2p-interface.md

@@ -93,6 +93,8 @@ It consists of four main sections:
 - [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc)
 - [Why do we allow empty responses in block requests?](#why-do-we-allow-empty-responses-in-block-requests)
 - [Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?](#why-does-beaconblocksbyrange-let-the-server-choose-which-branch-to-send-blocks-from)
+- [Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?](#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs)
+- [Why must the proposer signature be checked when backfilling blocks in the database?](#why-must-the-proposer-signature-be-checked-when-backfilling-blocks-in-the-database)
 - [What's the effect of empty slots on the sync algorithm?](#whats-the-effect-of-empty-slots-on-the-sync-algorithm)
 - [Discovery](#discovery)
   - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)

@@ -171,6 +173,7 @@ This section outlines constants that are used in this spec.
 |---|---|---|
 | `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
 | `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
+| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
 | `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
 | `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
 | `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |

@@ -179,7 +182,6 @@ This section outlines constants that are used in this spec.
 | `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
 | `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |

-
 ## MetaData

 Clients MUST locally store the following `MetaData`:

@@ -565,6 +567,8 @@ The response code can have one of the following values, encoded as a single unsi
     The response payload adheres to the `ErrorMessage` schema (described below).
 - 2: **ServerError** -- the responder encountered an error while processing the request.
     The response payload adheres to the `ErrorMessage` schema (described below).
+- 3: **ResourceUnavailable** -- the responder does not have the requested resource.
+    The response payload adheres to the `ErrorMessage` schema (described below).

 Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.

@@ -745,10 +749,27 @@ The request MUST be encoded as an SSZ-container.
 The response MUST consist of zero or more `response_chunk`.
 Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.

-Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period
-and MUST support serving requests of blocks up to their own `head_block_root`.
+Clients MUST keep a record of signed blocks seen on the epoch range
+`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]`
+where `current_epoch` is defined by the current wall-clock time,
+and clients MUST support serving requests of blocks on this range.

-Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.
+Peers that are unable to reply to block requests within the
+`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epoch range MAY get descored or disconnected at any time.
+
+*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
+MUST backfill the local block database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS`
+to be fully compliant with `BlocksByRange` requests. To safely perform such a
+backfill of blocks to the recent state, the node MUST validate both (1) the
+proposer signatures and (2) that the blocks form a valid chain up to the most
+recent block referenced in the weak subjectivity state.
+
+*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
+participating in the networking immediately, other peers MAY
+disconnect and/or temporarily ban such an un-synced or semi-synced client.
+
+Clients MUST respond with at least the first block that exists in the range, if they have it,
+and no more than `MAX_REQUEST_BLOCKS` blocks.

 The following blocks, where they exist, MUST be sent in consecutive order.

@@ -1393,6 +1414,45 @@ To avoid this race condition, we allow the responding side to choose which branc
 The requesting client then goes on to validate the blocks and incorporate them in their own database
 -- because they follow the same rules, they should at this point arrive at the same canonical chain.

+### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
+
+Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network
+the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
+beacon state and then a simple block sync from there to the head. We expect the latter to be the dominant UX strategy.
+
+These checkpoints *in the worst case* (i.e. very large validator set and maximal allowed safety decay) must be from the
+most recent `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block sync to the head from this starting point.
+Thus, this defines the epoch range outside which nodes may prune blocks, and
+the epoch range that a new node syncing from a checkpoint must backfill.
+
+`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from `compute_weak_subjectivity_period` found in the
+[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max epoch range, we use the worst case event of a very large validator size
+(`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`).
+
+```python
+MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
+    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
+)
+```
+
+Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).
+
+### Why must the proposer signature be checked when backfilling blocks in the database?
+
+When backfilling blocks in a database from a known safe block/state (e.g. when starting from a weak subjectivity state),
+the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
+but also must check that the proposer signature is valid in the `SignedBeaconBlock` wrapper.
+
+This is because the signature is not part of the `BeaconBlock` hash chain, and
+thus could be corrupted by an attacker serving valid `BeaconBlock`s but invalid
+signatures contained in `SignedBeaconBlock`.
+
+Although in this particular use case this does not represent a decay in safety
+(due to the assumptions of starting at a weak subjectivity checkpoint), it
+would represent invalid historic data and could be unwittingly transmitted to
+additional nodes.
+
 ### What's the effect of empty slots on the sync algorithm?

 When syncing one can only tell that a slot has been skipped on a particular branch
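Checking the quoted value against the phase 0 mainnet constants (reproduced here for convenience):

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8  # 256 epochs
CHURN_LIMIT_QUOTIENT = 2**16                # 65,536
MAX_SAFETY_DECAY = 100

MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
assert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024  # == 256 + 65536 // 2

# 33024 epochs * 32 slots * 12 seconds is about 146.8 days, i.e. the "~5 months" above.
```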
specs/phase0/weak-subjectivity.md

@@ -136,7 +136,9 @@ A brief reference for what these values look like in practice ([reference script

 ## Weak Subjectivity Sync

-Clients should allow users to input a Weak Subjectivity Checkpoint at startup, and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain. If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.
+Clients should allow users to input a Weak Subjectivity Checkpoint at startup,
+and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain.
+If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.

 ### Weak Subjectivity Sync Procedure

specs/sharding/beacon-chain.md

@@ -406,7 +406,7 @@ def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard

     # Proposer must have sufficient balance to pay for worst case fee burn
     EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = (
-        (EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT)
+        EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
         * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT
     )
     min_effective_balance = (
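The only change here is dropping a pair of parentheses, but it flips the result from a constant zero to the intended deviation. A worked check with the phase 0 mainnet hysteresis values:

```python
EFFECTIVE_BALANCE_INCREMENT = 10**9  # Gwei
HYSTERESIS_DOWNWARD_MULTIPLIER = 1
HYSTERESIS_QUOTIENT = 4

old = ((EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT)
       * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT)
new = (EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
       * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT)

print(old, new)  # 0 750000000: the old grouping always evaluated to zero
```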
@@ -714,11 +714,11 @@ def process_pending_headers(state: BeaconState) -> None:
 ```python
 def charge_confirmed_header_fees(state: BeaconState) -> None:
     new_gasprice = state.shard_gasprice
+    previous_epoch = get_previous_epoch(state)
     adjustment_quotient = (
-        get_active_shard_count(state, get_current_epoch(state))
+        get_active_shard_count(state, previous_epoch)
         * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
     )
-    previous_epoch = get_previous_epoch(state)
     previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
     for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
         for shard_index in range(get_active_shard_count(state, previous_epoch)):
tests/core/pyspec/eth2spec/test/altair/block_processing/test_process_sync_committee.py

@@ -9,7 +9,6 @@ from eth2spec.test.helpers.state import (
     transition_to,
 )
 from eth2spec.test.helpers.constants import (
-    PHASE0,
     MAINNET, MINIMAL,
 )
 from eth2spec.test.helpers.sync_committee import (

@@ -17,7 +16,7 @@ from eth2spec.test.helpers.sync_committee import (
 )
 from eth2spec.test.context import (
     expect_assertion_error,
-    with_all_phases_except,
+    with_altair_and_later,
     with_configs,
     spec_state_test,
     always_bls,

@@ -50,9 +49,9 @@ def get_committee_indices(spec, state, duplicates=False):
     """
     state = state.copy()
     current_epoch = spec.get_current_epoch(state)
-    randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
+    randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
     while True:
-        committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+        committee = spec.get_next_sync_committee_indices(state)
         if duplicates:
             if len(committee) != len(set(committee)):
                 return committee

@@ -62,57 +61,73 @@ def get_committee_indices(spec, state, duplicates=False):
         state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])


-@with_all_phases_except([PHASE0])
+def compute_committee_indices(spec, state, committee):
+    """
+    Given a ``committee``, calculate and return the related indices
+    """
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
+    return committee_indices
+
+
+@with_altair_and_later
 @spec_state_test
 @always_bls
 def test_invalid_signature_missing_participant(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
     rng = random.Random(2020)
-    random_participant = rng.choice(committee)
+    random_participant = rng.choice(committee_indices)

     block = build_empty_block_for_next_slot(spec, state)
     # Exclude one participant whose signature was included.
     block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[index != random_participant for index in committee],
+        sync_committee_bits=[index != random_participant for index in committee_indices],
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             block.slot - 1,
-            committee,  # full committee signs
+            committee_indices,  # full committee signs
         )
     )
     yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 @always_bls
 def test_invalid_signature_extra_participant(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
     rng = random.Random(3030)
-    random_participant = rng.choice(committee)
+    random_participant = rng.choice(committee_indices)

     block = build_empty_block_for_next_slot(spec, state)
     # Exclude one signature even though the block claims the entire committee participated.
     block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[True] * len(committee),
+        sync_committee_bits=[True] * len(committee_indices),
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             block.slot - 1,
-            [index for index in committee if index != random_participant],
+            [index for index in committee_indices if index != random_participant],
         )
     )

     yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


-def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
+def compute_sync_committee_inclusion_reward(spec,
+                                            state,
+                                            participant_index,
+                                            committee_indices,
+                                            committee_bits):
     total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
     total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
     max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
-    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
-    max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)
+    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
+    max_slot_rewards = spec.Gwei(
+        max_epoch_rewards * len(included_indices)
+        // len(committee_indices) // spec.SLOTS_PER_EPOCH
+    )

     # Compute the participant and proposer sync rewards
     committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])

@@ -121,23 +136,23 @@ def compute_sync_committee_inclusion_reward(spec, state, participant_index, comm
     return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)


-def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
-    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
+def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
+    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
     multiplicities = Counter(included_indices)

     inclusion_reward = compute_sync_committee_inclusion_reward(
-        spec, state, participant_index, committee, committee_bits,
+        spec, state, participant_index, committee_indices, committee_bits,
     )
     return spec.Gwei(inclusion_reward * multiplicities[participant_index])


-def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
+def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
     proposer_reward = 0
-    for index, bit in zip(committee, committee_bits):
+    for index, bit in zip(committee_indices, committee_bits):
         if not bit:
             continue
         inclusion_reward = compute_sync_committee_inclusion_reward(
-            spec, state, index, committee, committee_bits,
+            spec, state, index, committee_indices, committee_bits,
         )
         proposer_reward_denominator = (
             (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
@ -148,30 +163,30 @@ def compute_sync_committee_proposer_reward(spec, state, committee, committee_bit
|
||||
return proposer_reward
|
||||
|
||||
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
|
||||
for index in range(len(post_state.validators)):
|
||||
reward = 0
|
||||
if index in committee:
|
||||
if index in committee_indices:
|
||||
reward += compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
index,
|
||||
committee,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
committee,
|
||||
committee_bits,
|
||||
)
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
spec,
|
||||
pre_state,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
assert post_state.balances[index] == pre_state.balances[index] + reward
|
||||
|
||||
|
||||
def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
|
||||
pre_state = state.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
@ -181,7 +196,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index, bit in zip(committee, committee_bits) if bit],
|
||||
[index for index, bit in zip(committee_indices, committee_bits) if bit],
|
||||
)
|
||||
)
|
||||
|
||||
@ -191,70 +206,70 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
|
||||
spec,
|
||||
pre_state,
|
||||
state,
|
||||
committee,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
block.proposer_index,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee)
|
||||
committee_indices = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee))
|
||||
assert committee_size == len(set(committee_indices))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee)
|
||||
committee_indices = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee))
|
||||
assert committee_size > len(set(committee_indices))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_sync_committee_rewards_not_full_participants(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
rng = random.Random(1010)
|
||||
committee_bits = [rng.choice([True, False]) for _ in committee]
|
||||
committee_bits = [rng.choice([True, False]) for _ in committee_indices]
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_sync_committee_rewards_empty_participants(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_bits = [False for _ in committee]
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
committee_bits = [False for _ in committee_indices]
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_past_block(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
|
||||
blocks = []
|
||||
for _ in range(2):
|
||||
@ -262,12 +277,12 @@ def test_invalid_signature_past_block(spec, state):
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Valid sync committee signature here...
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
@ -277,19 +292,19 @@ def test_invalid_signature_past_block(spec, state):
|
||||
invalid_block = build_empty_block_for_next_slot(spec, state)
|
||||
# Invalid signature from a slot other than the previous
|
||||
invalid_block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
invalid_block.slot - 2,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@with_configs([MINIMAL], reason="to produce different committee sets")
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@ -307,26 +322,25 @@ def test_invalid_signature_previous_committee(spec, state):
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
# Use the previous sync committee to produce the signature.
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
# Ensure that the pubkey sets are different.
|
||||
assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
|
||||
committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
|
||||
committee_indices = compute_committee_indices(spec, state, old_sync_committee)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||
sync_committee_bits=[True] * len(committee),
|
||||
sync_committee_bits=[True] * len(committee_indices),
|
||||
sync_committee_signature=compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
committee_indices,
|
||||
)
|
||||
)
|
||||
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -345,15 +359,13 @@ def test_valid_signature_future_committee(spec, state):
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
sync_committee = state.current_sync_committee
|
||||
next_sync_committee = state.next_sync_committee
|
||||
|
||||
expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)
|
||||
|
||||
assert sync_committee == expected_sync_committee
|
||||
assert next_sync_committee != sync_committee
|
||||
assert sync_committee != old_current_sync_committee
|
||||
assert sync_committee != old_next_sync_committee
|
||||
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]
|
||||
committee_indices = compute_committee_indices(spec, state, sync_committee)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_aggregate = spec.SyncAggregate(
|
||||

@@ -2,16 +2,13 @@ from eth2spec.test.context import (
     always_bls,
     spec_state_test,
     spec_test,
-    with_all_phases_except,
+    with_altair_and_later,
     with_configs,
     with_custom_state,
     single_phase,
     misc_balances,
 )
-from eth2spec.test.helpers.constants import (
-    PHASE0,
-    MINIMAL,
-)
+from eth2spec.test.helpers.constants import MINIMAL
 from eth2spec.test.helpers.state import transition_to
 from eth2spec.test.helpers.epoch_processing import (
     run_epoch_processing_with,
@@ -42,14 +39,13 @@ def run_sync_committees_progress_test(spec, state):
 
     # Can compute the third committee having computed final balances in the last epoch
     # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
-    current_epoch = spec.get_current_epoch(state)
-    third_sync_committee = spec.get_sync_committee(state, current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    third_sync_committee = spec.get_next_sync_committee(state)
 
     assert state.current_sync_committee == second_sync_committee
     assert state.next_sync_committee == third_sync_committee
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 @always_bls
 @with_configs([MINIMAL], reason="too slow")
@@ -60,7 +56,7 @@ def test_sync_committees_progress_genesis(spec, state):
     yield from run_sync_committees_progress_test(spec, state)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 @always_bls
 @with_configs([MINIMAL], reason="too slow")
@@ -73,7 +69,7 @@ def test_sync_committees_progress_not_genesis(spec, state):
     yield from run_sync_committees_progress_test(spec, state)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
 @spec_test
 @single_phase

@@ -11,15 +11,15 @@ from eth2spec.test.helpers.block import (
 from eth2spec.test.helpers.sync_committee import (
     compute_aggregate_sync_committee_signature,
 )
-from eth2spec.test.helpers.constants import PHASE0
 from eth2spec.test.context import (
-    with_all_phases_except,
+    with_altair_and_later,
     spec_state_test,
 )
 
 
 def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     participants = random.sample(committee, int(len(committee) * fraction_full))
 
     yield 'pre', state
@@ -40,46 +40,46 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
     yield 'post', state
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_full_sync_committee_committee(spec, state):
     next_epoch(spec, state)
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_half_sync_committee_committee(spec, state):
     next_epoch(spec, state)
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_empty_sync_committee_committee(spec, state):
     next_epoch(spec, state)
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_full_sync_committee_committee_genesis(spec, state):
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_half_sync_committee_committee_genesis(spec, state):
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_empty_sync_committee_committee_genesis(spec, state):
     yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @spec_state_test
 def test_inactivity_scores(spec, state):
     for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):
@@ -0,0 +1,244 @@
+from eth2spec.test.context import fork_transition_test
+from eth2spec.test.helpers.constants import PHASE0, ALTAIR
+from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
+
+
+def _state_transition_and_sign_block_at_slot(spec, state):
+    """
+    Cribbed from ``transition_unsigned_block`` helper
+    where the early parts of the state transition have already
+    been applied to ``state``.
+
+    Used to produce a block during an irregular state transition.
+    """
+    block = build_empty_block(spec, state)
+
+    assert state.latest_block_header.slot < block.slot
+    assert state.slot == block.slot
+    spec.process_block(state, block)
+    block.state_root = state.hash_tree_root()
+    return sign_block(spec, state, block)
+
+
+def _all_blocks(_):
+    return True
+
+
+def _skip_slots(*slots):
+    """
+    Skip making a block if its slot is
+    passed as an argument to this filter
+    """
+    def f(state_at_prior_slot):
+        return state_at_prior_slot.slot + 1 not in slots
+    return f
+
+
+def _no_blocks(_):
+    return False
+
+
+def _only_at(slot):
+    """
+    Only produce a block if its slot is ``slot``.
+    """
+    def f(state_at_prior_slot):
+        return state_at_prior_slot.slot + 1 == slot
+    return f
+
+
+def _state_transition_across_slots(spec, state, to_slot, block_filter=_all_blocks):
+    assert state.slot < to_slot
+    while state.slot < to_slot:
+        should_make_block = block_filter(state)
+        if should_make_block:
+            block = build_empty_block_for_next_slot(spec, state)
+            signed_block = state_transition_and_sign_block(spec, state, block)
+            yield signed_block
+        else:
+            next_slot(spec, state)
+
+
+def _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=True):
+    spec.process_slots(state, state.slot + 1)
+
+    assert state.slot % spec.SLOTS_PER_EPOCH == 0
+    assert spec.compute_epoch_at_slot(state.slot) == fork_epoch
+
+    state = post_spec.upgrade_to_altair(state)
+
+    assert state.fork.epoch == fork_epoch
+    assert state.fork.previous_version == post_spec.GENESIS_FORK_VERSION
+    assert state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
+
+    if with_block:
+        return state, _state_transition_and_sign_block_at_slot(post_spec, state)
+    else:
+        return state, None
+
+
+@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+    """
+    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
+    producing blocks for every slot along the way.
+    """
+    yield "pre", state
+
+    assert spec.get_current_epoch(state) < fork_epoch
+
+    # regular state transition until fork:
+    to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
+    blocks = []
+    blocks.extend([
+        pre_tag(block) for block in
+        _state_transition_across_slots(spec, state, to_slot)
+    ])
+
+    # irregular state transition to handle fork:
+    state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
+    blocks.append(post_tag(block))
+
+    # continue regular state transition with new spec into next epoch
+    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
+    blocks.extend([
+        post_tag(block) for block in
+        _state_transition_across_slots(post_spec, state, to_slot)
+    ])
+
+    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
+    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
+
+    slots_with_blocks = [block.message.slot for block in blocks]
+    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
+    assert set(range(1, state.slot + 1)) == set(slots_with_blocks)
+
+    yield "blocks", blocks
+    yield "post", state
+
+
+@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+    """
+    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
+    producing blocks for every slot along the way except for the first block
+    of the new fork.
+    """
+    yield "pre", state
+
+    assert spec.get_current_epoch(state) < fork_epoch
+
+    # regular state transition until fork:
+    to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
+    blocks = []
+    blocks.extend([
+        pre_tag(block) for block in
+        _state_transition_across_slots(spec, state, to_slot)
+    ])
+
+    # irregular state transition to handle fork:
+    state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
+
+    # continue regular state transition with new spec into next epoch
+    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
+    blocks.extend([
+        post_tag(block) for block in
+        _state_transition_across_slots(post_spec, state, to_slot)
+    ])
+
+    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
+    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
+
+    slots_with_blocks = [block.message.slot for block in blocks]
+    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
+    expected_slots = set(range(1, state.slot + 1)).difference(set([fork_epoch * spec.SLOTS_PER_EPOCH]))
+    assert expected_slots == set(slots_with_blocks)
+
+    yield "blocks", blocks
+    yield "post", state
+
+
+@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+    """
+    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
+    producing blocks for every slot along the way except for the last block
+    of the old fork.
+    """
+    yield "pre", state
+
+    assert spec.get_current_epoch(state) < fork_epoch
+
+    # regular state transition until fork:
+    last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
+    to_slot = last_slot_of_pre_fork
+    blocks = []
+    blocks.extend([
+        pre_tag(block) for block in
+        _state_transition_across_slots(spec, state, to_slot, block_filter=_skip_slots(last_slot_of_pre_fork))
+    ])
+
+    # irregular state transition to handle fork:
+    state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
+    blocks.append(post_tag(block))
+
+    # continue regular state transition with new spec into next epoch
+    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
+    blocks.extend([
+        post_tag(block) for block in
+        _state_transition_across_slots(post_spec, state, to_slot)
+    ])
+
+    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
+    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
+
+    slots_with_blocks = [block.message.slot for block in blocks]
+    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
+    expected_slots = set(range(1, state.slot + 1)).difference(set([last_slot_of_pre_fork]))
+    assert expected_slots == set(slots_with_blocks)
+
+    yield "blocks", blocks
+    yield "post", state
+
+
+@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
+def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
+    """
+    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
+    skipping blocks for every slot along the way except for the first block
+    in the ending epoch.
+    """
+    yield "pre", state
+
+    assert spec.get_current_epoch(state) < fork_epoch
+
+    # regular state transition until fork:
+    last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
+    to_slot = last_slot_of_pre_fork
+    blocks = []
+    blocks.extend([
+        pre_tag(block) for block in
+        _state_transition_across_slots(spec, state, to_slot, block_filter=_no_blocks)
+    ])
+
+    # irregular state transition to handle fork:
+    state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)
+
+    # continue regular state transition with new spec into next epoch
+    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
+    last_slot = (fork_epoch + 1) * post_spec.SLOTS_PER_EPOCH
+    blocks.extend([
+        post_tag(block) for block in
+        _state_transition_across_slots(post_spec, state, to_slot, block_filter=_only_at(last_slot))
+    ])
+
+    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
+    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1
+
+    slots_with_blocks = [block.message.slot for block in blocks]
+    assert len(slots_with_blocks) == 1
+    assert slots_with_blocks[0] == last_slot
+
+    yield "blocks", blocks
+    yield "post", state

@@ -32,7 +32,7 @@ def test_process_light_client_update_not_updated(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Block at slot 1 doesn't increase sync committee period, so it won't update snapshot
@@ -46,7 +46,8 @@ def test_process_light_client_update_not_updated(spec, state):
         body_root=signed_block.message.body.hash_tree_root(),
     )
     # Sync committee signing the header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -76,7 +77,7 @@ def test_process_light_client_update_not_updated(spec, state):
     spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)
 
     assert len(store.valid_updates) == 1
-    assert store.valid_updates[0] == update
+    assert store.valid_updates.pop() == update
     assert store.snapshot == pre_snapshot
 
 
@@ -91,7 +92,7 @@ def test_process_light_client_update_timeout(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Forward to next sync committee period
@@ -111,7 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
     )
 
     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -156,7 +158,7 @@ def test_process_light_client_update_finality_updated(spec, state):
     )
     store = spec.LightClientStore(
         snapshot=pre_snapshot,
-        valid_updates=[]
+        valid_updates=set(),
     )
 
     # Change finality
@@ -190,7 +192,8 @@ def test_process_light_client_update_finality_updated(spec, state):
     )
 
     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,

@@ -7,10 +7,13 @@ from eth2spec.test.helpers.state import transition_to
 from eth2spec.utils import bls
 from eth2spec.utils.bls import only_with_bls
 from eth2spec.test.context import (
-    PHASE0,
-    with_all_phases_except,
+    with_altair_and_later,
     with_configs,
     with_state,
 )
+from eth2spec.test.helpers.constants import (
+    MINIMAL,
+)
 
 rng = random.Random(1337)
 
@@ -25,7 +28,7 @@ def ensure_assignments_in_sync_committee(
     assert spec.is_assigned_to_sync_committee(state, epoch, validator_index)
 
 
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @with_state
 def test_is_assigned_to_sync_committee(phases, spec, state):
     epoch = spec.get_current_epoch(state)
@@ -91,7 +94,8 @@ def _get_sync_committee_signature(
 
 
 @only_with_bls()
-@with_all_phases_except([PHASE0])
+@with_altair_and_later
 @with_configs([MINIMAL], reason="too slow")
 @with_state
 def test_process_sync_committee_contributions(phases, spec, state):
     # skip over slots at genesis
@@ -144,20 +148,63 @@ def _subnet_for_sync_committee_index(spec, i):
     return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)
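`_subnet_for_sync_committee_index` simply buckets committee positions into equal, contiguous subnet ranges. A standalone sketch of that arithmetic (the two sizes are assumed for illustration, not read from a config):

```python
# Assumed sizes for illustration; the real values come from the spec configuration.
SYNC_COMMITTEE_SIZE = 512
SYNC_COMMITTEE_SUBNET_COUNT = 4

def subnet_for_index(i):
    # 512 // 4 = 128 members per subnet: positions 0..127 -> subnet 0, 128..255 -> subnet 1, ...
    return i // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)

assert subnet_for_index(0) == 0
assert subnet_for_index(127) == 0
assert subnet_for_index(128) == 1
assert subnet_for_index(511) == 3
```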
 
 
-@with_all_phases_except([PHASE0])
+def _get_expected_subnets_by_pubkey(sync_committee_members):
+    expected_subnets_by_pubkey = defaultdict(list)
+    for (subnet, pubkey) in sync_committee_members:
+        expected_subnets_by_pubkey[pubkey].append(subnet)
+    return expected_subnets_by_pubkey
+
+
+@with_altair_and_later
 @with_configs([MINIMAL], reason="too slow")
 @with_state
 def test_compute_subnets_for_sync_committee(state, spec, phases):
     # Transition to the head of the next period
     transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
 
     next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
     assert (
         spec.compute_sync_committee_period(spec.get_current_epoch(state))
         == spec.compute_sync_committee_period(next_slot_epoch)
     )
     some_sync_committee_members = list(
         (
             _subnet_for_sync_committee_index(spec, i),
             pubkey,
         )
+        # use current_sync_committee
         for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
     )
 
-    expected_subnets_by_pubkey = defaultdict(list)
-    for (subnet, pubkey) in some_sync_committee_members:
-        expected_subnets_by_pubkey[pubkey].append(subnet)
+    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
 
     for _, pubkey in some_sync_committee_members:
         validator_index = _validator_index_for_pubkey(state, pubkey)
         subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
         expected_subnets = expected_subnets_by_pubkey[pubkey]
         assert subnets == expected_subnets
 
 
+@with_altair_and_later
+@with_configs([MINIMAL], reason="too slow")
+@with_state
+def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
+    # Transition to the end of the period
+    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
+
+    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
+    assert (
+        spec.compute_sync_committee_period(spec.get_current_epoch(state))
+        != spec.compute_sync_committee_period(next_slot_epoch)
+    )
+    some_sync_committee_members = list(
+        (
+            _subnet_for_sync_committee_index(spec, i),
+            pubkey,
+        )
+        # use next_sync_committee
+        for i, pubkey in enumerate(state.next_sync_committee.pubkeys)
+    )
+    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
+
+    for _, pubkey in some_sync_committee_members:
+        validator_index = _validator_index_for_pubkey(state, pubkey)

@@ -7,11 +7,11 @@ from eth2spec.utils import bls
 
 from .exceptions import SkippedTest
 from .helpers.constants import (
-    PHASE0, ALTAIR,
-    ALL_PHASES, FORKS_BEFORE_ALTAIR,
+    PHASE0, ALTAIR, MERGE,
+    ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
 )
 from .helpers.genesis import create_genesis_state
-from .utils import vector_test, with_meta_tags
+from .utils import vector_test, with_meta_tags, build_transition_test
 
 from random import Random
 from typing import Any, Callable, Sequence, TypedDict, Protocol
@@ -312,7 +312,7 @@ def with_phases(phases, other_phases=None):
                 return None
             run_phases = [phase]
 
-        if PHASE0 not in run_phases and ALTAIR not in run_phases:
+        if PHASE0 not in run_phases and ALTAIR not in run_phases and MERGE not in run_phases:
             dump_skipping_message("none of the recognized phases are executable, skipping test.")
             return None
 
@@ -330,6 +330,8 @@ def with_phases(phases, other_phases=None):
             phase_dir[PHASE0] = spec_phase0
         if ALTAIR in available_phases:
             phase_dir[ALTAIR] = spec_altair
+        if MERGE in available_phases:
+            phase_dir[MERGE] = spec_merge
 
         # return is ignored whenever multiple phases are ran.
         # This return is for test generators to emit python generators (yielding test vector outputs)
@@ -337,6 +339,8 @@ def with_phases(phases, other_phases=None):
             ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
         if ALTAIR in run_phases:
             ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw)
+        if MERGE in run_phases:
+            ret = fn(spec=spec_merge, phases=phase_dir, *args, **kw)
 
         # TODO: merge, sharding, custody_game and das are not executable yet.
         # Tests that specify these features will not run, and get ignored for these specific phases.
@@ -362,6 +366,55 @@ def with_configs(configs, reason=None):
 
 
+def is_post_altair(spec):
+    if spec.fork == MERGE:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_ALTAIR:
+        return False
+    return True
+
+
+def is_post_merge(spec):
+    if spec.fork == ALTAIR:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_MERGE:
+        return False
+    return True
+
+
+with_altair_and_later = with_phases([ALTAIR])  # TODO: include Merge, but not until Merge work is rebased.
+with_merge_and_later = with_phases([MERGE])
+
+
+def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
+    """
+    A decorator to construct a "transition" test from one fork of the eth2 spec
+    to another.
+
+    Decorator assumes a transition from the `pre_fork_name` fork to the
+    `post_fork_name` fork. The user can supply a `fork_epoch` at which the
+    fork occurs, or they must compute one (yielding to the generator) during the test
+    if more custom behavior is desired.
+
+    A test using this decorator should expect to receive as parameters:
+    `state`: the default state constructed for the `pre_fork_name` fork
+        according to the `with_state` decorator.
+    `fork_epoch`: the `fork_epoch` provided to this decorator, if given.
+    `spec`: the version of the eth2 spec corresponding to `pre_fork_name`.
+    `post_spec`: the version of the eth2 spec corresponding to `post_fork_name`.
+    `pre_tag`: a function to tag data as belonging to `pre_fork_name` fork.
+        Used to discriminate data during consumption of the generated spec tests.
+    `post_tag`: a function to tag data as belonging to `post_fork_name` fork.
+        Used to discriminate data during consumption of the generated spec tests.
+    """
+    def _wrapper(fn):
+        @with_phases([pre_fork_name], other_phases=[post_fork_name])
+        @spec_test
+        @with_state
+        def _adapter(*args, **kwargs):
+            wrapped = build_transition_test(fn,
+                                            pre_fork_name,
+                                            post_fork_name,
+                                            fork_epoch=fork_epoch)
+            return wrapped(*args, **kwargs)
+        return _adapter
+    return _wrapper
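A minimal usage sketch of this decorator, mirroring the new transition tests added earlier in this diff (the test body here is illustrative, not one of the committed tests):

```python
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_sketch(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    yield "pre", state
    # ...drive the chain across the fork, wrapping blocks with pre_tag/post_tag
    # so the consumer can tell which spec version each block belongs to...
    yield "blocks", []
    yield "post", state
```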

@@ -1,4 +1,5 @@
-from eth2spec.test.context import is_post_altair
+from eth2spec.test.context import is_post_altair, is_post_merge
+from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
 from eth2spec.test.helpers.keys import privkeys
 from eth2spec.utils import bls
 from eth2spec.utils.bls import only_with_bls
@@ -94,6 +95,9 @@ def build_empty_block(spec, state, slot=None):
     if is_post_altair(spec):
         empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
 
+    if is_post_merge(spec):
+        empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
+
     apply_randao_reveal(spec, state, empty_block)
     return empty_block
 

@@ -7,21 +7,24 @@ from .typing import SpecForkName, ConfigName
 # Some of the Spec module functionality is exposed here to deal with phase-specific changes.
 PHASE0 = SpecForkName('phase0')
 ALTAIR = SpecForkName('altair')
+MERGE = SpecForkName('merge')
 
 # Experimental phases (not included in default "ALL_PHASES"):
-MERGE = SpecForkName('merge')
 SHARDING = SpecForkName('sharding')
 CUSTODY_GAME = SpecForkName('custody_game')
 DAS = SpecForkName('das')
 
 # The forks that pytest runs with.
-ALL_PHASES = (PHASE0, ALTAIR)
+ALL_PHASES = (PHASE0, ALTAIR, MERGE)
 # The forks that output to the test vectors.
-TESTGEN_FORKS = (PHASE0, ALTAIR)
+TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
 # TODO: everything runs in parallel to Altair.
 # After features are rebased on the Altair fork, this can be reduced to just PHASE0.
 FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS)
 
+# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple.
+FORKS_BEFORE_MERGE = (PHASE0,)
 
 #
 # Config
 #

tests/core/pyspec/eth2spec/test/helpers/execution_payload.py (new file, +26)
@@ -0,0 +1,26 @@
+
+def build_empty_execution_payload(spec, state):
+    """
+    Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
+    """
+    latest = state.latest_execution_payload_header
+    timestamp = spec.compute_time_at_slot(state, state.slot)
+    empty_txs = spec.List[spec.OpaqueTransaction, spec.MAX_EXECUTION_TRANSACTIONS]()
+
+    payload = spec.ExecutionPayload(
+        block_hash=spec.Hash32(),
+        parent_hash=latest.block_hash,
+        coinbase=spec.Bytes20(),
+        state_root=latest.state_root,  # no changes to the state
+        number=latest.number + 1,
+        gas_limit=latest.gas_limit,  # retain same limit
+        gas_used=0,  # empty block, 0 gas
+        timestamp=timestamp,
+        receipt_root=b"no receipts here" + b"\x00" * 16,  # TODO: root of empty MPT may be better.
+        logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](),  # TODO: zeroed logs bloom for empty logs ok?
+        transactions=empty_txs,
+    )
+    # TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
+    payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
+
+    return payload

@@ -1,6 +1,7 @@
 from eth2spec.test.helpers.constants import (
     ALTAIR,
     FORKS_BEFORE_ALTAIR,
+    MERGE,
 )
 from eth2spec.test.helpers.keys import pubkeys
 
@@ -28,6 +29,8 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
 
     if spec.fork == ALTAIR:
         current_version = spec.ALTAIR_FORK_VERSION
+    elif spec.fork == MERGE:
+        current_version = spec.MERGE_FORK_VERSION
 
     state = spec.BeaconState(
         genesis_time=0,
@@ -66,9 +69,8 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
 
     if spec.fork not in FORKS_BEFORE_ALTAIR:
         # Fill in sync committees
-        state.current_sync_committee = spec.get_sync_committee(state, spec.get_current_epoch(state))
-        state.next_sync_committee = (
-            spec.get_sync_committee(state, spec.get_current_epoch(state) + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
-        )
+        # Note: A duplicate committee is assigned for the current and next committee at genesis
+        state.current_sync_committee = spec.get_next_sync_committee(state)
+        state.next_sync_committee = spec.get_next_sync_committee(state)
 
     return state

@@ -62,13 +62,13 @@ def run_deltas(spec, state):
 
     if is_post_altair(spec):
         def get_source_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX)
 
         def get_head_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX)
 
         def get_target_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX)
 
     yield from run_attestation_component_deltas(
         spec,
@@ -133,14 +133,23 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
         validator = state.validators[index]
         enough_for_reward = has_enough_for_reward(spec, state, index)
         if index in matching_indices and not validator.slashed:
-            if enough_for_reward:
-                assert rewards[index] > 0
+            if is_post_altair(spec):
+                if not spec.is_in_inactivity_leak(state) and enough_for_reward:
+                    assert rewards[index] > 0
+                else:
+                    assert rewards[index] == 0
             else:
-                assert rewards[index] == 0
+                if enough_for_reward:
+                    assert rewards[index] > 0
+                else:
+                    assert rewards[index] == 0
 
             assert penalties[index] == 0
         else:
             assert rewards[index] == 0
-            if enough_for_reward:
+            if is_post_altair(spec) and 'head' in deltas_name:
+                assert penalties[index] == 0
+            elif enough_for_reward:
                 assert penalties[index] > 0
             else:
                 assert penalties[index] == 0
@@ -225,18 +234,19 @@ def run_get_inactivity_penalty_deltas(spec, state):
             if not is_post_altair(spec):
                 cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
                 base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
             else:
                 base_penalty = sum(
                     base_reward * numerator // spec.WEIGHT_DENOMINATOR
                     for (_, numerator) in spec.get_flag_indices_and_weights()
                 )
 
             if not has_enough_for_reward(spec, state, index):
                 assert penalties[index] == 0
             elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
-                assert penalties[index] == base_penalty
+                if is_post_altair(spec):
+                    assert penalties[index] == 0
+                else:
+                    assert penalties[index] == base_penalty
             else:
-                assert penalties[index] > base_penalty
+                if is_post_altair(spec):
+                    assert penalties[index] > 0
+                else:
+                    assert penalties[index] > base_penalty
         else:
             assert penalties[index] == 0

tests/core/pyspec/eth2spec/test/merge/__init__.py (new empty file)
@@ -0,0 +1,43 @@
+from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
+from eth2spec.test.context import spec_state_test, expect_assertion_error, with_merge_and_later
+from eth2spec.test.helpers.state import next_slot
+
+
+def run_execution_payload_processing(spec, state, execution_payload, valid=True, execution_valid=True):
+    """
+    Run ``process_execution_payload``, yielding:
+      - pre-state ('pre')
+      - execution payload ('execution_payload')
+      - execution details, to mock EVM execution ('execution.yml', a dict with 'execution_valid' key and boolean value)
+      - post-state ('post').
+    If ``valid == False``, run expecting ``AssertionError``
+    """
+
+    pre_exec_header = state.latest_execution_payload_header.copy()
+
+    yield 'pre', state
+    yield 'execution', {'execution_valid': execution_valid}
+    yield 'execution_payload', execution_payload
+
+    if not valid:
+        expect_assertion_error(lambda: spec.process_execution_payload(state, execution_payload))
+        yield 'post', None
+        return
+
+    spec.process_execution_payload(state, execution_payload)
+
+    yield 'post', state
+
+    assert pre_exec_header != state.latest_execution_payload_header
+    # TODO: any more assertions to make?
+
+
+@with_merge_and_later
+@spec_state_test
+def test_success_first_payload(spec, state):
+    next_slot(spec, state)
+    assert not spec.is_transition_completed(state)
+
+    execution_payload = build_empty_execution_payload(spec, state)
+
+    yield from run_execution_payload_processing(spec, state, execution_payload)

tests/core/pyspec/eth2spec/test/merge/sanity/test_blocks.py (new file, +25)
@@ -0,0 +1,25 @@
+from eth2spec.test.helpers.state import (
+    state_transition_and_sign_block
+)
+from eth2spec.test.helpers.block import (
+    build_empty_block_for_next_slot
+)
+from eth2spec.test.context import (
+    with_merge_and_later, spec_state_test
+)
+
+
+@with_merge_and_later
+@spec_state_test
+def test_empty_block_transition(spec, state):
+    yield 'pre', state
+
+    block = build_empty_block_for_next_slot(spec, state)
+    assert len(block.body.execution_payload.transactions) == 0
+
+    signed_block = state_transition_and_sign_block(spec, state, block)
+
+    yield 'blocks', [signed_block]
+    yield 'post', state
+
+# TODO: tests with EVM, mock or replacement?

@@ -1,7 +1,7 @@
 from eth2spec.test.context import with_all_phases, spec_state_test
 from eth2spec.test.helpers.block import build_empty_block_for_next_slot
 from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
-from eth2spec.test.helpers.constants import PHASE0, ALTAIR
+from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
 from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
 from eth2spec.test.helpers.fork_choice import get_genesis_forkchoice_store
 
@@ -19,7 +19,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
         spec.on_attestation(store, attestation)
 
     sample_index = indexed_attestation.attesting_indices[0]
-    if spec.fork in (PHASE0, ALTAIR):
+    if spec.fork in (PHASE0, ALTAIR, MERGE):
         latest_message = spec.LatestMessage(
             epoch=attestation.data.target.epoch,
             root=attestation.data.beacon_block_root,

@@ -1,3 +1,4 @@
+import inspect
 from typing import Dict, Any
 from eth2spec.utils.ssz.ssz_typing import View
 from eth2spec.utils.ssz.ssz_impl import serialize
@@ -93,3 +94,50 @@ def with_meta_tags(tags: Dict[str, Any]):
                 yield k, 'meta', v
         return entry
     return runner
+
+
+def build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=None):
+    """
+    Handles the inner plumbing to generate `transition_test`s.
+    See that decorator in `context.py` for more information.
+    """
+    def _adapter(*args, **kwargs):
+        post_spec = kwargs["phases"][post_fork_name]
+
+        pre_fork_counter = 0
+
+        def pre_tag(obj):
+            nonlocal pre_fork_counter
+            pre_fork_counter += 1
+            return obj
+
+        def post_tag(obj):
+            return obj
+
+        yield "post_fork", "meta", post_fork_name
+
+        has_fork_epoch = False
+        if fork_epoch:
+            kwargs["fork_epoch"] = fork_epoch
+            has_fork_epoch = True
+            yield "fork_epoch", "meta", fork_epoch
+
+        # massage args to handle an optional custom state using
+        # `with_custom_state` decorator
+        expected_args = inspect.getfullargspec(fn)
+        if "phases" not in expected_args.kwonlyargs:
+            kwargs.pop("phases", None)
+
+        for part in fn(*args,
+                       post_spec=post_spec,
+                       pre_tag=pre_tag,
+                       post_tag=post_tag,
+                       **kwargs):
+            if part[0] == "fork_epoch":
+                has_fork_epoch = True
+            yield part
+        assert has_fork_epoch
+
+        if pre_fork_counter > 0:
+            yield "fork_block", "meta", pre_fork_counter - 1
+    return _adapter

@@ -33,17 +33,23 @@ This excludes the other parts of the block-transition.
 
 Operations:
 
-| *`operation-name`*      | *`operation-object`*  | *`input name`*       | *`processing call`*                                              |
-|-------------------------|-----------------------|----------------------|-----------------------------------------------------------------|
-| `attestation`           | `Attestation`         | `attestation`        | `process_attestation(state, attestation)`                        |
-| `attester_slashing`     | `AttesterSlashing`    | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`            |
-| `block_header`          | `BeaconBlock`         | **`block`**          | `process_block_header(state, block)`                             |
-| `deposit`               | `Deposit`             | `deposit`            | `process_deposit(state, deposit)`                                |
-| `proposer_slashing`     | `ProposerSlashing`    | `proposer_slashing`  | `process_proposer_slashing(state, proposer_slashing)`            |
-| `voluntary_exit`        | `SignedVoluntaryExit` | `voluntary_exit`     | `process_voluntary_exit(state, voluntary_exit)`                  |
-| `sync_aggregate`        | `SyncAggregate`       | `sync_aggregate`     | `process_sync_committee(state, sync_aggregate)` (new in Altair)  |
+| *`operation-name`*      | *`operation-object`*  | *`input name`*       | *`processing call`*                                                    |
+|-------------------------|-----------------------|----------------------|------------------------------------------------------------------------|
+| `attestation`           | `Attestation`         | `attestation`        | `process_attestation(state, attestation)`                              |
+| `attester_slashing`     | `AttesterSlashing`    | `attester_slashing`  | `process_attester_slashing(state, attester_slashing)`                  |
+| `block_header`          | `BeaconBlock`         | **`block`**          | `process_block_header(state, block)`                                   |
+| `deposit`               | `Deposit`             | `deposit`            | `process_deposit(state, deposit)`                                      |
+| `proposer_slashing`     | `ProposerSlashing`    | `proposer_slashing`  | `process_proposer_slashing(state, proposer_slashing)`                  |
+| `voluntary_exit`        | `SignedVoluntaryExit` | `voluntary_exit`     | `process_voluntary_exit(state, voluntary_exit)`                        |
+| `sync_aggregate`        | `SyncAggregate`       | `sync_aggregate`     | `process_sync_committee(state, sync_aggregate)` (new in Altair)        |
+| `execution_payload`     | `ExecutionPayload`    | `execution_payload`  | `process_execution_payload(state, execution_payload)` (new in Merge)   |
 
 Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here.
 
+The `execution_payload` processing normally requires a `verify_execution_state_transition(execution_payload)`,
+a responsibility of an (external) execution engine.
+During testing this execution is mocked, and an `execution.yml` is provided instead:
+a dict containing an `execution_valid` boolean field with the verification result.
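For instance, an `execution.yml` for a payload that the mocked engine accepts would presumably contain just:

```yaml
execution_valid: true
```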
 
 The resulting state should match the expected `post` state, or if the `post` state is left blank,
 the handler should reject the input operation as invalid.

tests/formats/transition/README.md (new file, +72)
@@ -0,0 +1,72 @@
+# Transition testing
+
+Transition tests to cover processing the chain across a fork boundary.
+
+Each test case contains a `post_fork` key in the `meta.yaml` that indicates the target fork, which also fixes the fork the test begins in.
+
+Clients should assume forks happen sequentially in the following manner:
+
+0. `phase0`
+1. `altair`
+
+For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.
+
+## Test case format
+
+### `meta.yaml`
+
+```yaml
+post_fork: string    -- String name of the spec after the fork.
+fork_epoch: int      -- The epoch at which the fork takes place.
+fork_block: int      -- Optional. The `<index>` of the last block on the initial fork.
+blocks_count: int    -- The number of blocks processed in this test.
+```
+
+*Note*: There may be a fork transition function to run at the `fork_epoch`.
+Refer to the specs for the relevant fork for further details.
+
+### `pre.ssz_snappy`
+
+An SSZ-snappy encoded `BeaconState` according to the specification of
+the initial fork, the state before running the block transitions.
+
+### `blocks_<index>.ssz_snappy`
+
+A series of files, with `<index>` in range `[0, blocks_count)`.
+Blocks must be processed in order, following the main transition function
+(i.e. process slot and epoch transitions in between blocks as normal).
+
+Blocks are encoded as `SignedBeaconBlock`s from the relevant spec version,
+as indicated by the `post_fork` and `fork_block` data in the `meta.yaml`.
+
+As blocks span fork boundaries, a `fork_block` number is given in
+the `meta.yaml` to help resolve which blocks belong to which fork.
+
+The `fork_block` is the index in the test data of the **last** block
+of the **initial** fork.
+
+To demonstrate, the following diagram shows slots with `_` and blocks
+in those slots as `x`. The fork happens at the epoch delineated by the `|`.
+
+```
+x x x x
+_ _ _ _ | _ _ _ _
+```
+
+The `blocks_count` value in the `meta.yaml` in this case is `4`, where the
+`fork_block` value in the `meta.yaml` is `1`. If this particular example were
+testing the fork from Phase 0 to Altair, blocks with indices `0, 1` represent
+`SignedBeaconBlock`s defined in the Phase 0 spec and blocks with indices `2, 3`
+represent `SignedBeaconBlock`s defined in the Altair spec.
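For the diagram above, the corresponding `meta.yaml` might read as follows (this assumes four-slot epochs and a fork at epoch 1, which the diagram itself does not pin down):

```yaml
post_fork: "altair"
fork_epoch: 1
fork_block: 1
blocks_count: 4
```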
+
+*Note*: If `fork_block` is missing, then all block data should be
+interpreted as belonging to the post fork.
+
+### `post.ssz_snappy`
+
+An SSZ-snappy encoded `BeaconState` according to the specification of
+the post fork, the state after running the block transitions.
+
+## Condition
+
+The resulting state should match the expected `post` state.
@ -1,10 +1,11 @@
|
||||
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.altair import spec as spec_altair
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
|
||||
from eth2spec.merge import spec as spec_merge
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE
|
||||
|
||||
|
||||
specs = (spec_phase0, spec_altair)
|
||||
specs = (spec_phase0, spec_altair, spec_merge)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
@ -27,6 +28,10 @@ if __name__ == "__main__":
|
||||
**phase_0_mods,
|
||||
} # also run the previous phase 0 tests
|
||||
|
||||
# No epoch-processing changes in Merge and previous testing repeats with new types, so no additional tests required.
|
||||
# TODO: rebase onto Altair testing later.
|
||||
merge_mods = phase_0_mods
|
||||
|
||||
# TODO Custody Game testgen is disabled for now
|
||||
# custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [
|
||||
# 'reveal_deadlines',
|
||||
@ -37,6 +42,7 @@ if __name__ == "__main__":
|
||||
all_mods = {
|
||||
PHASE0: phase_0_mods,
|
||||
ALTAIR: altair_mods,
|
||||
MERGE: merge_mods,
|
||||
}
|
||||
|
||||
run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods)

@@ -1,19 +1,22 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
    phase_0_mods = {'finality': 'eth2spec.test.phase0.finality.test_finality'}
    altair_mods = phase_0_mods  # No additional altair specific finality tests
    altair_mods = phase_0_mods  # No additional Altair specific finality tests
    merge_mods = phase_0_mods  # No additional Merge specific finality tests

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }
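    # Note: all_mods maps each fork name to its handler->module dict
    # (not to a spec module); the spec modules are passed separately via `specs`.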

    run_state_test_generators(runner_name="finality", specs=specs, all_mods=all_mods)

@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -13,10 +14,13 @@ if __name__ == "__main__":
    ]}
    # No additional Altair specific fork choice tests, yet.
    altair_mods = phase_0_mods
    # No specific Merge tests yet. TODO: rebase onto Altair testing later.
    merge_mods = phase_0_mods

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="fork_choice", specs=specs, all_mods=all_mods)

@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -23,6 +24,13 @@ if __name__ == "__main__":
        **phase_0_mods,
    }  # also run the previous phase 0 tests

    merge_mods = {
        **{key: 'eth2spec.test.merge.block_processing.test_process_' + key for key in [
            'execution_payload',
        ]},
        **phase_0_mods,  # TODO: runs phase 0 tests. Rebase to include `altair_mods` testing later.
    }
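    # Note: in `{**a, **b}` later keys win on collision; the handler names
    # here are disjoint, so this simply unions the Merge-specific
    # execution_payload tests with the inherited phase 0 modules.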

    # TODO Custody Game testgen is disabled for now
    # custody_game_mods = {**{key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [
    #     'attestation',
@@ -35,6 +43,7 @@ if __name__ == "__main__":
    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="operations", specs=specs, all_mods=all_mods)

@@ -1,10 +1,11 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -16,9 +17,15 @@ if __name__ == "__main__":
    # No additional Altair specific rewards tests, yet.
    altair_mods = phase_0_mods

    # No additional Merge specific rewards tests, yet.
    # Note: block rewards are non-epoch rewards and are tested as part of block processing tests.
    # Transaction fees are part of the execution layer.
    merge_mods = phase_0_mods

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="rewards", specs=specs, all_mods=all_mods)

@@ -1,11 +1,12 @@
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.merge import spec as spec_merge
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, MERGE

from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators


specs = (spec_phase0, spec_altair)
specs = (spec_phase0, spec_altair, spec_merge)


if __name__ == "__main__":
@@ -17,9 +18,15 @@ if __name__ == "__main__":
        'blocks',
    ]}, **phase_0_mods}  # also run the previous phase 0 tests

    # Altair-specific test cases are ignored, but should be included after the Merge is rebased onto Altair work.
    merge_mods = {**{key: 'eth2spec.test.merge.sanity.test_' + key for key in [
        'blocks',
    ]}, **phase_0_mods}  # TODO: Merge inherits phase 0 tests for now.

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
        MERGE: merge_mods,
    }

    run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods)

@@ -93,8 +93,7 @@ if __name__ == "__main__":
    seed += 1
    settings.append((seed, MAINNET, random_value.RandomizationMode.mode_random, False, 5))
    seed += 1
    # TODO: enable testing for the whole merge spec.
    for fork in TESTGEN_FORKS + (MERGE,):
    for fork in TESTGEN_FORKS:
        gen_runner.run_generator("ssz_static", [
            create_provider(fork, config_name, seed, mode, chaos, cases_if_random)
            for (seed, config_name, mode, chaos, cases_if_random) in settings

42
tests/generators/transition/main.py
Normal file
@@ -0,0 +1,42 @@
from importlib import reload
from typing import Iterable

from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.config import config_util
from eth2spec.test.altair.transition import test_transition as test_altair_transition
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair

from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests


def create_provider(tests_src, config_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        config_util.prepare_config(configs_path, config_name)
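        # Reload the spec modules so they pick up the freshly prepared config.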
        reload(spec_phase0)
        reload(spec_altair)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='transition',
            handler_name='core',
            src=tests_src,
            fork_name=post_fork_name,
            phase=pre_fork_name,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


TRANSITION_TESTS = ((PHASE0, ALTAIR, test_altair_transition),)
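# Each entry is a (pre_fork, post_fork, test_module) triple; future fork
# transitions extend this tuple with their own transition test modules.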


if __name__ == "__main__":
    for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
        gen_runner.run_generator("transition", [
            create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
            create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
        ])

2
tests/generators/transition/requirements.txt
Normal file
@@ -0,0 +1,2 @@
pytest>=4.4
../../../[generator]