Merge branch 'dev' into vbuterin-patch-11
commit 24f6e1e99f
@@ -23,6 +23,7 @@ The current features are:
 * [Deposit Contract](specs/phase0/deposit-contract.md)
 * [Honest Validator](specs/phase0/validator.md)
 * [P2P Networking](specs/phase0/p2p-interface.md)
+* [Weak Subjectivity](specs/phase0/weak-subjectivity.md)

 ### Altair

@@ -28,12 +28,11 @@
 - [`Predicates`](#predicates)
 - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
 - [Misc](#misc-2)
-- [`get_flag_indices_and_weights`](#get_flag_indices_and_weights)
 - [`add_flag`](#add_flag)
 - [`has_flag`](#has_flag)
 - [Beacon state accessors](#beacon-state-accessors)
-- [`get_sync_committee_indices`](#get_sync_committee_indices)
+- [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
-- [`get_sync_committee`](#get_sync_committee)
+- [`get_next_sync_committee`](#get_next_sync_committee)
 - [`get_base_reward_per_increment`](#get_base_reward_per_increment)
 - [`get_base_reward`](#get_base_reward)
 - [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
@@ -99,6 +98,7 @@ Altair is the first beacon chain hard fork. Its main features are:
 | Name | Value |
 | - | - |
 | `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
+| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_HEAD_WEIGHT, TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT]` |

 ## Configuration

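The new `PARTICIPATION_FLAG_WEIGHTS` constant takes over the role of the `get_flag_indices_and_weights` helper that is removed in the next hunk: callers recover `(flag_index, weight)` pairs by enumerating the list, as `process_attestation` and `process_rewards_and_penalties` do later in this commit. A minimal sketch of that pattern (the weight values below are placeholders, not the spec constants):

```python
# Placeholder weights for illustration only; the real values live in the Altair spec.
TIMELY_HEAD_WEIGHT, TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT = 1, 1, 1
PARTICIPATION_FLAG_WEIGHTS = [TIMELY_HEAD_WEIGHT, TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT]

# Old call sites iterated get_flag_indices_and_weights(); new call sites do this:
for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
    assert 0 <= flag_index < len(PARTICIPATION_FLAG_WEIGHTS) and weight > 0
```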
@@ -233,20 +233,6 @@ def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, s

 ### Misc

-#### `get_flag_indices_and_weights`
-
-```python
-def get_flag_indices_and_weights() -> Sequence[Tuple[int, uint64]]:
-    """
-    Return paired tuples of participation flag indices along with associated incentivization weights.
-    """
-    return (
-        (TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT),
-        (TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT),
-        (TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_WEIGHT),
-    )
-```
-
 #### `add_flag`

 ```python
@@ -271,22 +257,22 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:

 ### Beacon state accessors

-#### `get_sync_committee_indices`
+#### `get_next_sync_committee_indices`

 ```python
-def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
+def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
     """
     Return the sequence of sync committee indices (which may include duplicate indices)
-    for a given ``state`` and ``epoch``.
+    for the next sync committee, given a ``state`` at a sync committee period boundary.

-    Note: This function is not stable during a sync committee period as
-    a validator's effective balance may change enough to affect the sampling.
+    Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128)
     """
+    epoch = Epoch(get_current_epoch(state) + 1)
+
     MAX_RANDOM_BYTE = 2**8 - 1
-    base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
-    active_validator_indices = get_active_validator_indices(state, base_epoch)
+    active_validator_indices = get_active_validator_indices(state, epoch)
     active_validator_count = uint64(len(active_validator_indices))
-    seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
+    seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
     i = 0
     sync_committee_indices: List[ValidatorIndex] = []
     while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
@@ -294,31 +280,31 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
         candidate_index = active_validator_indices[shuffled_index]
         random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
         effective_balance = state.validators[candidate_index].effective_balance
-        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:  # Sample with replacement
+        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
             sync_committee_indices.append(candidate_index)
         i += 1
     return sync_committee_indices
 ```

-#### `get_sync_committee`
+#### `get_next_sync_committee`

 ```python
-def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
+def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
     """
-    Return the sync committee for a given ``state`` and ``epoch``.
+    Return the *next* sync committee for a given ``state``.

     ``SyncCommittee`` contains an aggregate pubkey that enables
     resource-constrained clients to save some computation when verifying
     the sync committee's signature.

-    ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_sync_committee_indices``
+    ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices``
     returns duplicate indices. Implementations must take care when handling
     optimizations relating to aggregation and verification in the presence of duplicates.

-    Note: This function should only be called at sync committee period boundaries, as
-    ``get_sync_committee_indices`` is not stable within a given period.
+    Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates``
+    as ``get_next_sync_committee_indices`` is not stable within a given period.
     """
-    indices = get_sync_committee_indices(state, epoch)
+    indices = get_next_sync_committee_indices(state)
     pubkeys = [state.validators[index].pubkey for index in indices]
     aggregate_pubkey = bls.AggregatePKs(pubkeys)
     return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
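Because the committee is sampled with replacement, `SyncCommittee.pubkeys` may contain the same pubkey several times, each occurrence carrying its own participation bit. A minimal sketch of the duplicate-aware selection the docstring warns about, assuming a BLS library exposing `AggregatePKs` as used above (illustrative, not the spec's verification routine):

```python
from typing import List, Sequence

def participating_pubkeys(committee_pubkeys: Sequence[bytes], participation_bits: Sequence[bool]) -> List[bytes]:
    # A pubkey appearing k times with k bits set must be kept k times; de-duplicating
    # here would make the aggregate public key disagree with the aggregate signature.
    return [pubkey for pubkey, bit in zip(committee_pubkeys, participation_bits) if bit]

# aggregate_pubkey = bls.AggregatePKs(participating_pubkeys(committee.pubkeys, bits))  # hypothetical usage
```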
@@ -368,35 +354,31 @@ def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epo
 #### `get_flag_index_deltas`

 ```python
-def get_flag_index_deltas(state: BeaconState, flag_index: int, weight: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
+def get_flag_index_deltas(state: BeaconState, flag_index: int) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
     """
     Return the deltas for a given ``flag_index`` scaled by ``weight`` by scanning through the participation flags.
     """
     rewards = [Gwei(0)] * len(state.validators)
     penalties = [Gwei(0)] * len(state.validators)
-    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
-    increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balances to avoid uint64 overflow
-    unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
-    active_increments = get_total_active_balance(state) // increment
+    previous_epoch = get_previous_epoch(state)
+    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, previous_epoch)
+    weight = PARTICIPATION_FLAG_WEIGHTS[flag_index]
+    unslashed_participating_balance = get_total_balance(state, unslashed_participating_indices)
+    unslashed_participating_increments = unslashed_participating_balance // EFFECTIVE_BALANCE_INCREMENT
+    active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
     for index in get_eligible_validator_indices(state):
         base_reward = get_base_reward(state, index)
         if index in unslashed_participating_indices:
-            if is_in_inactivity_leak(state):
-                # This flag reward cancels the inactivity penalty corresponding to the flag index
-                rewards[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
-            else:
+            if not is_in_inactivity_leak(state):
                 reward_numerator = base_reward * weight * unslashed_participating_increments
                 rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR))
-        else:
+        elif flag_index != TIMELY_HEAD_FLAG_INDEX:
             penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
     return rewards, penalties
 ```

 #### Modified `get_inactivity_penalty_deltas`

-*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
-and the removal of `BASE_REWARDS_PER_EPOCH`.
-
 ```python
 def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
     """
@@ -407,11 +389,6 @@ def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], S
     previous_epoch = get_previous_epoch(state)
     matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
     for index in get_eligible_validator_indices(state):
-        if is_in_inactivity_leak(state):
-            # TODO: to be removed in PR 2399
-            for (_, weight) in get_flag_indices_and_weights():
-                # This inactivity penalty cancels the flag reward corresponding to the flag index
-                penalties[index] += Gwei(get_base_reward(state, index) * weight // WEIGHT_DENOMINATOR)
         if index not in matching_target_indices:
             penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
             penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
@@ -505,7 +482,7 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None:
     # Update epoch participation flags
     proposer_reward_numerator = 0
     for index in get_attesting_indices(state, data, attestation.aggregation_bits):
-        for flag_index, weight in get_flag_indices_and_weights():
+        for flag_index, weight in enumerate(PARTICIPATION_FLAG_WEIGHTS):
             if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                 epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                 proposer_reward_numerator += get_base_reward(state, index) * weight
@@ -650,8 +627,7 @@ def process_rewards_and_penalties(state: BeaconState) -> None:
     if get_current_epoch(state) == GENESIS_EPOCH:
         return

-    flag_indices_and_numerators = get_flag_indices_and_weights()
-    flag_deltas = [get_flag_index_deltas(state, index, numerator) for (index, numerator) in flag_indices_and_numerators]
+    flag_deltas = [get_flag_index_deltas(state, flag_index) for flag_index in range(len(PARTICIPATION_FLAG_WEIGHTS))]
     deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
     for (rewards, penalties) in deltas:
         for index in range(len(state.validators)):
@@ -695,7 +671,7 @@ def process_sync_committee_updates(state: BeaconState) -> None:
     next_epoch = get_current_epoch(state) + Epoch(1)
     if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
         state.current_sync_committee = state.next_sync_committee
-        state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+        state.next_sync_committee = get_next_sync_committee(state)
 ```

 ## Initialize state for pure Altair testnets and test vectors
@@ -740,8 +716,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     state.genesis_validators_root = hash_tree_root(state.validators)

     # [New in Altair] Fill in sync committees
-    state.current_sync_committee = get_sync_committee(state, get_current_epoch(state))
-    state.next_sync_committee = get_sync_committee(state, get_current_epoch(state) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    # Note: A duplicate committee is assigned for the current and next committee at genesis
+    state.current_sync_committee = get_next_sync_committee(state)
+    state.next_sync_committee = get_next_sync_committee(state)

     return state
 ```
@@ -38,7 +38,11 @@ Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since

 ### Upgrading the state

-After `process_slots` of Phase 0 finishes, if `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
+If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.

+The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`.
+Care must be taken when transitioning through the fork boundary as implementations will need a modified state transition function that deviates from the Phase 0 spec.
+In particular, the outer `state_transition` function defined in the Phase 0 spec will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
+
 ```python
 def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
@@ -80,8 +84,10 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
         # Inactivity
         inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
     )
+
     # Fill in sync committees
-    post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
-    post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
+    post.current_sync_committee = get_next_sync_committee(post)
+    post.next_sync_committee = get_next_sync_committee(post)
     return post
 ```
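The prose above notes that the upgrade has to live inside `process_slots`, since the outer Phase 0 `state_transition` never sees the exact fork slot when slots are skipped. A hedged sketch of one way a client might arrange this; `process_slot`, `process_epoch`, `upgrade_to_altair`, and the constants are assumed from the specs, and the surrounding wrapper is illustrative rather than normative:

```python
def process_slots_through_altair_fork(state, slot):
    # Illustrative fork-aware variant of the Phase 0 process_slots loop.
    assert state.slot < slot
    while state.slot < slot:
        process_slot(state)
        # Process epoch on the start slot of the next epoch
        if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch(state)
        state.slot += 1
        # Upgrade immediately after the inner loop sets state.slot to the first
        # Altair slot, even if `slot` lies beyond the fork due to skipped slots.
        if state.slot == ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH:
            state = upgrade_to_altair(state)
    return state
```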
@@ -106,9 +106,18 @@ The following validations MUST pass before forwarding the `signed_contribution_a

 ```python
 def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
+    # Committees assigned to `slot` sign for `slot - 1`
+    # This creates the exceptional logic below when transitioning between sync committee periods
+    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
+    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
+        sync_committee = state.current_sync_committee
+    else:
+        sync_committee = state.next_sync_committee
+
+    # Return pubkeys for the subcommittee index
     sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
     i = subcommittee_index * sync_subcommittee_size
-    return state.current_sync_committee.pubkeys[i:i + sync_subcommittee_size]
+    return sync_committee.pubkeys[i:i + sync_subcommittee_size]
 ```

 - _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
@@ -143,6 +143,11 @@ A validator determines beacon committee assignments and beacon block proposal du
 To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
 This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.

+*Note*: Being assigned to a sync committee for a given `slot` means that the validator produces and broadcasts signatures for `slot - 1` for inclusion in `slot`.
+This means that when assigned to an `epoch` sync committee signatures must be produced and broadcast for slots on range `[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)`
+rather than for the range `[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`.
+To reduce complexity during the Altair fork, sync committees are not expected to produce signatures for `compute_epoch_at_slot(ALTAIR_FORK_EPOCH) - 1`.
+
 ```python
 def compute_sync_committee_period(epoch: Epoch) -> uint64:
     return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
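A small worked example of the shifted signing range described in the note above, assuming the mainnet preset `SLOTS_PER_EPOCH = 32` (the helper below is illustrative, not a spec function):

```python
SLOTS_PER_EPOCH = 32  # mainnet preset

def compute_start_slot_at_epoch(epoch: int) -> int:
    return epoch * SLOTS_PER_EPOCH

def sync_committee_signing_slots(epoch: int) -> range:
    # One slot earlier than the epoch's own slots: a member assigned at slot N signs for slot N - 1.
    start = compute_start_slot_at_epoch(epoch) - 1
    return range(start, start + SLOTS_PER_EPOCH)

# For epoch 5 the signatures cover slots 159..190 instead of 160..191.
assert list(sync_committee_signing_slots(5)) == list(range(159, 191))
```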
@@ -172,7 +177,6 @@ At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and
 Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.

 *Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
-This means that calling `get_sync_commitee()` in a given `epoch` can return a different result than what was computed during the relevant epoch transition.
 For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.

 A validator should plan for future sync committee assignments by noting which sync committee periods they are selected for participation.
@@ -224,12 +228,12 @@ def process_sync_committee_contributions(block: BeaconBlock,
                                          contributions: Set[SyncCommitteeContribution]) -> None:
     sync_aggregate = SyncAggregate()
     signatures = []
+    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT

     for contribution in contributions:
         subcommittee_index = contribution.subcommittee_index
         for index, participated in enumerate(contribution.aggregation_bits):
             if participated:
-                sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
                 participant_index = sync_subcommittee_size * subcommittee_index + index
                 sync_aggregate.sync_committee_bits[participant_index] = True
                 signatures.append(contribution.signature)
@@ -261,12 +265,12 @@ This process occurs each slot.

 ##### Prepare sync committee signature

-If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every slot in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of the current slot.
+If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`.

 This logic is triggered upon the same conditions as when producing an attestation.
 Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.

-`get_sync_committee_signature()` assumes `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
+`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.

 ```python
 def get_sync_committee_signature(state: BeaconState,
|
||||||
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.
|
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.
|
||||||
|
|
||||||
The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
|
The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
|
||||||
`subnet_id` can be computed via `compute_subnets_for_sync_committee()` where `state` is a `BeaconState` during the matching sync committee period.
|
`subnet_id` can be computed via `compute_subnets_for_sync_committee(state, validator_index)` where `state` is a `BeaconState` during the matching sync committee period.
|
||||||
|
|
||||||
*Note*: This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.
|
*Note*: This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
|
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
|
||||||
|
next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
|
||||||
|
if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
|
||||||
|
sync_committee = state.current_sync_committee
|
||||||
|
else:
|
||||||
|
sync_committee = state.next_sync_committee
|
||||||
|
|
||||||
target_pubkey = state.validators[validator_index].pubkey
|
target_pubkey = state.validators[validator_index].pubkey
|
||||||
sync_committee_indices = [
|
sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
|
||||||
index for index, pubkey in enumerate(state.current_sync_committee.pubkeys)
|
|
||||||
if pubkey == target_pubkey
|
|
||||||
]
|
|
||||||
return [
|
return [
|
||||||
uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
|
uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
|
||||||
for index in sync_committee_indices
|
for index in sync_committee_indices
|
||||||
|
@@ -359,7 +366,7 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co
 ###### Aggregation bits

 Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
-An aggregator finds the index in the sync committee (as returned by `get_sync_committee_indices()`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggegration_bits`.
+An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggegration_bits`.

 For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.

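The example's numbers can be checked directly: with a subcommittee size of `SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT = 128` (consistent with the figures used above), sync committee index `135` lands in subcommittee `1` at position `7`:

```python
SYNC_SUBCOMMITTEE_SIZE = 128  # SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT for these examples

def subcommittee_position(sync_committee_index: int) -> tuple:
    # Map a position in the full sync committee to (subcommittee_index, position in its Bitvector).
    return divmod(sync_committee_index, SYNC_SUBCOMMITTEE_SIZE)

assert subcommittee_position(135) == (1, 7)  # the validator-2044 example above
```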
@@ -93,6 +93,8 @@ It consists of four main sections:
 - [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc)
 - [Why do we allow empty responses in block requests?](#why-do-we-allow-empty-responses-in-block-requests)
 - [Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?](#why-does-beaconblocksbyrange-let-the-server-choose-which-branch-to-send-blocks-from)
+- [Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?](#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs)
+- [Why must the proposer signature be checked when backfilling blocks in the database?](#why-must-the-proposer-signature-be-checked-when-backfilling-blocks-in-the-database)
 - [What's the effect of empty slots on the sync algorithm?](#whats-the-effect-of-empty-slots-on-the-sync-algorithm)
 - [Discovery](#discovery)
 - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
|
||||||
|---|---|---|
|
|---|---|---|
|
||||||
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
|
||||||
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
|
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
|
||||||
|
| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
|
||||||
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
|
| `MAX_CHUNK_SIZE` | `2**20` (1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
|
||||||
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
|
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
|
||||||
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
|
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
|
||||||
|
@ -179,7 +182,6 @@ This section outlines constants that are used in this spec.
|
||||||
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
|
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
|
||||||
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
|
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
|
||||||
|
|
||||||
|
|
||||||
## MetaData
|
## MetaData
|
||||||
|
|
||||||
Clients MUST locally store the following `MetaData`:
|
Clients MUST locally store the following `MetaData`:
|
||||||
|
@ -565,6 +567,8 @@ The response code can have one of the following values, encoded as a single unsi
|
||||||
The response payload adheres to the `ErrorMessage` schema (described below).
|
The response payload adheres to the `ErrorMessage` schema (described below).
|
||||||
- 2: **ServerError** -- the responder encountered an error while processing the request.
|
- 2: **ServerError** -- the responder encountered an error while processing the request.
|
||||||
The response payload adheres to the `ErrorMessage` schema (described below).
|
The response payload adheres to the `ErrorMessage` schema (described below).
|
||||||
|
- 3: **ResourceUnavailable** -- the responder does not have requested resource.
|
||||||
|
The response payload adheres to the `ErrorMessage` schema (described below).
|
||||||
|
|
||||||
Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.
|
Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.
|
||||||
|
|
||||||
|
@@ -745,10 +749,27 @@ The request MUST be encoded as an SSZ-container.
 The response MUST consist of zero or more `response_chunk`.
 Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.

-Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period
-and MUST support serving requests of blocks up to their own `head_block_root`.
+Clients MUST keep a record of signed blocks seen on the epoch range
+`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]`
+where `current_epoch` is defined by the current wall-clock time,
+and clients MUST support serving requests of blocks on this range.

-Clients MUST respond with at least the first block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOCKS` blocks.
+Peers that are unable to reply to block requests within the
+`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epoch range MAY get descored or disconnected at any time.
+
+*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
+MUST backfill the local block database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS`
+to be fully compliant with `BlocksByRange` requests. To safely perform such a
+backfill of blocks to the recent state, the node MUST validate both (1) the
+proposer signatures and (2) that the blocks form a valid chain up to the most
+recent block referenced in the weak subjectivity state.
+
+*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
+participating in the networking immediately, other peers MAY
+disconnect and/or temporarily ban such an un-synced or semi-synced client.
+
+Clients MUST respond with at least the first block that exists in the range, if they have it,
+and no more than `MAX_REQUEST_BLOCKS` blocks.

 The following blocks, where they exist, MUST be sent in consecutive order.

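The mandatory serving window quoted above can be expressed as a small predicate a client might use when deciding whether a `BeaconBlocksByRange` request falls inside the range it MUST serve; the constant names are the spec's, while the helper itself is only a sketch:

```python
GENESIS_EPOCH = 0
MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024  # value given in the constants table above

def must_serve_epoch(requested_epoch: int, current_epoch: int) -> bool:
    # True if requested_epoch lies in [max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch].
    lower_bound = max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS)
    return lower_bound <= requested_epoch <= current_epoch

# With a wall-clock epoch of 100000, epochs 66976..100000 must be served.
assert must_serve_epoch(66976, 100000) and not must_serve_epoch(66975, 100000)
```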
@@ -1393,6 +1414,45 @@ To avoid this race condition, we allow the responding side to choose which branc
 The requesting client then goes on to validate the blocks and incorporate them in their own database
 -- because they follow the same rules, they should at this point arrive at the same canonical chain.

+### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
+
+Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network
+the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
+beacon state and then a simple block sync from there to the head. We expect the latter to be the dominant UX strategy.
+
+These checkpoints *in the worst case* (i.e. very large validator set and maximal allowed safety decay) must be from the
+most recent `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block sync to the head from this starting point.
+Thus, this defines the epoch range outside which nodes may prune blocks, and
+the epoch range that a new node syncing from a checkpoint must backfill.
+
+`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from `compute_weak_subjectivity_period` found in the
+[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max epoch range, we use the worst case event of a very large validator size
+(`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`).
+
+```python
+MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
+    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
+)
+```
+
+Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).
+
+### Why must the proposer signature be checked when backfilling blocks in the database?
+
+When backfilling blocks in a database from a know safe block/state (e.g. when starting from a weak subjectivity state),
+the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
+but also must check that the proposer signature is valid in the `SignedBeaconBlock` wrapper.
+
+This is because the signature is not part of the `BeaconBlock` hash chain, and
+thus could be corrupted by an attacker serving valid `BeaconBlock`s but invalid
+signatures contained in `SignedBeaconBlock`.
+
+Although in this particular use case this does not represent a decay in safety
+(due to the assumptions of starting at a weak subjectivity checkpoint), it
+would represent invalid historic data and could be unwittingly transmitted to
+additional nodes.
+
 ### What's the effect of empty slots on the sync algorithm?

 When syncing one can only tell that a slot has been skipped on a particular branch
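A quick arithmetic check of the formula above, plugging in the mainnet Phase 0 values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256` and `CHURN_LIMIT_QUOTIENT = 2**16` (assumed here from the Phase 0 configuration) together with `MAX_SAFETY_DECAY = 100`:

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256  # 2**8 epochs, mainnet Phase 0 (assumed)
CHURN_LIMIT_QUOTIENT = 2**16               # mainnet Phase 0 (assumed)
MAX_SAFETY_DECAY = 100

MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
    MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
assert MIN_EPOCHS_FOR_BLOCK_REQUESTS == 33024

# Roughly five months of wall-clock time at 32 slots/epoch and 12 seconds/slot.
assert round(MIN_EPOCHS_FOR_BLOCK_REQUESTS * 32 * 12 / 86400) == 147  # ~147 days
```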
@@ -136,7 +136,9 @@ A brief reference for what these values look like in practice ([reference script

 ## Weak Subjectivity Sync

-Clients should allow users to input a Weak Subjectivity Checkpoint at startup, and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain. If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.
+Clients should allow users to input a Weak Subjectivity Checkpoint at startup,
+and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain.
+If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.

 ### Weak Subjectivity Sync Procedure

@@ -7,7 +7,6 @@ from eth2spec.test.helpers.block_processing import run_block_processing_to
 from eth2spec.test.helpers.state import (
     state_transition_and_sign_block,
     transition_to,
-    next_epoch,
 )
 from eth2spec.test.helpers.constants import (
     MAINNET, MINIMAL,
@@ -50,9 +49,9 @@ def get_committee_indices(spec, state, duplicates=False):
     """
     state = state.copy()
     current_epoch = spec.get_current_epoch(state)
-    randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
+    randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
     while True:
-        committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+        committee = spec.get_next_sync_committee_indices(state)
         if duplicates:
             if len(committee) != len(set(committee)):
                 return committee
@@ -62,23 +61,32 @@ def get_committee_indices(spec, state, duplicates=False):
         state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])


+def compute_committee_indices(spec, state, committee):
+    """
+    Given a ``committee``, calculate and return the related indices
+    """
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
+    return committee_indices
+
+
 @with_altair_and_later
 @spec_state_test
 @always_bls
 def test_invalid_signature_missing_participant(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
     rng = random.Random(2020)
-    random_participant = rng.choice(committee)
+    random_participant = rng.choice(committee_indices)

     block = build_empty_block_for_next_slot(spec, state)
     # Exclude one participant whose signature was included.
     block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[index != random_participant for index in committee],
+        sync_committee_bits=[index != random_participant for index in committee_indices],
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             block.slot - 1,
-            committee,  # full committee signs
+            committee_indices,  # full committee signs
         )
     )
     yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
@@ -88,31 +96,38 @@ def test_invalid_signature_missing_participant(spec, state):
 @spec_state_test
 @always_bls
 def test_invalid_signature_extra_participant(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
     rng = random.Random(3030)
-    random_participant = rng.choice(committee)
+    random_participant = rng.choice(committee_indices)

     block = build_empty_block_for_next_slot(spec, state)
     # Exclude one signature even though the block claims the entire committee participated.
     block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[True] * len(committee),
+        sync_committee_bits=[True] * len(committee_indices),
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             block.slot - 1,
-            [index for index in committee if index != random_participant],
+            [index for index in committee_indices if index != random_participant],
         )
     )

     yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


-def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
+def compute_sync_committee_inclusion_reward(spec,
+                                            state,
+                                            participant_index,
+                                            committee_indices,
+                                            committee_bits):
     total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
     total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
     max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
-    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
-    max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)
+    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
+    max_slot_rewards = spec.Gwei(
+        max_epoch_rewards * len(included_indices)
+        // len(committee_indices) // spec.SLOTS_PER_EPOCH
+    )

     # Compute the participant and proposer sync rewards
     committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
@@ -121,23 +136,23 @@ def compute_sync_committee_inclusion_reward(spec, state, participant_index, comm
     return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)


-def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
+def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
-    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
+    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
     multiplicities = Counter(included_indices)

     inclusion_reward = compute_sync_committee_inclusion_reward(
-        spec, state, participant_index, committee, committee_bits,
+        spec, state, participant_index, committee_indices, committee_bits,
     )
     return spec.Gwei(inclusion_reward * multiplicities[participant_index])


-def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
+def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
     proposer_reward = 0
-    for index, bit in zip(committee, committee_bits):
+    for index, bit in zip(committee_indices, committee_bits):
         if not bit:
             continue
         inclusion_reward = compute_sync_committee_inclusion_reward(
-            spec, state, index, committee, committee_bits,
+            spec, state, index, committee_indices, committee_bits,
         )
         proposer_reward_denominator = (
             (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
@@ -148,15 +163,15 @@ def compute_sync_committee_proposer_reward(spec, state, committee, committee_bit
     return proposer_reward


-def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
+def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
     for index in range(len(post_state.validators)):
         reward = 0
-        if index in committee:
+        if index in committee_indices:
             reward += compute_sync_committee_participant_reward(
                 spec,
                 pre_state,
                 index,
-                committee,
+                committee_indices,
                 committee_bits,
             )

@@ -164,14 +179,14 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee, comm
             reward += compute_sync_committee_proposer_reward(
                 spec,
                 pre_state,
-                committee,
+                committee_indices,
                 committee_bits,
             )

         assert post_state.balances[index] == pre_state.balances[index] + reward


-def run_successful_sync_committee_test(spec, state, committee, committee_bits):
+def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
     pre_state = state.copy()

     block = build_empty_block_for_next_slot(spec, state)
@@ -181,7 +196,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
             spec,
             state,
             block.slot - 1,
-            [index for index, bit in zip(committee, committee_bits) if bit],
+            [index for index, bit in zip(committee_indices, committee_bits) if bit],
         )
     )

|
||||||
spec,
|
spec,
|
||||||
pre_state,
|
pre_state,
|
||||||
state,
|
state,
|
||||||
committee,
|
committee_indices,
|
||||||
committee_bits,
|
committee_bits,
|
||||||
block.proposer_index,
|
block.proposer_index,
|
||||||
)
|
)
|
||||||
|
@@ -201,60 +216,60 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
 @with_configs([MINIMAL], reason="to create nonduplicate committee")
 @spec_state_test
 def test_sync_committee_rewards_nonduplicate_committee(spec, state):
-    committee = get_committee_indices(spec, state, duplicates=False)
-    committee_size = len(committee)
+    committee_indices = get_committee_indices(spec, state, duplicates=False)
+    committee_size = len(committee_indices)
     committee_bits = [True] * committee_size
     active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

     # Preconditions of this test case
     assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
-    assert committee_size == len(set(committee))
+    assert committee_size == len(set(committee_indices))

-    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
+    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)


 @with_altair_and_later
 @with_configs([MAINNET], reason="to create duplicate committee")
 @spec_state_test
 def test_sync_committee_rewards_duplicate_committee(spec, state):
-    committee = get_committee_indices(spec, state, duplicates=True)
-    committee_size = len(committee)
+    committee_indices = get_committee_indices(spec, state, duplicates=True)
+    committee_size = len(committee_indices)
     committee_bits = [True] * committee_size
     active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

     # Preconditions of this test case
     assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
-    assert committee_size > len(set(committee))
+    assert committee_size > len(set(committee_indices))

-    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
+    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)


 @with_altair_and_later
 @spec_state_test
 @always_bls
 def test_sync_committee_rewards_not_full_participants(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
     rng = random.Random(1010)
-    committee_bits = [rng.choice([True, False]) for _ in committee]
+    committee_bits = [rng.choice([True, False]) for _ in committee_indices]

-    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
+    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)


 @with_altair_and_later
 @spec_state_test
 @always_bls
 def test_sync_committee_rewards_empty_participants(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
-    committee_bits = [False for _ in committee]
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
+    committee_bits = [False for _ in committee_indices]

-    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)
+    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)


 @with_altair_and_later
 @spec_state_test
 @always_bls
 def test_invalid_signature_past_block(spec, state):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)

     blocks = []
     for _ in range(2):
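These tests now call a `compute_committee_indices` helper instead of the removed `spec.get_sync_committee_indices` accessor. A minimal sketch of what such a helper could look like, assuming it simply maps the committee's pubkeys back to validator indices (the same inline pattern appears verbatim in later hunks of this diff):

```python
def compute_committee_indices(spec, state, committee):
    """Map a SyncCommittee's pubkeys back to validator indices (duplicates preserved)."""
    all_pubkeys = [validator.pubkey for validator in state.validators]
    return [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
```

Note that `list.index` is a linear scan per pubkey, which is fine at test scale but would typically be a dict lookup in production code.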
@@ -262,12 +277,12 @@ def test_invalid_signature_past_block(spec, state):
         block = build_empty_block_for_next_slot(spec, state)
         # Valid sync committee signature here...
         block.body.sync_aggregate = spec.SyncAggregate(
-            sync_committee_bits=[True] * len(committee),
+            sync_committee_bits=[True] * len(committee_indices),
             sync_committee_signature=compute_aggregate_sync_committee_signature(
                 spec,
                 state,
                 block.slot - 1,
-                committee,
+                committee_indices,
             )
         )

@@ -277,12 +292,12 @@ def test_invalid_signature_past_block(spec, state):
     invalid_block = build_empty_block_for_next_slot(spec, state)
     # Invalid signature from a slot other than the previous
     invalid_block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[True] * len(committee),
+        sync_committee_bits=[True] * len(committee_indices),
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             invalid_block.slot - 2,
-            committee,
+            committee_indices,
         )
     )

@@ -307,19 +322,18 @@ def test_invalid_signature_previous_committee(spec, state):
     transition_to(spec, state, slot_in_future_sync_committee_period)

     # Use the previous sync committee to produce the signature.
-    pubkeys = [validator.pubkey for validator in state.validators]
     # Ensure that the pubkey sets are different.
     assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
-    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
+    committee_indices = compute_committee_indices(spec, state, old_sync_committee)

     block = build_empty_block_for_next_slot(spec, state)
     block.body.sync_aggregate = spec.SyncAggregate(
-        sync_committee_bits=[True] * len(committee),
+        sync_committee_bits=[True] * len(committee_indices),
         sync_committee_signature=compute_aggregate_sync_committee_signature(
             spec,
             state,
             block.slot - 1,
-            committee,
+            committee_indices,
         )
     )

@@ -345,15 +359,13 @@ def test_valid_signature_future_committee(spec, state):
     transition_to(spec, state, slot_in_future_sync_committee_period)

     sync_committee = state.current_sync_committee
+    next_sync_committee = state.next_sync_committee

-    expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)
-
-    assert sync_committee == expected_sync_committee
+    assert next_sync_committee != sync_committee
     assert sync_committee != old_current_sync_committee
     assert sync_committee != old_next_sync_committee

-    pubkeys = [validator.pubkey for validator in state.validators]
-    committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]
+    committee_indices = compute_committee_indices(spec, state, sync_committee)

     block = build_empty_block_for_next_slot(spec, state)
     block.body.sync_aggregate = spec.SyncAggregate(
@@ -367,43 +379,3 @@ def test_valid_signature_future_committee(spec, state):
     )

     yield from run_sync_committee_processing(spec, state, block)
-
-
-@with_altair_and_later
-@spec_state_test
-def test_sync_committee_is_only_computed_at_epoch_boundary(spec, state):
-    """
-    Sync committees can only be computed at sync committee period boundaries.
-    Ensure a client respects the committee in the state (assumed to be derived
-    in the correct way).
-    """
-    current_epoch = spec.get_current_epoch(state)
-
-    # use a "synthetic" committee to simulate the situation
-    # where ``spec.get_sync_committee`` at the sync committee
-    # period epoch boundary would have diverged some epochs into the
-    # period; ``aggregate_pubkey`` is not relevant to this test
-    pubkeys = []
-    committee_indices = []
-    i = 0
-    active_validator_count = len(spec.get_active_validator_indices(state, current_epoch))
-    while len(pubkeys) < spec.SYNC_COMMITTEE_SIZE:
-        v = state.validators[i % active_validator_count]
-        if spec.is_active_validator(v, current_epoch):
-            pubkeys.append(v.pubkey)
-            committee_indices.append(i)
-        i += 1
-
-    synthetic_committee = spec.SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=spec.BLSPubkey())
-    state.current_sync_committee = synthetic_committee
-
-    assert spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD > 3
-    for _ in range(3):
-        next_epoch(spec, state)
-
-    committee = get_committee_indices(spec, state)
-    assert committee != committee_indices
-    committee_size = len(committee_indices)
-    committee_bits = [True] * committee_size
-
-    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@@ -39,8 +39,7 @@ def run_sync_committees_progress_test(spec, state):

     # Can compute the third committee having computed final balances in the last epoch
     # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
-    current_epoch = spec.get_current_epoch(state)
-    third_sync_committee = spec.get_sync_committee(state, current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+    third_sync_committee = spec.get_next_sync_committee(state)

     assert state.current_sync_committee == second_sync_committee
     assert state.next_sync_committee == third_sync_committee
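The switch to `get_next_sync_committee(state)` reflects that committees are now derived relative to the state's current period rather than for an arbitrary epoch. For orientation, the period arithmetic the surrounding tests rely on is just integer division of the epoch; the sketch below assumes the mainnet value `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256`, which is not taken from this diff:

```python
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256  # assumed mainnet preset value


def compute_sync_committee_period(epoch: int) -> int:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD


# The committee only rotates when this quotient changes, i.e. at a period boundary.
assert compute_sync_committee_period(255) == 0
assert compute_sync_committee_period(256) == 1
```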
@@ -18,7 +18,8 @@ from eth2spec.test.context import (


 def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     participants = random.sample(committee, int(len(committee) * fraction_full))

     yield 'pre', state
@@ -46,7 +46,8 @@ def test_process_light_client_update_not_updated(spec, state):
         body_root=signed_block.message.body.hash_tree_root(),
     )
     # Sync committee signing the header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -111,7 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
     )

     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -190,7 +192,8 @@ def test_process_light_client_update_finality_updated(spec, state):
     )

     # Sync committee signing the finalized_block_header
-    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
+    all_pubkeys = [v.pubkey for v in state.validators]
+    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
     sync_committee_bits = [True] * len(committee)
     sync_committee_signature = compute_aggregate_sync_committee_signature(
         spec,
@@ -8,8 +8,12 @@ from eth2spec.utils import bls
 from eth2spec.utils.bls import only_with_bls
 from eth2spec.test.context import (
     with_altair_and_later,
+    with_configs,
     with_state,
 )
+from eth2spec.test.helpers.constants import (
+    MINIMAL,
+)

 rng = random.Random(1337)

@@ -91,6 +95,7 @@ def _get_sync_committee_signature(

 @only_with_bls()
 @with_altair_and_later
+@with_configs([MINIMAL], reason="too slow")
 @with_state
 def test_process_sync_committee_contributions(phases, spec, state):
     # skip over slots at genesis
@@ -143,20 +148,63 @@ def _subnet_for_sync_committee_index(spec, i):
     return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)


+def _get_expected_subnets_by_pubkey(sync_committee_members):
+    expected_subnets_by_pubkey = defaultdict(list)
+    for (subnet, pubkey) in sync_committee_members:
+        expected_subnets_by_pubkey[pubkey].append(subnet)
+    return expected_subnets_by_pubkey
+
+
 @with_altair_and_later
+@with_configs([MINIMAL], reason="too slow")
 @with_state
 def test_compute_subnets_for_sync_committee(state, spec, phases):
+    # Transition to the head of the next period
+    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
+
+    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
+    assert (
+        spec.compute_sync_committee_period(spec.get_current_epoch(state))
+        == spec.compute_sync_committee_period(next_slot_epoch)
+    )
     some_sync_committee_members = list(
         (
             _subnet_for_sync_committee_index(spec, i),
             pubkey,
         )
+        # use current_sync_committee
         for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
     )
-
-    expected_subnets_by_pubkey = defaultdict(list)
-    for (subnet, pubkey) in some_sync_committee_members:
-        expected_subnets_by_pubkey[pubkey].append(subnet)
+    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)
+
+    for _, pubkey in some_sync_committee_members:
+        validator_index = _validator_index_for_pubkey(state, pubkey)
+        subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
+        expected_subnets = expected_subnets_by_pubkey[pubkey]
+        assert subnets == expected_subnets
+
+
+@with_altair_and_later
+@with_configs([MINIMAL], reason="too slow")
+@with_state
+def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
+    # Transition to the end of the period
+    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)
+
+    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
+    assert (
+        spec.compute_sync_committee_period(spec.get_current_epoch(state))
+        != spec.compute_sync_committee_period(next_slot_epoch)
+    )
+    some_sync_committee_members = list(
+        (
+            _subnet_for_sync_committee_index(spec, i),
+            pubkey,
+        )
+        # use next_sync_committee
+        for i, pubkey in enumerate(state.next_sync_committee.pubkeys)
+    )
+    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)

     for _, pubkey in some_sync_committee_members:
         validator_index = _validator_index_for_pubkey(state, pubkey)
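The `_subnet_for_sync_committee_index` helper used above slices the committee into equal contiguous ranges, one per subnet. With the constants I believe apply on mainnet (`SYNC_COMMITTEE_SIZE = 512`, `SYNC_COMMITTEE_SUBNET_COUNT = 4`; assumed, not shown in this diff), that works out to 128 members per subnet:

```python
SYNC_COMMITTEE_SIZE = 512          # assumed mainnet preset value
SYNC_COMMITTEE_SUBNET_COUNT = 4    # assumed mainnet preset value


def subnet_for_sync_committee_index(i: int) -> int:
    # 512 // 4 = 128 committee members per subnet
    return i // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT)


assert subnet_for_sync_committee_index(0) == 0
assert subnet_for_sync_committee_index(127) == 0
assert subnet_for_sync_committee_index(128) == 1
assert subnet_for_sync_committee_index(511) == 3
```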
@@ -69,9 +69,8 @@ def create_genesis_state(spec, validator_balances, activation_threshold):

     if spec.fork not in FORKS_BEFORE_ALTAIR:
         # Fill in sync committees
-        state.current_sync_committee = spec.get_sync_committee(state, spec.get_current_epoch(state))
-        state.next_sync_committee = (
-            spec.get_sync_committee(state, spec.get_current_epoch(state) + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
-        )
+        # Note: A duplicate committee is assigned for the current and next committee at genesis
+        state.current_sync_committee = spec.get_next_sync_committee(state)
+        state.next_sync_committee = spec.get_next_sync_committee(state)

     return state
@@ -62,13 +62,13 @@ def run_deltas(spec, state):

     if is_post_altair(spec):
         def get_source_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX)

         def get_head_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX)

         def get_target_deltas(state):
-            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)
+            return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX)

     yield from run_attestation_component_deltas(
         spec,
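The weight argument disappears because, presumably, the spec-side `get_flag_index_deltas` can now recover the incentivization weight from the flag index alone. A rough, purely illustrative sketch of that idea (names and values below are placeholders, not the spec's):

```python
# Placeholder values keyed by participation flag index; the real mapping lives in the spec.
FLAG_WEIGHTS = {0: 14, 1: 26, 2: 14}


def get_flag_weight(flag_index: int) -> int:
    # Callers only pass the flag index; the weight is looked up internally.
    return FLAG_WEIGHTS[flag_index]


assert get_flag_weight(1) == 26
```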
@@ -133,14 +133,23 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
         validator = state.validators[index]
         enough_for_reward = has_enough_for_reward(spec, state, index)
         if index in matching_indices and not validator.slashed:
-            if enough_for_reward:
-                assert rewards[index] > 0
-            else:
-                assert rewards[index] == 0
+            if is_post_altair(spec):
+                if not spec.is_in_inactivity_leak(state) and enough_for_reward:
+                    assert rewards[index] > 0
+                else:
+                    assert rewards[index] == 0
+            else:
+                if enough_for_reward:
+                    assert rewards[index] > 0
+                else:
+                    assert rewards[index] == 0
+
             assert penalties[index] == 0
         else:
             assert rewards[index] == 0
-            if enough_for_reward:
+            if is_post_altair(spec) and 'head' in deltas_name:
+                assert penalties[index] == 0
+            elif enough_for_reward:
                 assert penalties[index] > 0
             else:
                 assert penalties[index] == 0
@@ -225,16 +234,17 @@ def run_get_inactivity_penalty_deltas(spec, state):
             if not is_post_altair(spec):
                 cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
                 base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
-            else:
-                base_penalty = sum(
-                    base_reward * numerator // spec.WEIGHT_DENOMINATOR
-                    for (_, numerator) in spec.get_flag_indices_and_weights()
-                )

             if not has_enough_for_reward(spec, state, index):
                 assert penalties[index] == 0
             elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
-                assert penalties[index] == base_penalty
+                if is_post_altair(spec):
+                    assert penalties[index] == 0
+                else:
+                    assert penalties[index] == base_penalty
             else:
-                assert penalties[index] > base_penalty
+                if is_post_altair(spec):
+                    assert penalties[index] > 0
+                else:
+                    assert penalties[index] > base_penalty
         else:
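For context, the post-Altair branches above only assert `> 0` or `== 0` because the Altair inactivity penalty scales with a validator's inactivity score rather than with base rewards. A hedged sketch of that formula, using the mainnet constants as I recall them (not taken from this diff):

```python
INACTIVITY_SCORE_BIAS = 4                       # assumed mainnet value
INACTIVITY_PENALTY_QUOTIENT_ALTAIR = 3 * 2**24  # assumed mainnet value


def inactivity_penalty(effective_balance_gwei: int, inactivity_score: int) -> int:
    # The penalty grows linearly with the validator's inactivity score.
    return (
        effective_balance_gwei * inactivity_score
        // (INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR)
    )


# A 32 ETH (in Gwei) validator with a nonzero score accrues a nonzero penalty.
assert inactivity_penalty(32 * 10**9, 4) > 0
```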