working through future shuffling notes

This commit is contained in:
parent 56c6322054
commit a164ad9504

@@ -800,6 +800,17 @@ def get_current_epoch_committee_count(state: BeaconState) -> int:
    return get_epoch_committee_count(len(current_active_validators))
```

### `get_next_epoch_committee_count`

```python
def get_next_epoch_committee_count(state: BeaconState) -> int:
    next_active_validators = get_active_validator_indices(
        state.validator_registry,
        get_current_epoch(state) + 1,
    )
    return get_epoch_committee_count(len(next_active_validators))
```

### `get_crosslink_committees_at_slot`

```python

@@ -807,24 +818,33 @@ def get_crosslink_committees_at_slot(state: BeaconState,
                                     slot: SlotNumber) -> List[Tuple[List[ValidatorIndex], ShardNumber]]:
    """
    Returns the list of ``(committee, shard)`` tuples for the ``slot``.

    Note: Crosslink committees for a ``slot`` in the next epoch are only valid
    if a validator registry change occurs at the end of the current epoch.
    """
    epoch = slot_to_epoch(slot)
    current_epoch = get_current_epoch(state)
    previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else current_epoch
    next_epoch = current_epoch + 1

    assert previous_epoch <= epoch <= next_epoch

    if epoch == previous_epoch:
        committees_per_epoch = get_previous_epoch_committee_count(state)
        seed = state.previous_epoch_seed
        shuffling_epoch = state.previous_calculation_epoch
        shuffling_start_shard = state.previous_epoch_start_shard
    elif epoch == current_epoch:
        committees_per_epoch = get_current_epoch_committee_count(state)
        seed = state.current_epoch_seed
        shuffling_epoch = state.current_calculation_epoch
        shuffling_start_shard = state.current_epoch_start_shard
    elif epoch == next_epoch:
        current_committees_per_epoch = get_current_epoch_committee_count(state)
        committees_per_epoch = get_next_epoch_committee_count(state)
        seed = generate_seed(state, next_epoch)
        shuffling_epoch = next_epoch
        shuffling_start_shard = (state.current_epoch_start_shard + current_committees_per_epoch) % SHARD_COUNT

    shuffling = get_shuffling(
        seed,

@@ -881,7 +901,7 @@ def get_active_index_root(state: BeaconState,
    """
    Returns the index root at a recent ``epoch``.
    """
    assert get_current_epoch(state) - LATEST_INDEX_ROOTS_LENGTH < epoch <= get_current_epoch(state) + ENTRY_EXIT_DELAY
    return state.latest_index_roots[epoch % LATEST_INDEX_ROOTS_LENGTH]
```

@@ -900,6 +920,17 @@ def generate_seed(state: BeaconState,
    )
```

### `get_beacon_proposer_from_committee`

```python
def get_beacon_proposer_from_committee(committee: List[ValidatorIndex],
                                       slot: SlotNumber) -> ValidatorIndex:
    """
    Returns the beacon proposer index of the ``committee`` at the ``slot``.
    """
    return committee[slot % len(committee)]
```

### `get_beacon_proposer_index`

```python

@@ -909,7 +940,8 @@ def get_beacon_proposer_index(state: BeaconState,
    Returns the beacon proposer index for the ``slot``.
    """
    first_committee, _ = get_crosslink_committees_at_slot(state, slot)[0]
    return get_beacon_proposer_from_committee(first_committee, slot)
```

### `merkle_root`

@@ -1458,7 +1490,10 @@ def get_initial_beacon_state(initial_validator_deposits: List[Deposit],
        if get_effective_balance(state, validator_index) >= MAX_DEPOSIT_AMOUNT:
            activate_validator(state, validator_index, True)

    for epoch in range(GENESIS_EPOCH, GENESIS_EPOCH + ENTRY_EXIT_DELAY):
        state.latest_index_roots[epoch % LATEST_INDEX_ROOTS_LENGTH] = hash_tree_root(
            get_active_validator_indices(state, epoch)
        )
    state.current_epoch_seed = generate_seed(state, GENESIS_EPOCH)

    return state

@@ -1869,7 +1904,7 @@ First, update the following:
* Set `state.previous_calculation_epoch = state.current_calculation_epoch`.
* Set `state.previous_epoch_start_shard = state.current_epoch_start_shard`.
* Set `state.previous_epoch_seed = state.current_epoch_seed`.
* Set `state.latest_index_roots[(current_epoch + ENTRY_EXIT_DELAY) % LATEST_INDEX_ROOTS_LENGTH] = hash_tree_root(get_active_validator_indices(state, current_epoch + ENTRY_EXIT_DELAY))`.
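
For concreteness, these bookkeeping updates could be sketched in Python roughly as follows. This is an illustrative, non-normative sketch: the function name `update_shuffling_bookkeeping` is hypothetical, and it assumes `hash_tree_root`, `get_active_validator_indices`, and the constants above are in scope.

```python
# Non-normative sketch of the per-epoch shuffling bookkeeping listed above.
def update_shuffling_bookkeeping(state: BeaconState, current_epoch: int) -> None:
    state.previous_calculation_epoch = state.current_calculation_epoch
    state.previous_epoch_start_shard = state.current_epoch_start_shard
    state.previous_epoch_seed = state.current_epoch_seed
    # Store the active index root ENTRY_EXIT_DELAY epochs ahead so that
    # next-epoch committee lookups (see `get_active_index_root`) can use it.
    index_epoch = current_epoch + ENTRY_EXIT_DELAY
    state.latest_index_roots[index_epoch % LATEST_INDEX_ROOTS_LENGTH] = hash_tree_root(
        get_active_validator_indices(state, index_epoch)
    )
```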

If the following are satisfied:

@@ -1932,7 +1967,7 @@ and perform the following updates:
If a validator registry update does _not_ happen do the following:

* Let `epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch`.
* If `epochs_since_last_registry_update > 1` and `epochs_since_last_registry_update` is an exact power of 2:
    * Set `state.current_calculation_epoch = next_epoch`.
    * Set `state.current_epoch_seed = generate_seed(state, state.current_calculation_epoch)`.
    * _Note_ that `state.current_epoch_start_shard` is left unchanged.
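
Put together, the "no registry update" branch above amounts to the following non-normative sketch (the function name `process_no_registry_update` and the `is_power_of_two` helper are illustrative, not part of the spec):

```python
# Non-normative sketch of the "no validator registry update" rules above.
def process_no_registry_update(state: BeaconState, current_epoch: int) -> None:
    next_epoch = current_epoch + 1
    epochs_since_last_registry_update = current_epoch - state.validator_registry_update_epoch
    if epochs_since_last_registry_update > 1 and is_power_of_two(epochs_since_last_registry_update):
        state.current_calculation_epoch = next_epoch
        state.current_epoch_seed = generate_seed(state, state.current_calculation_epoch)
        # state.current_epoch_start_shard is deliberately left unchanged.


def is_power_of_two(value: int) -> bool:
    # True for 1, 2, 4, 8, ... -- here only values > 1 are of interest.
    return value > 0 and (value & (value - 1)) == 0
```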

@@ -327,6 +327,50 @@ signed_attestation_data = bls_sign(
)
```

## Responsibility lookahead

It is useful for a validator to have some amount of "lookahead" on the validator's upcoming responsibilities of proposing and attesting, as dictated by the shuffling and slot. The beacon chain shufflings are designed to give a minimum of 1 epoch of lookahead.

There are three possibilities for the shuffling at the next epoch:
1. The shuffling remains the same (i.e. the validator is in the same shard committee).
2. The shuffling changes due to a "validator registry change".
3. The shuffling changes due to `epochs_since_last_registry_update` being an exact power of 2 greater than 1.

(2) and (3) result in the same validator shuffling but with (potentially) different associated shards for each committee.

A validator should always plan for each as a possibility unless the validator can concretely eliminate one or two. Planning for a future shuffling involves noting at which slot one might have to attest and propose and also which shard one should begin syncing.
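
As a sketch of such planning, the hypothetical helper below walks the slots of the next epoch and collects the `(slot, shard, committee)` assignments that include a given validator. It is illustrative only: it assumes the spec helpers `get_current_epoch` and `get_crosslink_committees_at_slot` are available and that the first slot of an epoch is `epoch * EPOCH_LENGTH`.

```python
# Hypothetical lookahead helper (not part of the spec).
def get_next_epoch_assignments(state: BeaconState,
                               validator_index: ValidatorIndex) -> List[Tuple[SlotNumber, ShardNumber, List[ValidatorIndex]]]:
    next_epoch_start_slot = (get_current_epoch(state) + 1) * EPOCH_LENGTH
    assignments = []
    for slot in range(next_epoch_start_slot, next_epoch_start_slot + EPOCH_LENGTH):
        for committee, shard in get_crosslink_committees_at_slot(state, slot):
            if validator_index in committee:
                # Note the slot and shard so syncing and duty planning can start early.
                assignments.append((slot, shard, committee))
    return assignments
```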

### Shuffling remains the same

The validator remains in the same crosslink committee for the same shard and same slot position (`slot + EPOCH_LENGTH`) and might be called upon as proposer depending upon the committee position and slot in the next epoch. The validator can safely check using `get_beacon_proposer_index` for the slot in question at the start of the next epoch.
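
For example, a check along the following lines (illustrative only, with a hypothetical helper name) confirms the proposer duty once the state has advanced into the next epoch:

```python
# Illustrative check for proposer duty one epoch ahead (not part of the spec).
def is_proposer_next_epoch(state: BeaconState,
                           validator_index: ValidatorIndex,
                           current_assignment_slot: SlotNumber) -> bool:
    # Same slot position, one epoch later.
    next_slot = current_assignment_slot + EPOCH_LENGTH
    return get_beacon_proposer_index(state, next_slot) == validator_index
```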

In phase 1, the validator would continue syncing the committee's associated shard in case the shuffling remains the same.

### Shuffling change due to validator registry

A validator registry change occurs when a new block has been finalized and all expected shards have been crosslinked. _This can only occur_ when `epochs_since_last_registry_update > 1`. A validator can use `get_crosslink_committees_at_slot` for slots in the next epoch to find the expected new crosslink committee in the event of a validator registry change at the end of the current epoch.

In phase 1, if the validator is uncertain as to whether a validator registry change will occur at the end of the current epoch, the validator should opportunistically begin to sync the potential new shard for the next epoch to be prepared in the event of a registry change. Re-checking `get_crosslink_committees_at_slot` at the start of the next epoch will affirm or deny whether the committee did in fact update.
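
That re-check might look like the following sketch, where `expected_shard` and `committee_slot_position` are whatever the validator computed before the epoch boundary and the function name is illustrative:

```python
# Illustrative re-check at the start of the next epoch (not part of the spec).
def registry_change_confirmed(state: BeaconState,
                              slot: SlotNumber,
                              committee_slot_position: int,
                              expected_shard: ShardNumber) -> bool:
    # Committees for `slot` are now final; compare the shard at the same
    # position against the one computed ahead of time.
    _, actual_shard = get_crosslink_committees_at_slot(state, slot)[committee_slot_position]
    return actual_shard == expected_shard
```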

### Shuffling change due to power of 2 since last change

In the event that a validator registry change does not occur at the end of the current epoch and `epochs_since_last_registry_update > 1` and `epochs_since_last_registry_update` is an exact power of 2, a reshuffling occurs but the shards remain the same.

In phase 1, if a power of 2 since the last registry update is coming up and the validator is uncertain if a registry change will occur, the validator should opportunistically begin to sync the potential new shard for the new shuffling.

To find the shard to attempt to sync:
* Find the new `committee` using `get_crosslink_committees_at_slot` for slots in the next epoch.
* Let `slot` be the future slot in which the validator is in a committee.
* Let `committee_slot_position` be the position of the committee within the list of committees at that slot.
* Let `shard = get_crosslink_committees_at_slot(state, slot - EPOCH_LENGTH)[committee_slot_position][1]`.

Opportunistically sync `shard` during the current epoch. The validator can check `get_crosslink_committees_at_slot` at the start of the next epoch to affirm or deny whether the power of 2 committee update did occur.
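
The lookup steps above might be sketched as follows; the function name is illustrative and, as before, the first slot of an epoch is assumed to be `epoch * EPOCH_LENGTH`:

```python
# Illustrative sketch of the shard lookup steps above (not part of the spec).
def get_shard_to_sync(state: BeaconState,
                      validator_index: ValidatorIndex) -> ShardNumber:
    next_epoch_start_slot = (get_current_epoch(state) + 1) * EPOCH_LENGTH
    for slot in range(next_epoch_start_slot, next_epoch_start_slot + EPOCH_LENGTH):
        crosslink_committees = get_crosslink_committees_at_slot(state, slot)
        for committee_slot_position, (committee, _) in enumerate(crosslink_committees):
            if validator_index in committee:
                # The shards themselves are unchanged, so read the shard from the
                # same committee position one epoch earlier.
                previous_committees = get_crosslink_committees_at_slot(state, slot - EPOCH_LENGTH)
                return previous_committees[committee_slot_position][1]
    raise AssertionError("active validator not found in any next-epoch committee")
```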

## How to avoid slashing

"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed -- [proposal slashing](#proposal-slashing) and [attestation slashing](#casper-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed altogether by remaining _consistent_ with respect to the messages you have previously signed.