mirror of
https://github.com/status-im/eth2.0-specs.git
synced 2025-02-20 22:38:11 +00:00
Merge branch 'dev' into invariant-checks
This commit is contained in:
commit
8ba740a93a
@ -14,8 +14,8 @@ PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
|
||||
# ---------------------------------------------------------------
|
||||
# 2**9 (= 512)
|
||||
SYNC_COMMITTEE_SIZE: 512
|
||||
# 2**9 (= 512)
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512
|
||||
# 2**8 (= 256)
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
|
||||
|
||||
|
||||
# Sync protocol
|
||||
|
@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 16
|
||||
# Shard block configs
|
||||
# ---------------------------------------------------------------
|
||||
MAX_SHARD_HEADERS_PER_SHARD: 4
|
||||
# 2**8 (= 256)
|
||||
SHARD_STATE_MEMORY_SLOTS: 256
|
||||
# 2**11 (= 2,048)
|
||||
MAX_SAMPLES_PER_BLOCK: 2048
|
||||
# 2**10 (= 1,1024)
|
||||
|
@ -15,6 +15,8 @@ MAX_SHARD_PROPOSER_SLASHINGS: 4
|
||||
# Shard block configs
|
||||
# ---------------------------------------------------------------
|
||||
MAX_SHARD_HEADERS_PER_SHARD: 4
|
||||
# 2**8 (= 256)
|
||||
SHARD_STATE_MEMORY_SLOTS: 256
|
||||
# 2**11 (= 2,048)
|
||||
MAX_SAMPLES_PER_BLOCK: 2048
|
||||
# 2**10 (= 1,1024)
|
||||
|
32
setup.py
32
setup.py
@ -55,6 +55,12 @@ def floorlog2(x: int) -> uint64:
|
||||
'''
|
||||
|
||||
|
||||
OPTIMIZED_BLS_AGGREGATE_PUBKEYS = '''
|
||||
def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
|
||||
return bls.AggregatePKs(pubkeys)
|
||||
'''
|
||||
|
||||
|
||||
class ProtocolDefinition(NamedTuple):
|
||||
# just function definitions currently. May expand with configuration vars in future.
|
||||
functions: Dict[str, str]
|
||||
@ -297,6 +303,11 @@ class SpecBuilder(ABC):
|
||||
"""
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
|
||||
raise NotImplementedError()
|
||||
|
||||
@classmethod
|
||||
@abstractmethod
|
||||
def build_spec(cls, preset_name: str,
|
||||
@ -417,6 +428,10 @@ get_attesting_indices = cache_this(
|
||||
def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
|
||||
return {}
|
||||
|
||||
@classmethod
|
||||
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
|
||||
return functions
|
||||
|
||||
@classmethod
|
||||
def build_spec(cls, preset_name: str,
|
||||
source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:
|
||||
@ -463,6 +478,11 @@ def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariable
|
||||
}
|
||||
return {**super().hardcoded_ssz_dep_constants(), **constants}
|
||||
|
||||
@classmethod
|
||||
def implement_optimizations(cls, functions: Dict[str, str]) -> Dict[str, str]:
|
||||
if "eth2_aggregate_pubkeys" in functions:
|
||||
functions["eth2_aggregate_pubkeys"] = OPTIMIZED_BLS_AGGREGATE_PUBKEYS.strip()
|
||||
return super().implement_optimizations(functions)
|
||||
|
||||
#
|
||||
# MergeSpecBuilder
|
||||
@ -569,7 +589,8 @@ def objects_to_spec(preset_name: str,
|
||||
for k in list(spec_object.functions):
|
||||
if "ceillog2" in k or "floorlog2" in k:
|
||||
del spec_object.functions[k]
|
||||
functions_spec = '\n\n\n'.join(spec_object.functions.values())
|
||||
functions = builder.implement_optimizations(spec_object.functions)
|
||||
functions_spec = '\n\n\n'.join(functions.values())
|
||||
|
||||
# Access global dict of config vars for runtime configurables
|
||||
for name in spec_object.config_vars.keys():
|
||||
@ -577,7 +598,7 @@ def objects_to_spec(preset_name: str,
|
||||
|
||||
def format_config_var(name: str, vardef: VariableDefinition) -> str:
|
||||
if vardef.type_name is None:
|
||||
out = f'{name}={vardef.value}'
|
||||
out = f'{name}={vardef.value},'
|
||||
else:
|
||||
out = f'{name}={vardef.type_name}({vardef.value}),'
|
||||
if vardef.comment is not None:
|
||||
@ -811,7 +832,7 @@ class PySpecCommand(Command):
|
||||
self.out_dir = 'pyspec_output'
|
||||
self.build_targets = """
|
||||
minimal:presets/minimal:configs/minimal.yaml
|
||||
mainnet:presets/mainnet:configs/mainnet.yaml
|
||||
mainnet:presets/mainnet:configs/mainnet.yaml
|
||||
"""
|
||||
|
||||
def finalize_options(self):
|
||||
@ -833,6 +854,7 @@ class PySpecCommand(Command):
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
specs/altair/beacon-chain.md
|
||||
specs/altair/bls.md
|
||||
specs/altair/fork.md
|
||||
specs/altair/validator.md
|
||||
specs/altair/p2p-interface.md
|
||||
@ -987,7 +1009,7 @@ setup(
|
||||
python_requires=">=3.8, <4",
|
||||
extras_require={
|
||||
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
||||
"lint": ["flake8==3.7.7", "mypy==0.750"],
|
||||
"lint": ["flake8==3.7.7", "mypy==0.812"],
|
||||
"generator": ["python-snappy==0.5.4"],
|
||||
},
|
||||
install_requires=[
|
||||
@ -997,7 +1019,7 @@ setup(
|
||||
"py_ecc==5.2.0",
|
||||
"milagro_bls_binding==1.6.3",
|
||||
"dataclasses==0.6",
|
||||
"remerkleable==0.1.19",
|
||||
"remerkleable==0.1.20",
|
||||
RUAMEL_YAML_VERSION,
|
||||
"lru-dict==1.1.6",
|
||||
MARKO_VERSION,
|
||||
|
@ -26,8 +26,7 @@
|
||||
- [`SyncAggregate`](#syncaggregate)
|
||||
- [`SyncCommittee`](#synccommittee)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`Predicates`](#predicates)
|
||||
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
|
||||
- [Crypto](#crypto)
|
||||
- [Misc](#misc-1)
|
||||
- [`add_flag`](#add_flag)
|
||||
- [`has_flag`](#has_flag)
|
||||
@ -86,10 +85,10 @@ Altair is the first beacon chain hard fork. Its main features are:
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` |
|
||||
| `TIMELY_TARGET_WEIGHT` | `uint64(24)` |
|
||||
| `TIMELY_HEAD_WEIGHT` | `uint64(12)` |
|
||||
| `SYNC_REWARD_WEIGHT` | `uint64(8)` |
|
||||
| `TIMELY_SOURCE_WEIGHT` | `uint64(14)` |
|
||||
| `TIMELY_TARGET_WEIGHT` | `uint64(26)` |
|
||||
| `TIMELY_HEAD_WEIGHT` | `uint64(14)` |
|
||||
| `SYNC_REWARD_WEIGHT` | `uint64(2)` |
|
||||
| `PROPOSER_WEIGHT` | `uint64(8)` |
|
||||
| `WEIGHT_DENOMINATOR` | `uint64(64)` |
|
||||
|
||||
@ -107,7 +106,6 @@ Altair is the first beacon chain hard fork. Its main features are:
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
|
||||
| `PARTICIPATION_FLAG_WEIGHTS` | `[TIMELY_SOURCE_WEIGHT, TIMELY_TARGET_WEIGHT, TIMELY_HEAD_WEIGHT]` |
|
||||
|
||||
## Preset
|
||||
@ -129,7 +127,7 @@ This patch updates a few configuration values to move penalty parameters closer
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | - | - |
|
||||
| `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | |
|
||||
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours |
|
||||
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**8)` (= 256) | epochs | ~27 hours |
|
||||
|
||||
## Configuration
|
||||
|
||||
@ -137,8 +135,8 @@ This patch updates a few configuration values to move penalty parameters closer
|
||||
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `INACTIVITY_SCORE_BIAS` | `uint64(4)` | score points per inactive epoch |
|
||||
| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(16)` | score points per recovering epoch |
|
||||
| `INACTIVITY_SCORE_BIAS` | `uint64(2**2)` (= 4) | score points per inactive epoch |
|
||||
| `INACTIVITY_SCORE_RECOVERY_RATE` | `uint64(2**4)` (= 16) | score points per leak-free epoch |
|
||||
|
||||
## Containers
|
||||
|
||||
@ -157,8 +155,7 @@ class BeaconBlockBody(Container):
|
||||
attestations: List[Attestation, MAX_ATTESTATIONS]
|
||||
deposits: List[Deposit, MAX_DEPOSITS]
|
||||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
# [New in Altair]
|
||||
sync_aggregate: SyncAggregate
|
||||
sync_aggregate: SyncAggregate # [New in Altair]
|
||||
```
|
||||
|
||||
#### `BeaconState`
|
||||
@ -221,19 +218,11 @@ class SyncCommittee(Container):
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `Predicates`
|
||||
### Crypto
|
||||
|
||||
#### `eth2_fast_aggregate_verify`
|
||||
|
||||
```python
|
||||
def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
|
||||
"""
|
||||
Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
|
||||
"""
|
||||
if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
|
||||
return True
|
||||
return bls.FastAggregateVerify(pubkeys, message, signature)
|
||||
```
|
||||
Refer to the definitions in the [phase 0 document regarding BLS signatures](../phase0/beacon-chain.md#bls-signatures)
|
||||
and the extensions defined in the [Altair BLS document](./bls.md). This specification assumes knowledge of
|
||||
the functionality described in those documents.
|
||||
|
||||
### Misc
|
||||
|
||||
@ -266,10 +255,7 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
|
||||
```python
|
||||
def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
|
||||
"""
|
||||
Return the sequence of sync committee indices (which may include duplicate indices)
|
||||
for the next sync committee, given a ``state`` at a sync committee period boundary.
|
||||
|
||||
Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128)
|
||||
Return the sync committee indices, with possible duplicates, for the next sync committee.
|
||||
"""
|
||||
epoch = Epoch(get_current_epoch(state) + 1)
|
||||
|
||||
@ -292,25 +278,16 @@ def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorInd
|
||||
|
||||
#### `get_next_sync_committee`
|
||||
|
||||
*Note*: The function `get_next_sync_committee` should only be called at sync committee period boundaries.
|
||||
|
||||
```python
|
||||
def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
|
||||
"""
|
||||
Return the *next* sync committee for a given ``state``.
|
||||
|
||||
``SyncCommittee`` contains an aggregate pubkey that enables
|
||||
resource-constrained clients to save some computation when verifying
|
||||
the sync committee's signature.
|
||||
|
||||
``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices``
|
||||
returns duplicate indices. Implementations must take care when handling
|
||||
optimizations relating to aggregation and verification in the presence of duplicates.
|
||||
|
||||
Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates``
|
||||
as ``get_next_sync_committee_indices`` is not stable within a given period.
|
||||
Return the next sync committee, with possible pubkey duplicates.
|
||||
"""
|
||||
indices = get_next_sync_committee_indices(state)
|
||||
pubkeys = [state.validators[index].pubkey for index in indices]
|
||||
aggregate_pubkey = bls.AggregatePKs(pubkeys)
|
||||
aggregate_pubkey = eth2_aggregate_pubkeys(pubkeys)
|
||||
return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
|
||||
```
|
||||
|
||||
@ -325,14 +302,12 @@ def get_base_reward_per_increment(state: BeaconState) -> Gwei:
|
||||
|
||||
*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment based accounting.
|
||||
|
||||
*Note*: On average an optimally performing validator earns one base reward per epoch.
|
||||
|
||||
```python
|
||||
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||
"""
|
||||
Return the base reward for the validator defined by ``index`` with respect to the current ``state``.
|
||||
|
||||
Note: An optimally performing validator can earn one base reward per epoch over a long time horizon.
|
||||
This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
|
||||
and sync committees).
|
||||
"""
|
||||
increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
|
||||
return Gwei(increments * get_base_reward_per_increment(state))
|
||||
@ -559,6 +534,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
|
||||
|
||||
#### Sync committee processing
|
||||
|
||||
*Note*: The function `process_sync_committee` is new.
|
||||
|
||||
```python
|
||||
def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None:
|
||||
# Verify sync committee aggregate signature signing over the previous slot block root
|
||||
@ -579,10 +556,12 @@ def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None
|
||||
# Apply participant and proposer rewards
|
||||
all_pubkeys = [v.pubkey for v in state.validators]
|
||||
committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
|
||||
participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit]
|
||||
for participant_index in participant_indices:
|
||||
increase_balance(state, participant_index, participant_reward)
|
||||
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
for participant_index, participation_bit in zip(committee_indices, aggregate.sync_committee_bits):
|
||||
if participation_bit:
|
||||
increase_balance(state, participant_index, participant_reward)
|
||||
increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
else:
|
||||
decrease_balance(state, participant_index, participant_reward)
|
||||
```
|
||||
|
||||
### Epoch processing
|
||||
@ -627,17 +606,17 @@ def process_justification_and_finalization(state: BeaconState) -> None:
|
||||
|
||||
```python
|
||||
def process_inactivity_updates(state: BeaconState) -> None:
|
||||
# Score updates based on previous epoch participation, skip genesis epoch
|
||||
# Skip the genesis epoch as score updates are based on the previous epoch participation
|
||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
return
|
||||
|
||||
for index in get_eligible_validator_indices(state):
|
||||
# Increase inactivity score of inactive validators
|
||||
# Increase the inactivity score of inactive validators
|
||||
if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)):
|
||||
state.inactivity_scores[index] -= min(1, state.inactivity_scores[index])
|
||||
else:
|
||||
state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS
|
||||
# Decrease the score of all validators for forgiveness when not during a leak
|
||||
# Decrease the inactivity score of all eligible validators during a leak-free epoch
|
||||
if not is_in_inactivity_leak(state):
|
||||
state.inactivity_scores[index] -= min(INACTIVITY_SCORE_RECOVERY_RATE, state.inactivity_scores[index])
|
||||
```
|
||||
|
65
specs/altair/bls.md
Normal file
65
specs/altair/bls.md
Normal file
@ -0,0 +1,65 @@
|
||||
# Ethereum 2.0 Altair BLS extensions
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Extensions](#extensions)
|
||||
- [`eth2_aggregate_pubkeys`](#eth2_aggregate_pubkeys)
|
||||
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
A number of extensions are defined to handle BLS signatures in the Altair upgrade.
|
||||
|
||||
Knowledge of the [phase 0 specification](../phase0/beacon-chain.md) is assumed, including type definitions.
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
|
||||
|
||||
## Extensions
|
||||
|
||||
### `eth2_aggregate_pubkeys`
|
||||
|
||||
An additional function `AggregatePKs` is defined to extend the
|
||||
[IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04)
|
||||
spec referenced in the phase 0 document.
|
||||
|
||||
```python
|
||||
def eth2_aggregate_pubkeys(pubkeys: Sequence[BLSPubkey]) -> BLSPubkey:
|
||||
"""
|
||||
Return the aggregate public key for the public keys in ``pubkeys``.
|
||||
|
||||
NOTE: the ``+`` operation should be interpreted as elliptic curve point addition, which takes as input
|
||||
elliptic curve points that must be decoded from the input ``BLSPubkey``s.
|
||||
This implementation is for demonstrative purposes only and ignores encoding/decoding concerns.
|
||||
Refer to the BLS signature draft standard for more information.
|
||||
"""
|
||||
assert len(pubkeys) > 0
|
||||
result = copy(pubkeys[0])
|
||||
for pubkey in pubkeys[1:]:
|
||||
result += pubkey
|
||||
return result
|
||||
```
|
||||
|
||||
### `eth2_fast_aggregate_verify`
|
||||
|
||||
```python
|
||||
def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
|
||||
"""
|
||||
Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
|
||||
"""
|
||||
if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
|
||||
return True
|
||||
return bls.FastAggregateVerify(pubkeys, message, signature)
|
||||
```
|
@ -80,7 +80,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
||||
| - | - |
|
||||
| `beacon_block` | `SignedBeaconBlock` (modified) |
|
||||
| `sync_committee_contribution_and_proof` | `SignedContributionAndProof` |
|
||||
| `sync_committee_{subnet_id}` | `SyncCommitteeSignature` |
|
||||
| `sync_committee_{subnet_id}` | `SyncCommitteeMessage` |
|
||||
|
||||
Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers).
|
||||
|
||||
@ -120,7 +120,7 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64
|
||||
return sync_committee.pubkeys[i:i + sync_subcommittee_size]
|
||||
```
|
||||
|
||||
- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
|
||||
- _[IGNORE]_ The contribution's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `contribution.slot == current_slot`.
|
||||
- _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
|
||||
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
|
||||
- _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`.
|
||||
@ -139,12 +139,12 @@ Sync committee subnets are used to propagate unaggregated sync committee signatu
|
||||
|
||||
The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
The following validations MUST pass before forwarding the `sync_committee_signature` on the network:
|
||||
The following validations MUST pass before forwarding the `sync_committee_message` on the network:
|
||||
|
||||
- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`.
|
||||
- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
|
||||
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`.
|
||||
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`.
|
||||
- _[IGNORE]_ The signature's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. `sync_committee_message.slot == current_slot`.
|
||||
- _[IGNORE]_ The block being signed over (`sync_committee_message.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
|
||||
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_message.validator_index`.
|
||||
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index)`.
|
||||
Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
|
||||
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
|
||||
|
||||
@ -156,7 +156,7 @@ The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair
|
||||
Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of tenure in the sync committee.
|
||||
Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees.
|
||||
|
||||
Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics.
|
||||
Unaggregated signatures (along with metadata) are sent as `SyncCommitteeMessage`s on the `sync_committee_{subnet_id}` topics.
|
||||
|
||||
Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution` along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.
|
||||
|
||||
|
@ -14,7 +14,7 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
- [Containers](#containers)
|
||||
- [`SyncCommitteeSignature`](#synccommitteesignature)
|
||||
- [`SyncCommitteeMessage`](#synccommitteemessage)
|
||||
- [`SyncCommitteeContribution`](#synccommitteecontribution)
|
||||
- [`ContributionAndProof`](#contributionandproof)
|
||||
- [`SignedContributionAndProof`](#signedcontributionandproof)
|
||||
@ -30,9 +30,9 @@ This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./
|
||||
- [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock)
|
||||
- [Attesting and attestation aggregation](#attesting-and-attestation-aggregation)
|
||||
- [Sync committees](#sync-committees)
|
||||
- [Sync committee signatures](#sync-committee-signatures)
|
||||
- [Prepare sync committee signature](#prepare-sync-committee-signature)
|
||||
- [Broadcast sync committee signature](#broadcast-sync-committee-signature)
|
||||
- [Sync committee messages](#sync-committee-messages)
|
||||
- [Prepare sync committee message](#prepare-sync-committee-message)
|
||||
- [Broadcast sync committee message](#broadcast-sync-committee-message)
|
||||
- [Sync committee contributions](#sync-committee-contributions)
|
||||
- [Aggregation selection](#aggregation-selection)
|
||||
- [Construct sync committee contribution](#construct-sync-committee-contribution)
|
||||
@ -78,10 +78,10 @@ This document is currently illustrative for early Altair testnets and some parts
|
||||
|
||||
## Containers
|
||||
|
||||
### `SyncCommitteeSignature`
|
||||
### `SyncCommitteeMessage`
|
||||
|
||||
```python
|
||||
class SyncCommitteeSignature(Container):
|
||||
class SyncCommitteeMessage(Container):
|
||||
# Slot to which this contribution pertains
|
||||
slot: Slot
|
||||
# Block root for this signature
|
||||
@ -258,34 +258,39 @@ There is no change compared to the phase 0 document.
|
||||
### Sync committees
|
||||
|
||||
Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot.
|
||||
Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers.
|
||||
Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeMessage`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers.
|
||||
This process occurs each slot.
|
||||
|
||||
#### Sync committee signatures
|
||||
#### Sync committee messages
|
||||
|
||||
##### Prepare sync committee signature
|
||||
##### Prepare sync committee message
|
||||
|
||||
If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`.
|
||||
If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeMessage` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_message` as soon as they have determined the head block of `slot - 1`.
|
||||
|
||||
This logic is triggered upon the same conditions as when producing an attestation.
|
||||
Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.
|
||||
Meaning, a sync committee member should produce and broadcast a `SyncCommitteeMessage` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.
|
||||
|
||||
`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
|
||||
`get_sync_committee_message(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
|
||||
|
||||
```python
|
||||
def get_sync_committee_signature(state: BeaconState,
|
||||
block_root: Root,
|
||||
validator_index: ValidatorIndex,
|
||||
privkey: int) -> SyncCommitteeSignature:
|
||||
def get_sync_committee_message(state: BeaconState,
|
||||
block_root: Root,
|
||||
validator_index: ValidatorIndex,
|
||||
privkey: int) -> SyncCommitteeMessage:
|
||||
epoch = get_current_epoch(state)
|
||||
domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
|
||||
signing_root = compute_signing_root(block_root, domain)
|
||||
signature = bls.Sign(privkey, signing_root)
|
||||
|
||||
return SyncCommitteeSignature(slot=state.slot, validator_index=validator_index, signature=signature)
|
||||
return SyncCommitteeMessage(
|
||||
slot=state.slot,
|
||||
beacon_block_root=block_root,
|
||||
validator_index=validator_index,
|
||||
signature=signature,
|
||||
)
|
||||
```
|
||||
|
||||
##### Broadcast sync committee signature
|
||||
##### Broadcast sync committee message
|
||||
|
||||
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.
|
||||
|
||||
@ -312,11 +317,11 @@ def compute_subnets_for_sync_committee(state: BeaconState, validator_index: Vali
|
||||
|
||||
*Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee.
|
||||
|
||||
*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_signature` on each of the distinct subnets.
|
||||
*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_message` on each of the distinct subnets.
|
||||
|
||||
#### Sync committee contributions
|
||||
|
||||
Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block.
|
||||
Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeMessage`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block.
|
||||
|
||||
##### Aggregation selection
|
||||
|
||||
@ -347,9 +352,9 @@ def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
|
||||
|
||||
##### Construct sync committee contribution
|
||||
|
||||
If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.
|
||||
If a validator is selected to aggregate the `SyncCommitteeMessage`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.
|
||||
|
||||
Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:
|
||||
Given all of the (valid) collected `sync_committee_messages: Set[SyncCommitteeMessage]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:
|
||||
|
||||
###### Slot
|
||||
|
||||
@ -357,7 +362,7 @@ Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the
|
||||
|
||||
###### Beacon block root
|
||||
|
||||
Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_signatures`.
|
||||
Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_messages`.
|
||||
|
||||
###### Subcommittee index
|
||||
|
||||
@ -366,15 +371,15 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co
|
||||
###### Aggregation bits
|
||||
|
||||
Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
|
||||
An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.
|
||||
An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_message.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.
|
||||
|
||||
For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`, then this sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.
|
||||
|
||||
*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`.
|
||||
*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeMessage`.
|
||||
|
||||
###### Signature
|
||||
|
||||
Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`.
|
||||
Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_messages` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`.
|
||||
|
||||
The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee.
|
||||
|
||||
|
@ -13,6 +13,7 @@
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
- [Domain types](#domain-types)
|
||||
- [Shard Work Status](#shard-work-status)
|
||||
- [Preset](#preset)
|
||||
- [Misc](#misc-1)
|
||||
- [Shard block samples](#shard-block-samples)
|
||||
@ -32,6 +33,7 @@
|
||||
- [`ShardBlobReference`](#shardblobreference)
|
||||
- [`SignedShardBlobReference`](#signedshardblobreference)
|
||||
- [`ShardProposerSlashing`](#shardproposerslashing)
|
||||
- [`ShardWork`](#shardwork)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc-2)
|
||||
- [`next_power_of_two`](#next_power_of_two)
|
||||
@ -49,14 +51,14 @@
|
||||
- [`compute_committee_index_from_shard`](#compute_committee_index_from_shard)
|
||||
- [Block processing](#block-processing)
|
||||
- [Operations](#operations)
|
||||
- [New Attestation processing](#new-attestation-processing)
|
||||
- [Updated `process_attestation`](#updated-process_attestation)
|
||||
- [`update_pending_votes`](#update_pending_votes)
|
||||
- [`process_shard_header`](#process_shard_header)
|
||||
- [Shard Proposer slashings](#shard-proposer-slashings)
|
||||
- [Extended Attestation processing](#extended-attestation-processing)
|
||||
- [`process_shard_header`](#process_shard_header)
|
||||
- [`process_shard_proposer_slashing`](#process_shard_proposer_slashing)
|
||||
- [Epoch transition](#epoch-transition)
|
||||
- [Pending headers](#pending-headers)
|
||||
- [Shard epoch increment](#shard-epoch-increment)
|
||||
- [`process_pending_shard_confirmations`](#process_pending_shard_confirmations)
|
||||
- [`charge_confirmed_shard_fees`](#charge_confirmed_shard_fees)
|
||||
- [`reset_pending_shard_work`](#reset_pending_shard_work)
|
||||
- [`process_shard_epoch_increment`](#process_shard_epoch_increment)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@ -99,6 +101,14 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` |
|
||||
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` |
|
||||
|
||||
### Shard Work Status
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `SHARD_WORK_UNCONFIRMED` | `0` | Unconfirmed, nullified after confirmation time elapses |
|
||||
| `SHARD_WORK_CONFIRMED` | `1` | Confirmed, reduced to just the commitment |
|
||||
| `SHARD_WORK_PENDING` | `2` | Pending, a list of competing headers |
|
||||
|
||||
## Preset
|
||||
|
||||
### Misc
|
||||
@ -109,6 +119,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* |
|
||||
| `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |
|
||||
| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
|
||||
| `SHARD_STATE_MEMORY_SLOTS` | `uint64(2**8)` (= 256) | Number of slots for which shard commitments and confirmation status is directly available in the state |
|
||||
|
||||
### Shard block samples
|
||||
|
||||
@ -169,13 +180,12 @@ class BeaconBlockBody(merge.BeaconBlockBody): # [extends The Merge block body]
|
||||
|
||||
```python
|
||||
class BeaconState(merge.BeaconState): # [extends The Merge state]
|
||||
# [Updated fields]
|
||||
# [Updated fields] (Warning: this changes with Altair, Sharding will rebase to use participation-flags)
|
||||
previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
||||
current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
||||
# [New fields]
|
||||
previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH]
|
||||
current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH]
|
||||
grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS]
|
||||
# A ring buffer of the latest slots, with information per active shard.
|
||||
shard_buffer: Vector[List[ShardWork, MAX_SHARDS], SHARD_STATE_MEMORY_SLOTS]
|
||||
shard_gasprice: uint64
|
||||
current_epoch_start_shard: Shard
|
||||
```
|
||||
@ -216,6 +226,7 @@ class ShardBlobHeader(Container):
|
||||
# Slot and shard that this header is intended for
|
||||
slot: Slot
|
||||
shard: Shard
|
||||
# SSZ-summary of ShardBlobBody
|
||||
body_summary: ShardBlobBodySummary
|
||||
# Proposer of the shard-blob
|
||||
proposer_index: ValidatorIndex
|
||||
@ -233,17 +244,16 @@ class SignedShardBlobHeader(Container):
|
||||
|
||||
```python
|
||||
class PendingShardHeader(Container):
|
||||
# Slot and shard that this header is intended for
|
||||
slot: Slot
|
||||
shard: Shard
|
||||
# KZG10 commitment to the data
|
||||
commitment: DataCommitment
|
||||
# hash_tree_root of the ShardHeader (stored so that attestations can be checked against it)
|
||||
root: Root
|
||||
# Who voted for the header
|
||||
votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
|
||||
# Has this header been confirmed?
|
||||
confirmed: boolean
|
||||
# Sum of effective balances of votes
|
||||
weight: Gwei
|
||||
# When the header was last updated, as reference for weight accuracy
|
||||
update_slot: Slot
|
||||
```
|
||||
|
||||
### `ShardBlobReference`
|
||||
@ -253,7 +263,7 @@ class ShardBlobReference(Container):
|
||||
# Slot and shard that this reference is intended for
|
||||
slot: Slot
|
||||
shard: Shard
|
||||
# Hash-tree-root of commitment data
|
||||
# Hash-tree-root of ShardBlobBody
|
||||
body_root: Root
|
||||
# Proposer of the shard-blob
|
||||
proposer_index: ValidatorIndex
|
||||
@ -275,6 +285,18 @@ class ShardProposerSlashing(Container):
|
||||
signed_reference_2: SignedShardBlobReference
|
||||
```
|
||||
|
||||
### `ShardWork`
|
||||
|
||||
```python
|
||||
class ShardWork(Container):
|
||||
# Upon confirmation the data is reduced to just the header.
|
||||
status: Union[ # See Shard Work Status enum
|
||||
None, # SHARD_WORK_UNCONFIRMED
|
||||
DataCommitment, # SHARD_WORK_CONFIRMED
|
||||
List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD] # SHARD_WORK_PENDING
|
||||
]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
@ -448,6 +470,7 @@ def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
|
||||
```python
|
||||
def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard:
|
||||
active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot))
|
||||
assert index < active_shards
|
||||
return Shard((index + get_start_shard(state, slot)) % active_shards)
|
||||
```
|
||||
|
||||
@ -455,8 +478,11 @@ def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: Co
|
||||
|
||||
```python
|
||||
def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex:
|
||||
active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot))
|
||||
return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards)
|
||||
epoch = compute_epoch_at_slot(slot)
|
||||
active_shards = get_active_shard_count(state, epoch)
|
||||
index = CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards)
|
||||
assert index < get_committee_count_per_slot(state, epoch)
|
||||
return index
|
||||
```
|
||||
|
||||
|
||||
@ -497,67 +523,74 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
for_ops(body.voluntary_exits, process_voluntary_exit)
|
||||
```
|
||||
|
||||
### New Attestation processing
|
||||
|
||||
#### Updated `process_attestation`
|
||||
##### Extended Attestation processing
|
||||
|
||||
```python
|
||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
|
||||
phase0.process_attestation(state, attestation)
|
||||
update_pending_votes(state, attestation)
|
||||
update_pending_shard_work(state, attestation)
|
||||
```
|
||||
|
||||
#### `update_pending_votes`
|
||||
|
||||
```python
|
||||
def update_pending_votes(state: BeaconState, attestation: Attestation) -> None:
|
||||
# Find and update the PendingShardHeader object, invalid block if pending header not in state
|
||||
if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state):
|
||||
pending_headers = state.current_epoch_pending_shard_headers
|
||||
else:
|
||||
pending_headers = state.previous_epoch_pending_shard_headers
|
||||
|
||||
def update_pending_shard_work(state: BeaconState, attestation: Attestation) -> None:
|
||||
attestation_shard = compute_shard_from_committee_index(
|
||||
state,
|
||||
attestation.data.slot,
|
||||
attestation.data.index,
|
||||
)
|
||||
pending_header = None
|
||||
for header in pending_headers:
|
||||
if (
|
||||
header.root == attestation.data.shard_header_root
|
||||
and header.slot == attestation.data.slot
|
||||
and header.shard == attestation_shard
|
||||
):
|
||||
pending_header = header
|
||||
assert pending_header is not None
|
||||
buffer_index = attestation.data.slot % SHARD_STATE_MEMORY_SLOTS
|
||||
committee_work = state.shard_buffer[buffer_index][attestation_shard]
|
||||
|
||||
for i in range(len(pending_header.votes)):
|
||||
pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i]
|
||||
|
||||
# Check if the PendingShardHeader is eligible for expedited confirmation
|
||||
# Requirement 1: nothing else confirmed
|
||||
all_candidates = [
|
||||
c for c in pending_headers if
|
||||
(c.slot, c.shard) == (pending_header.slot, pending_header.shard)
|
||||
]
|
||||
if True in [c.confirmed for c in all_candidates]:
|
||||
# Skip attestation vote accounting if the header is not pending
|
||||
if committee_work.status.selector != SHARD_WORK_PENDING:
|
||||
# TODO In Altair: set participation bit flag, if attestation matches winning header.
|
||||
return
|
||||
|
||||
# Requirement 2: >= 2/3 of balance attesting
|
||||
participants = get_attesting_indices(state, attestation.data, pending_header.votes)
|
||||
participants_balance = get_total_balance(state, participants)
|
||||
current_headers: Sequence[PendingShardHeader] = committee_work.status.value
|
||||
|
||||
# Find the corresponding header, abort if it cannot be found
|
||||
header_index = [header.root for header in current_headers].index(attestation.data.shard_header_root)
|
||||
|
||||
pending_header: PendingShardHeader = current_headers[header_index]
|
||||
full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
|
||||
full_committee_balance = get_total_balance(state, set(full_committee))
|
||||
if participants_balance * 3 >= full_committee_balance * 2:
|
||||
pending_header.confirmed = True
|
||||
|
||||
# The weight may be outdated if it is not the initial weight, and from a previous epoch
|
||||
if pending_header.weight != 0 and compute_epoch_at_slot(pending_header.update_slot) < get_current_epoch(state):
|
||||
pending_header.weight = sum(state.validators[index].effective_balance for index, bit
|
||||
in zip(full_committee, pending_header.votes) if bit)
|
||||
|
||||
pending_header.update_slot = state.slot
|
||||
|
||||
full_committee_balance = Gwei(0)
|
||||
# Update votes bitfield in the state, update weights
|
||||
for i, bit in enumerate(attestation.aggregation_bits):
|
||||
weight = state.validators[full_committee[i]].effective_balance
|
||||
full_committee_balance += weight
|
||||
if bit:
|
||||
if not pending_header.votes[i]:
|
||||
pending_header.weight += weight
|
||||
pending_header.votes[i] = True
|
||||
|
||||
# Check if the PendingShardHeader is eligible for expedited confirmation, requiring 2/3 of balance attesting
|
||||
if pending_header.weight * 3 >= full_committee_balance * 2:
|
||||
# TODO In Altair: set participation bit flag for voters of this early winning header
|
||||
if pending_header.commitment == DataCommitment():
|
||||
# The committee voted to not confirm anything
|
||||
state.shard_buffer[buffer_index][attestation_shard].change(
|
||||
selector=SHARD_WORK_UNCONFIRMED,
|
||||
value=None,
|
||||
)
|
||||
else:
|
||||
state.shard_buffer[buffer_index][attestation_shard].change(
|
||||
selector=SHARD_WORK_CONFIRMED,
|
||||
value=pending_header.commitment,
|
||||
)
|
||||
```
|
||||
|
||||
#### `process_shard_header`
|
||||
##### `process_shard_header`
|
||||
|
||||
```python
|
||||
def process_shard_header(state: BeaconState,
|
||||
signed_header: SignedShardBlobHeader) -> None:
|
||||
def process_shard_header(state: BeaconState, signed_header: SignedShardBlobHeader) -> None:
|
||||
header = signed_header.message
|
||||
# Verify the header is not 0, and not from the future.
|
||||
assert Slot(0) < header.slot <= state.slot
|
||||
@ -569,6 +602,16 @@ def process_shard_header(state: BeaconState,
|
||||
# Verify that the block root matches,
|
||||
# to ensure the header will only be included in this specific Beacon Chain sub-tree.
|
||||
assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1)
|
||||
|
||||
# Check that this data is still pending
|
||||
committee_work = state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard]
|
||||
assert committee_work.status.selector == SHARD_WORK_PENDING
|
||||
|
||||
# Check that this header is not yet in the pending list
|
||||
current_headers: Sequence[PendingShardHeader] = committee_work.status.value
|
||||
header_root = hash_tree_root(header)
|
||||
assert header_root not in [pending_header.root for pending_header in current_headers]
|
||||
|
||||
# Verify proposer
|
||||
assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard)
|
||||
# Verify signature
|
||||
@ -584,27 +627,20 @@ def process_shard_header(state: BeaconState,
|
||||
== bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length])
|
||||
)
|
||||
|
||||
# Get the correct pending header list
|
||||
if header_epoch == get_current_epoch(state):
|
||||
pending_headers = state.current_epoch_pending_shard_headers
|
||||
else:
|
||||
pending_headers = state.previous_epoch_pending_shard_headers
|
||||
|
||||
header_root = hash_tree_root(header)
|
||||
# Check that this header is not yet in the pending list
|
||||
assert header_root not in [pending_header.root for pending_header in pending_headers]
|
||||
|
||||
# Include it in the pending list
|
||||
# Initialize the pending header
|
||||
index = compute_committee_index_from_shard(state, header.slot, header.shard)
|
||||
committee_length = len(get_beacon_committee(state, header.slot, index))
|
||||
pending_headers.append(PendingShardHeader(
|
||||
slot=header.slot,
|
||||
shard=header.shard,
|
||||
initial_votes = Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length)
|
||||
pending_header = PendingShardHeader(
|
||||
commitment=body_summary.commitment,
|
||||
root=header_root,
|
||||
votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
|
||||
confirmed=False,
|
||||
))
|
||||
votes=initial_votes,
|
||||
weight=0,
|
||||
update_slot=state.slot,
|
||||
)
|
||||
|
||||
# Include it in the pending list
|
||||
state.shard_buffer[header.slot % SHARD_STATE_MEMORY_SLOTS][header.shard].append(pending_header)
|
||||
```
|
||||
|
||||
The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values),
|
||||
@ -612,7 +648,7 @@ the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1
|
||||
where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`.
|
||||
The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are not hidden higher-order terms in the polynomial, which would thwart reconstruction).
|
||||
|
||||
##### Shard Proposer slashings
|
||||
##### `process_shard_proposer_slashing`
|
||||
|
||||
```python
|
||||
def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None:
|
||||
@ -645,19 +681,18 @@ This epoch transition overrides the Merge epoch transition:
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
# Sharding
|
||||
process_pending_shard_confirmations(state)
|
||||
charge_confirmed_shard_fees(state)
|
||||
reset_pending_shard_work(state)
|
||||
|
||||
# Phase0
|
||||
process_justification_and_finalization(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
|
||||
process_slashings(state)
|
||||
|
||||
# Sharding
|
||||
process_pending_headers(state)
|
||||
charge_confirmed_header_fees(state)
|
||||
reset_pending_headers(state)
|
||||
|
||||
# Final updates
|
||||
# Phase 0
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
@ -668,10 +703,10 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_shard_epoch_increment(state)
|
||||
```
|
||||
|
||||
#### Pending headers
|
||||
#### `process_pending_shard_confirmations`
|
||||
|
||||
```python
|
||||
def process_pending_headers(state: BeaconState) -> None:
|
||||
def process_pending_shard_confirmations(state: BeaconState) -> None:
|
||||
# Pending header processing applies to the previous epoch.
|
||||
# Skip if `GENESIS_EPOCH` because no prior epoch to process.
|
||||
if get_current_epoch(state) == GENESIS_EPOCH:
|
||||
@ -679,108 +714,93 @@ def process_pending_headers(state: BeaconState) -> None:
|
||||
|
||||
previous_epoch = get_previous_epoch(state)
|
||||
previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
|
||||
for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
|
||||
for shard_index in range(get_active_shard_count(state, previous_epoch)):
|
||||
shard = Shard(shard_index)
|
||||
# Pending headers for this (slot, shard) combo
|
||||
candidates = [
|
||||
c for c in state.previous_epoch_pending_shard_headers
|
||||
if (c.slot, c.shard) == (slot, shard)
|
||||
]
|
||||
# If any candidates already confirmed, skip
|
||||
if True in [c.confirmed for c in candidates]:
|
||||
continue
|
||||
|
||||
# The entire committee (and its balance)
|
||||
index = compute_committee_index_from_shard(state, slot, shard)
|
||||
full_committee = get_beacon_committee(state, slot, index)
|
||||
# The set of voters who voted for each header (and their total balances)
|
||||
voting_sets = [
|
||||
set(v for i, v in enumerate(full_committee) if c.votes[i])
|
||||
for c in candidates
|
||||
]
|
||||
voting_balances = [
|
||||
get_total_balance(state, voters)
|
||||
for voters in voting_sets
|
||||
]
|
||||
# Get the index with the most total balance voting for them.
|
||||
# NOTE: if two choices get exactly the same voting balance,
|
||||
# the candidate earlier in the list wins
|
||||
if max(voting_balances) > 0:
|
||||
winning_index = voting_balances.index(max(voting_balances))
|
||||
else:
|
||||
# If no votes, zero wins
|
||||
winning_index = [c.root for c in candidates].index(Root())
|
||||
candidates[winning_index].confirmed = True
|
||||
for slot_index in range(SLOTS_PER_EPOCH):
|
||||
for shard in range(MAX_SHARDS):
|
||||
state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment()
|
||||
confirmed_headers = [candidate for candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed]
|
||||
for header in confirmed_headers:
|
||||
state.grandparent_epoch_confirmed_commitments[header.shard][header.slot % SLOTS_PER_EPOCH] = header.commitment
|
||||
# Mark stale headers as unconfirmed
|
||||
for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
|
||||
buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
|
||||
for shard_index in range(len(state.shard_buffer[buffer_index])):
|
||||
committee_work = state.shard_buffer[buffer_index][shard_index]
|
||||
if committee_work.status.selector == SHARD_WORK_PENDING:
|
||||
winning_header = max(committee_work.status.value, key=lambda header: header.weight)
|
||||
# TODO In Altair: set participation bit flag of voters for winning header
|
||||
if winning_header.commitment == DataCommitment():
|
||||
committee_work.status.change(selector=SHARD_WORK_UNCONFIRMED, value=None)
|
||||
else:
|
||||
committee_work.status.change(selector=SHARD_WORK_CONFIRMED, value=winning_header.commitment)
|
||||
```
|
||||
|
||||
#### `charge_confirmed_shard_fees`
|
||||
|
||||
```python
|
||||
def charge_confirmed_header_fees(state: BeaconState) -> None:
|
||||
def charge_confirmed_shard_fees(state: BeaconState) -> None:
|
||||
new_gasprice = state.shard_gasprice
|
||||
previous_epoch = get_previous_epoch(state)
|
||||
previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
|
||||
adjustment_quotient = (
|
||||
get_active_shard_count(state, previous_epoch)
|
||||
* SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
|
||||
)
|
||||
previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
|
||||
# Iterate through confirmed shard-headers
|
||||
for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
|
||||
for shard_index in range(get_active_shard_count(state, previous_epoch)):
|
||||
shard = Shard(shard_index)
|
||||
confirmed_candidates = [
|
||||
c for c in state.previous_epoch_pending_shard_headers
|
||||
if (c.slot, c.shard, c.confirmed) == (slot, shard, True)
|
||||
]
|
||||
if not any(confirmed_candidates):
|
||||
continue
|
||||
candidate = confirmed_candidates[0]
|
||||
buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
|
||||
for shard_index in range(len(state.shard_buffer[buffer_index])):
|
||||
committee_work = state.shard_buffer[buffer_index][shard_index]
|
||||
if committee_work.status.selector == SHARD_WORK_CONFIRMED:
|
||||
commitment: DataCommitment = committee_work.status.value
|
||||
# Charge EIP 1559 fee
|
||||
proposer = get_shard_proposer_index(state, slot, Shard(shard_index))
|
||||
fee = (
|
||||
(state.shard_gasprice * commitment.length)
|
||||
// TARGET_SAMPLES_PER_BLOCK
|
||||
)
|
||||
decrease_balance(state, proposer, fee)
|
||||
|
||||
# Charge EIP 1559 fee
|
||||
proposer = get_shard_proposer_index(state, slot, shard)
|
||||
fee = (
|
||||
(state.shard_gasprice * candidate.commitment.length)
|
||||
// TARGET_SAMPLES_PER_BLOCK
|
||||
)
|
||||
decrease_balance(state, proposer, fee)
|
||||
|
||||
# Track updated gas price
|
||||
new_gasprice = compute_updated_gasprice(
|
||||
new_gasprice,
|
||||
candidate.commitment.length,
|
||||
adjustment_quotient,
|
||||
)
|
||||
# Track updated gas price
|
||||
new_gasprice = compute_updated_gasprice(
|
||||
new_gasprice,
|
||||
commitment.length,
|
||||
adjustment_quotient,
|
||||
)
|
||||
state.shard_gasprice = new_gasprice
|
||||
```
|
||||
|
||||
#### `reset_pending_shard_work`
|
||||
|
||||
```python
|
||||
def reset_pending_headers(state: BeaconState) -> None:
|
||||
state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers
|
||||
state.current_epoch_pending_shard_headers = []
|
||||
# Add dummy "empty" PendingShardHeader (default vote for if no shard header available)
|
||||
def reset_pending_shard_work(state: BeaconState) -> None:
|
||||
# Add dummy "empty" PendingShardHeader (default vote if no shard header is available)
|
||||
next_epoch = get_current_epoch(state) + 1
|
||||
next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch)
|
||||
committees_per_slot = get_committee_count_per_slot(state, next_epoch)
|
||||
active_shards = get_active_shard_count(state, next_epoch)
|
||||
|
||||
for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH):
|
||||
for index in range(committees_per_slot):
|
||||
committee_index = CommitteeIndex(index)
|
||||
shard = compute_shard_from_committee_index(state, slot, committee_index)
|
||||
buffer_index = slot % SHARD_STATE_MEMORY_SLOTS
|
||||
|
||||
# Reset the shard work tracking
|
||||
state.shard_buffer[buffer_index] = [ShardWork() for _ in range(active_shards)]
|
||||
|
||||
start_shard = get_start_shard(state, slot)
|
||||
for committee_index in range(committees_per_slot):
|
||||
shard = (start_shard + committee_index) % active_shards
|
||||
# a committee is available, initialize a pending shard-header list
|
||||
committee_length = len(get_beacon_committee(state, slot, committee_index))
|
||||
state.current_epoch_pending_shard_headers.append(PendingShardHeader(
|
||||
slot=slot,
|
||||
shard=shard,
|
||||
commitment=DataCommitment(),
|
||||
root=Root(),
|
||||
votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
|
||||
confirmed=False,
|
||||
))
|
||||
state.shard_buffer[buffer_index][shard].change(
|
||||
selector=SHARD_WORK_PENDING,
|
||||
value=List[PendingShardHeader, MAX_SHARD_HEADERS_PER_SHARD](
|
||||
PendingShardHeader(
|
||||
commitment=DataCommitment(),
|
||||
root=Root(),
|
||||
votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
|
||||
weight=0,
|
||||
update_slot=slot,
|
||||
)
|
||||
)
|
||||
)
|
||||
# a shard without committee available defaults to SHARD_WORK_UNCONFIRMED.
|
||||
```
|
||||
|
||||
#### Shard epoch increment
|
||||
#### `process_shard_epoch_increment`
|
||||
|
||||
```python
|
||||
def process_shard_epoch_increment(state: BeaconState) -> None:
|
||||
|
@ -17,9 +17,11 @@
|
||||
- [SignedShardBlob](#signedshardblob)
|
||||
- [Gossip domain](#gossip-domain)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Shard blobs: `shard_blob_{subnet_id}`](#shard-blobs-shard_blob_subnet_id)
|
||||
- [Shard header: `shard_header`](#shard-header-shard_header)
|
||||
- [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing)
|
||||
- [Shard blob subnets](#shard-blob-subnets)
|
||||
- [`shard_blob_{subnet_id}`](#shard_blob_subnet_id)
|
||||
- [Global topics](#global-topics)
|
||||
- [`shard_header`](#shard_header)
|
||||
- [`shard_proposer_slashing`](#shard_proposer_slashing)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@ -28,7 +30,7 @@
|
||||
## Introduction
|
||||
|
||||
The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and
|
||||
[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite.
|
||||
[Altair](../altair/p2p-interface.md) network specifications, and assumes them as pre-requisite.
|
||||
The adjustments and additions for Shards are outlined in this document.
|
||||
|
||||
## Constants
|
||||
@ -64,6 +66,7 @@ class ShardBlob(Container):
|
||||
# Slot and shard that this blob is intended for
|
||||
slot: Slot
|
||||
shard: Shard
|
||||
# Shard data with related commitments and beacon anchor
|
||||
body: ShardBlobBody
|
||||
# Proposer of the shard-blob
|
||||
proposer_index: ValidatorIndex
|
||||
@ -88,12 +91,16 @@ Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.
|
||||
| Name | Message Type |
|
||||
|----------------------------------|---------------------------|
|
||||
| `shard_blob_{subnet_id}` | `SignedShardBlob` |
|
||||
| `shard_header` | `SignedShardHeader` |
|
||||
| `shard_header` | `SignedShardBlobHeader` |
|
||||
| `shard_proposer_slashing` | `ShardProposerSlashing` |
|
||||
|
||||
The [DAS network specification](./das-p2p.md) defines additional topics.
|
||||
|
||||
#### Shard blobs: `shard_blob_{subnet_id}`
|
||||
#### Shard blob subnets
|
||||
|
||||
Shard blob subnets are used to propagate shard blobs to subsections of the network.
|
||||
|
||||
##### `shard_blob_{subnet_id}`
|
||||
|
||||
Shard block data, in the form of a `SignedShardBlob` is published to the `shard_blob_{subnet_id}` subnets.
|
||||
|
||||
@ -117,8 +124,10 @@ The following validations MUST pass before forwarding the `signed_blob` (with in
|
||||
(a client MAY queue future blobs for processing at the appropriate slot).
|
||||
- _[IGNORE]_ The `blob` is new enough to still be processed --
|
||||
i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
|
||||
- _[REJECT]_ The shard should have a committee at slot --
|
||||
i.e. validate that `compute_committee_index_from_shard(state, blob.slot, blob.shard)` doesn't raise an error
|
||||
- _[REJECT]_ The shard blob is for the correct subnet --
|
||||
i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
|
||||
i.e. `compute_subnet_for_shard_blob(state, blob.slot, blob.shard) == subnet_id`
|
||||
- _[IGNORE]_ The blob is the first blob with valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
|
||||
- _[REJECT]_ As already limited by the SSZ list-limit, it is important the blob is well-formatted and not too large.
|
||||
- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256 bit range is valid.
|
||||
@ -129,19 +138,25 @@ The following validations MUST pass before forwarding the `signed_blob` (with in
|
||||
the block MAY be queued for later processing while proposers for the blob's branch are calculated --
|
||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||
|
||||
#### Global topics
|
||||
|
||||
#### Shard header: `shard_header`
|
||||
There are two additional global topics for Sharding, one is used to propagate shard blob headers (`shard_header`) to
|
||||
all nodes on the network. Another one is used to propagate validator message (`shard_proposer_slashing`).
|
||||
|
||||
##### `shard_header`
|
||||
|
||||
Shard header data, in the form of a `SignedShardBlobHeader` is published to the global `shard_header` subnet.
|
||||
|
||||
The following validations MUST pass before forwarding the `signed_shard_header` (with inner `message` as `header`) on the network.
|
||||
The following validations MUST pass before forwarding the `signed_shard_blob_header` (with inner `message` as `header`) on the network.
|
||||
- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
||||
i.e. validate that `header.slot <= current_slot`
|
||||
(a client MAY queue future headers for processing at the appropriate slot).
|
||||
- _[IGNORE]_ The `header` is new enough to be still be processed --
|
||||
i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
|
||||
- _[IGNORE]_ The header is the first header with valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
|
||||
- _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey.
|
||||
- _[REJECT]_ The shard should have a committee at slot --
|
||||
i.e. validate that `compute_committee_index_from_shard(state, header.slot, header.shard)` doesn't raise an error
|
||||
- _[REJECT]_ The proposer signature, `signed_shard_blob_header.signature`, is valid with respect to the `proposer_index` pubkey.
|
||||
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot
|
||||
in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
|
||||
If the `proposer_index` cannot immediately be verified against the expected shuffling,
|
||||
@ -149,7 +164,7 @@ The following validations MUST pass before forwarding the `signed_shard_header`
|
||||
in such a case _do not_ `REJECT`, instead `IGNORE` this message.
|
||||
|
||||
|
||||
#### Shard proposer slashing: `shard_proposer_slashing`
|
||||
##### `shard_proposer_slashing`
|
||||
|
||||
Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic.
|
||||
|
||||
|
@ -17,10 +17,10 @@
|
||||
- [Serialization](#serialization)
|
||||
- [`uintN`](#uintn)
|
||||
- [`boolean`](#boolean)
|
||||
- [`null`](#null)
|
||||
- [`Bitvector[N]`](#bitvectorn)
|
||||
- [`Bitlist[N]`](#bitlistn)
|
||||
- [Vectors, containers, lists, unions](#vectors-containers-lists-unions)
|
||||
- [Vectors, containers, lists](#vectors-containers-lists)
|
||||
- [Union](#union)
|
||||
- [Deserialization](#deserialization)
|
||||
- [Merkleization](#merkleization)
|
||||
- [Summaries and expansions](#summaries-and-expansions)
|
||||
@ -61,7 +61,7 @@
|
||||
* **bitlist**: ordered variable-length collection of `boolean` values, limited to `N` bits
|
||||
* notation `Bitlist[N]`
|
||||
* **union**: union type containing one of the given subtypes
|
||||
* notation `Union[type_0, type_1, ...]`, e.g. `union[null, uint64]`
|
||||
* notation `Union[type_0, type_1, ...]`, e.g. `union[None, uint64, uint32]`
|
||||
|
||||
*Note*: Both `Vector[boolean, N]` and `Bitvector[N]` are valid, yet distinct due to their different serialization requirements. Similarly, both `List[boolean, N]` and `Bitlist[N]` are valid, yet distinct. Generally `Bitvector[N]`/`Bitlist[N]` are preferred because of their serialization efficiencies.
|
||||
|
||||
@ -77,7 +77,6 @@ For convenience we alias:
|
||||
* `byte` to `uint8` (this is a basic type)
|
||||
* `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
|
||||
* `ByteList[N]` to `List[byte, N]`
|
||||
* `null`: `{}`
|
||||
|
||||
### Default values
|
||||
Assuming a helper function `default(type)` which returns the default value for `type`, we can recursively define the default value for all types.
|
||||
@ -101,7 +100,7 @@ An SSZ object is called zeroed (and thus, `is_zero(object)` returns true) if it
|
||||
|
||||
- Empty vector types (`Vector[type, 0]`, `Bitvector[0]`) are illegal.
|
||||
- Containers with no fields are illegal.
|
||||
- The `null` type is only legal as the first type in a union subtype (i.e. with type index zero).
|
||||
- The `None` type option in a `Union` type is only legal as the first option (i.e. with index zero).
|
||||
|
||||
## Serialization
|
||||
|
||||
@ -123,12 +122,6 @@ assert value in (True, False)
|
||||
return b"\x01" if value is True else b"\x00"
|
||||
```
|
||||
|
||||
### `null`
|
||||
|
||||
```python
|
||||
return b""
|
||||
```
|
||||
|
||||
### `Bitvector[N]`
|
||||
|
||||
```python
|
||||
@ -150,7 +143,7 @@ array[len(value) // 8] |= 1 << (len(value) % 8)
|
||||
return bytes(array)
|
||||
```
|
||||
|
||||
### Vectors, containers, lists, unions
|
||||
### Vectors, containers, lists
|
||||
|
||||
```python
|
||||
# Recursively serialize
|
||||
@ -170,19 +163,31 @@ fixed_parts = [part if part != None else variable_offsets[i] for i, part in enum
|
||||
return b"".join(fixed_parts + variable_parts)
|
||||
```
|
||||
|
||||
If `value` is a union type:
|
||||
### Union
|
||||
|
||||
Define value as an object that has properties `value.value` with the contained value, and `value.type_index` which indexes the type.
|
||||
A `value` as `Union[T...]` type has properties `value.value` with the contained value, and `value.selector` which indexes the selected `Union` type option `T`.
|
||||
|
||||
A `Union`:
|
||||
- May have multiple selectors with the same type.
|
||||
- Should not use selectors above 127 (i.e. highest bit is set), these are reserved for backwards compatible extensions.
|
||||
- Must have at least 1 type option.
|
||||
- May have `None` as first type option, i.e. `selector == 0`
|
||||
- Must have at least 2 type options if the first is `None`
|
||||
- Is always considered a variable-length type, even if all type options have an equal fixed-length.
|
||||
|
||||
```python
|
||||
serialized_bytes = serialize(value.value)
|
||||
serialized_type_index = value.type_index.to_bytes(BYTES_PER_LENGTH_OFFSET, "little")
|
||||
return serialized_type_index + serialized_bytes
|
||||
if value.value is None:
|
||||
assert value.selector == 0
|
||||
return b"\x00"
|
||||
else:
|
||||
serialized_bytes = serialize(value.value)
|
||||
serialized_selector_index = value.selector.to_bytes(1, "little")
|
||||
return serialized_selector_index + serialized_bytes
|
||||
```
|
||||
|
||||
## Deserialization
|
||||
|
||||
Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to.
|
||||
Because serialization is an injective function (i.e. two distinct objects of the same type will serialize to different values) any bytestring has at most one object it could deserialize to.
|
||||
|
||||
Deserialization can be implemented using a recursive algorithm. The deserialization of basic objects is easy, and from there we can find a simple recursive algorithm for all fixed-size objects. For variable-size objects we have to do one of the following depending on what kind of object it is:
|
||||
|
||||
@ -191,12 +196,14 @@ Deserialization can be implemented using a recursive algorithm. The deserializat
|
||||
* The size of each object in the vector/list can be inferred from the difference of two offsets. To get the size of the last object, the total number of bytes has to be known (it is not generally possible to deserialize an SSZ object of unknown length)
|
||||
* Containers follow the same principles as vectors, with the difference that there may be fixed-size objects in a container as well. This means the `fixed_parts` data will contain offsets as well as fixed-size objects.
|
||||
* In the case of bitlists, the length in bits cannot be uniquely inferred from the number of bytes in the object. Because of this, they have a bit at the end that is always set. This bit has to be used to infer the size of the bitlist in bits.
|
||||
* In the case of unions, the first byte of the deserialization scope is deserialized as type selector, the remainder of the scope is deserialized as the selected type.
|
||||
|
||||
Note that deserialization requires hardening against invalid inputs. A non-exhaustive list:
|
||||
|
||||
- Offsets: out of order, out of range, mismatching minimum element size.
|
||||
- Scope: Extra unused bytes, not aligned with element size.
|
||||
- More elements than a list limit allows. Part of enforcing consensus.
|
||||
- An out-of-bounds selected index in an `Union`
|
||||
|
||||
Efficient algorithms for computing this object can be found in [the implementations](#implementations).
|
||||
|
||||
@ -227,7 +234,7 @@ We first define helper functions:
|
||||
- If `1` chunk: the root is the chunk itself.
|
||||
- If `> 1` chunks: merkleize as binary tree.
|
||||
* `mix_in_length`: Given a Merkle root `root` and a length `length` (`"uint256"` little-endian serialization) return `hash(root + length)`.
|
||||
* `mix_in_type`: Given a Merkle root `root` and a type_index `type_index` (`"uint256"` little-endian serialization) return `hash(root + type_index)`.
|
||||
* `mix_in_selector`: Given a Merkle root `root` and a type selector `selector` (`"uint256"` little-endian serialization) return `hash(root + selector)`.
|
||||
|
||||
We now define Merkleization `hash_tree_root(value)` of an object `value` recursively:
|
||||
|
||||
@ -237,7 +244,8 @@ We now define Merkleization `hash_tree_root(value)` of an object `value` recursi
|
||||
* `mix_in_length(merkleize(pack_bits(value), limit=chunk_count(type)), len(value))` if `value` is a bitlist.
|
||||
* `merkleize([hash_tree_root(element) for element in value])` if `value` is a vector of composite objects or a container.
|
||||
* `mix_in_length(merkleize([hash_tree_root(element) for element in value], limit=chunk_count(type)), len(value))` if `value` is a list of composite objects.
|
||||
* `mix_in_type(merkleize(value.value), value.type_index)` if `value` is of union type.
|
||||
* `mix_in_selector(hash_tree_root(value.value), value.selector)` if `value` is of union type, and `value.value` is not `None`
|
||||
* `mix_in_selector(Bytes32(), 0)` if `value` is of union type, and `value.value` is `None`
|
||||
|
||||
## Summaries and expansions
|
||||
|
||||
|
@ -1,4 +1,3 @@
|
||||
from collections import Counter
|
||||
import random
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
@ -13,6 +12,9 @@ from eth2spec.test.helpers.constants import (
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
compute_sync_committee_participant_reward_and_penalty,
|
||||
compute_sync_committee_proposer_reward,
|
||||
compute_committee_indices,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
expect_assertion_error,
|
||||
@ -61,15 +63,6 @@ def get_committee_indices(spec, state, duplicates=False):
|
||||
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
|
||||
|
||||
|
||||
def compute_committee_indices(spec, state, committee):
|
||||
"""
|
||||
Given a ``committee``, calculate and return the related indices
|
||||
"""
|
||||
all_pubkeys = [v.pubkey for v in state.validators]
|
||||
committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
|
||||
return committee_indices
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@ -115,65 +108,20 @@ def test_invalid_signature_extra_participant(spec, state):
|
||||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
def compute_sync_committee_inclusion_reward(spec,
|
||||
state,
|
||||
participant_index,
|
||||
committee_indices,
|
||||
committee_bits):
|
||||
total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
|
||||
max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
|
||||
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
|
||||
max_slot_rewards = spec.Gwei(
|
||||
max_epoch_rewards * len(included_indices)
|
||||
// len(committee_indices) // spec.SLOTS_PER_EPOCH
|
||||
)
|
||||
|
||||
# Compute the participant and proposer sync rewards
|
||||
committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
|
||||
committee_effective_balance = max(spec.EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance)
|
||||
effective_balance = state.validators[participant_index].effective_balance
|
||||
return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
|
||||
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
|
||||
multiplicities = Counter(included_indices)
|
||||
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, participant_index, committee_indices, committee_bits,
|
||||
)
|
||||
return spec.Gwei(inclusion_reward * multiplicities[participant_index])
|
||||
|
||||
|
||||
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
|
||||
proposer_reward = 0
|
||||
for index, bit in zip(committee_indices, committee_bits):
|
||||
if not bit:
|
||||
continue
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(
|
||||
spec, state, index, committee_indices, committee_bits,
|
||||
)
|
||||
proposer_reward_denominator = (
|
||||
(spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
|
||||
* spec.WEIGHT_DENOMINATOR
|
||||
// spec.PROPOSER_WEIGHT
|
||||
)
|
||||
proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator)
|
||||
return proposer_reward
|
||||
|
||||
|
||||
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
|
||||
for index in range(len(post_state.validators)):
|
||||
reward = 0
|
||||
penalty = 0
|
||||
if index in committee_indices:
|
||||
reward += compute_sync_committee_participant_reward(
|
||||
_reward, _penalty = compute_sync_committee_participant_reward_and_penalty(
|
||||
spec,
|
||||
pre_state,
|
||||
index,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
reward += _reward
|
||||
penalty += _penalty
|
||||
|
||||
if proposer_index == index:
|
||||
reward += compute_sync_committee_proposer_reward(
|
||||
@ -183,7 +131,7 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indic
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
assert post_state.balances[index] == pre_state.balances[index] + reward
|
||||
assert post_state.balances[index] == pre_state.balances[index] + reward - penalty
|
||||
|
||||
|
||||
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
|
||||
|
@ -1,7 +1,7 @@
|
||||
import random
|
||||
from eth2spec.test.context import fork_transition_test
|
||||
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_block
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot, next_epoch_via_signed_block
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block
|
||||
from eth2spec.test.helpers.attestations import next_slots_with_attestations
|
||||
|
||||
@ -261,12 +261,12 @@ def _run_transition_test_with_attestations(state,
|
||||
assert current_epoch == spec.GENESIS_EPOCH
|
||||
|
||||
# skip genesis epoch to avoid dealing with some edge cases...
|
||||
block = next_epoch_via_block(spec, state)
|
||||
block = next_epoch_via_signed_block(spec, state)
|
||||
|
||||
# regular state transition until fork:
|
||||
fill_cur_epoch = False
|
||||
fill_prev_epoch = True
|
||||
blocks = [pre_tag(sign_block(spec, state, block))]
|
||||
blocks = [pre_tag(block)]
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
for _ in range(current_epoch, fork_epoch - 1):
|
||||
_, blocks_in_epoch, state = next_slots_with_attestations(
|
||||
@ -414,8 +414,8 @@ def test_transition_with_no_attestations_until_after_fork(state, fork_epoch, spe
|
||||
|
||||
# continue regular state transition but add attestations
|
||||
# for enough epochs to finalize the ``fork_epoch``
|
||||
block = next_epoch_via_block(post_spec, state)
|
||||
blocks.append(post_tag(sign_block(post_spec, state, block)))
|
||||
block = next_epoch_via_signed_block(post_spec, state)
|
||||
blocks.append(post_tag(block))
|
||||
for _ in range(4):
|
||||
_, blocks_in_epoch, state = next_slots_with_attestations(
|
||||
post_spec,
|
||||
|
@ -4,7 +4,7 @@ from eth2spec.utils.ssz.ssz_typing import Bitvector
|
||||
from eth2spec.test.helpers.block import build_empty_block
|
||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.helpers.sync_committee import compute_sync_committee_signature
|
||||
from eth2spec.utils.bls import only_with_bls
|
||||
from eth2spec.test.context import (
|
||||
with_altair_and_later,
|
||||
@ -85,12 +85,9 @@ def _get_sync_committee_signature(
|
||||
pubkey = state.current_sync_committee.pubkeys[sync_committee_index]
|
||||
privkey = pubkey_to_privkey[pubkey]
|
||||
|
||||
domain = spec.get_domain(
|
||||
state,
|
||||
spec.DOMAIN_SYNC_COMMITTEE,
|
||||
return compute_sync_committee_signature(
|
||||
spec, state, target_slot, privkey, block_root=target_block_root
|
||||
)
|
||||
signing_data = spec.compute_signing_root(target_block_root, domain)
|
||||
return bls.Sign(privkey, spec.hash_tree_root(signing_data))
|
||||
|
||||
|
||||
@only_with_bls()
|
||||
|
@ -2,6 +2,10 @@ from eth2spec.test.context import is_post_altair
|
||||
from eth2spec.test.helpers.block_header import sign_block_header
|
||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||
from eth2spec.test.helpers.state import get_balance
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_committee_indices,
|
||||
compute_sync_committee_participant_reward_and_penalty,
|
||||
)
|
||||
|
||||
|
||||
def get_min_slashing_penalty_quotient(spec):
|
||||
@ -11,7 +15,7 @@ def get_min_slashing_penalty_quotient(spec):
|
||||
return spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
|
||||
|
||||
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block=None):
|
||||
slashed_validator = state.validators[slashed_index]
|
||||
assert slashed_validator.slashed
|
||||
assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||
@ -20,24 +24,51 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||
proposer_index = spec.get_beacon_proposer_index(state)
|
||||
slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
|
||||
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||
|
||||
# Altair introduces sync committee (SC) reward and penalty
|
||||
sc_reward_for_slashed = sc_penalty_for_slashed = sc_reward_for_proposer = sc_penalty_for_proposer = 0
|
||||
if is_post_altair(spec) and block is not None:
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
committee_bits = block.body.sync_aggregate.sync_committee_bits
|
||||
sc_reward_for_slashed, sc_penalty_for_slashed = compute_sync_committee_participant_reward_and_penalty(
|
||||
spec,
|
||||
pre_state,
|
||||
slashed_index,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
sc_reward_for_proposer, sc_penalty_for_proposer = compute_sync_committee_participant_reward_and_penalty(
|
||||
spec,
|
||||
pre_state,
|
||||
proposer_index,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
if proposer_index != slashed_index:
|
||||
# slashed validator lost initial slash penalty
|
||||
assert (
|
||||
get_balance(state, slashed_index)
|
||||
== get_balance(pre_state, slashed_index) - slash_penalty
|
||||
== get_balance(pre_state, slashed_index) - slash_penalty + sc_reward_for_slashed - sc_penalty_for_slashed
|
||||
)
|
||||
# block proposer gained whistleblower reward
|
||||
# >= because proposer could have reported multiple
|
||||
assert (
|
||||
get_balance(state, proposer_index)
|
||||
>= get_balance(pre_state, proposer_index) + whistleblower_reward
|
||||
>= (
|
||||
get_balance(pre_state, proposer_index) + whistleblower_reward
|
||||
+ sc_reward_for_proposer - sc_penalty_for_proposer
|
||||
)
|
||||
)
|
||||
else:
|
||||
# proposer reported themself so get penalty and reward
|
||||
# >= because proposer could have reported multiple
|
||||
assert (
|
||||
get_balance(state, slashed_index)
|
||||
>= get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward
|
||||
>= (
|
||||
get_balance(pre_state, slashed_index) - slash_penalty + whistleblower_reward
|
||||
+ sc_reward_for_slashed - sc_penalty_for_slashed
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
|
@ -58,11 +58,19 @@ def next_epoch(spec, state):
|
||||
spec.process_slots(state, slot)
|
||||
|
||||
|
||||
def next_epoch_via_block(spec, state):
|
||||
def next_epoch_via_block(spec, state, insert_state_root=False):
|
||||
"""
|
||||
Transition to the start slot of the next epoch via a full block transition
|
||||
"""
|
||||
return apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
||||
block = apply_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
||||
if insert_state_root:
|
||||
block.state_root = state.hash_tree_root()
|
||||
return block
|
||||
|
||||
|
||||
def next_epoch_via_signed_block(spec, state):
|
||||
block = next_epoch_via_block(spec, state, insert_state_root=True)
|
||||
return sign_block(spec, state, block)
|
||||
|
||||
|
||||
def get_state_root(spec, state, slot) -> bytes:
|
||||
|
@ -1,3 +1,5 @@
|
||||
from collections import Counter
|
||||
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
@ -33,3 +35,42 @@ def compute_aggregate_sync_committee_signature(spec, state, slot, participants,
|
||||
)
|
||||
)
|
||||
return bls.Aggregate(signatures)
|
||||
|
||||
|
||||
def compute_sync_committee_inclusion_reward(spec, state):
|
||||
total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
total_base_rewards = spec.get_base_reward_per_increment(state) * total_active_increments
|
||||
max_participant_rewards = (total_base_rewards * spec.SYNC_REWARD_WEIGHT
|
||||
// spec.WEIGHT_DENOMINATOR // spec.SLOTS_PER_EPOCH)
|
||||
return max_participant_rewards // spec.SYNC_COMMITTEE_SIZE
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward_and_penalty(
|
||||
spec, state, participant_index, committee_indices, committee_bits):
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(spec, state)
|
||||
|
||||
included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
|
||||
not_included_indices = [index for index, bit in zip(committee_indices, committee_bits) if not bit]
|
||||
included_multiplicities = Counter(included_indices)
|
||||
not_included_multiplicities = Counter(not_included_indices)
|
||||
return (
|
||||
spec.Gwei(inclusion_reward * included_multiplicities[participant_index]),
|
||||
spec.Gwei(inclusion_reward * not_included_multiplicities[participant_index])
|
||||
)
|
||||
|
||||
|
||||
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
|
||||
proposer_reward_denominator = spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT
|
||||
inclusion_reward = compute_sync_committee_inclusion_reward(spec, state)
|
||||
participant_number = committee_bits.count(True)
|
||||
participant_reward = inclusion_reward * spec.PROPOSER_WEIGHT // proposer_reward_denominator
|
||||
return spec.Gwei(participant_reward * participant_number)
|
||||
|
||||
|
||||
def compute_committee_indices(spec, state, committee):
|
||||
"""
|
||||
Given a ``committee``, calculate and return the related indices
|
||||
"""
|
||||
all_pubkeys = [v.pubkey for v in state.validators]
|
||||
committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
|
||||
return committee_indices
|
||||
|
@ -24,6 +24,10 @@ from eth2spec.test.helpers.multi_operations import (
|
||||
run_slash_and_exit,
|
||||
run_test_full_random_operations,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_committee_indices,
|
||||
compute_sync_committee_participant_reward_and_penalty,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import PHASE0, MINIMAL
|
||||
from eth2spec.test.context import (
|
||||
spec_test, spec_state_test, dump_skipping_message,
|
||||
@ -416,7 +420,7 @@ def test_proposer_slashing(spec, state):
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
check_proposer_slashing_effect(spec, pre_state, state, slashed_index)
|
||||
check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@ -491,7 +495,7 @@ def test_multiple_different_proposer_slashings_same_block(spec, state):
|
||||
|
||||
for proposer_slashing in proposer_slashings:
|
||||
slashed_index = proposer_slashing.signed_header_1.message.proposer_index
|
||||
check_proposer_slashing_effect(spec, pre_state, state, slashed_index)
|
||||
check_proposer_slashing_effect(spec, pre_state, state, slashed_index, block)
|
||||
|
||||
|
||||
def check_attester_slashing_effect(spec, pre_state, state, slashed_indices):
|
||||
@ -743,7 +747,8 @@ def test_deposit_top_up(spec, state):
|
||||
initial_balances_len = len(state.balances)
|
||||
validator_pre_balance = get_balance(state, validator_index)
|
||||
|
||||
yield 'pre', state
|
||||
pre_state = state.copy()
|
||||
yield 'pre', pre_state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.deposits.append(deposit)
|
||||
@ -755,7 +760,23 @@ def test_deposit_top_up(spec, state):
|
||||
|
||||
assert len(state.validators) == initial_registry_len
|
||||
assert len(state.balances) == initial_balances_len
|
||||
assert get_balance(state, validator_index) == validator_pre_balance + amount
|
||||
|
||||
# Altair introduces sync committee (sm) reward and penalty
|
||||
sync_committee_reward = sync_committee_penalty = 0
|
||||
if is_post_altair(spec):
|
||||
committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
|
||||
committee_bits = block.body.sync_aggregate.sync_committee_bits
|
||||
sync_committee_reward, sync_committee_penalty = compute_sync_committee_participant_reward_and_penalty(
|
||||
spec,
|
||||
pre_state,
|
||||
validator_index,
|
||||
committee_indices,
|
||||
committee_bits,
|
||||
)
|
||||
|
||||
assert get_balance(state, validator_index) == (
|
||||
validator_pre_balance + amount + sync_committee_reward - sync_committee_penalty
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
|
@ -2,6 +2,7 @@
|
||||
# Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec.
|
||||
|
||||
from remerkleable.complex import Container, Vector, List
|
||||
from remerkleable.union import Union
|
||||
from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256
|
||||
from remerkleable.bitfields import Bitvector, Bitlist
|
||||
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
|
||||
|
Loading…
x
Reference in New Issue
Block a user