Merge branch 'dev' into keep-inactivity-function

Danny Ryan 2021-05-12 12:27:15 -06:00
commit a469b467ff
20 changed files with 701 additions and 150 deletions


@ -23,6 +23,7 @@ The current features are:
* [Deposit Contract](specs/phase0/deposit-contract.md)
* [Honest Validator](specs/phase0/validator.md)
* [P2P Networking](specs/phase0/p2p-interface.md)
* [Weak Subjectivity](specs/phase0/weak-subjectivity.md)

### Altair


@ -31,8 +31,8 @@
  - [`add_flag`](#add_flag)
  - [`has_flag`](#has_flag)
- [Beacon state accessors](#beacon-state-accessors)
  - [`get_next_sync_committee_indices`](#get_next_sync_committee_indices)
  - [`get_next_sync_committee`](#get_next_sync_committee)
  - [`get_base_reward_per_increment`](#get_base_reward_per_increment)
  - [`get_base_reward`](#get_base_reward)
  - [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
@ -256,22 +256,22 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
### Beacon state accessors

#### `get_next_sync_committee_indices`

```python
def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
    """
    Return the sequence of sync committee indices (which may include duplicate indices)
    for the next sync committee, given a ``state`` at a sync committee period boundary.
    Note: Committee can contain duplicate indices for small validator sets (< SYNC_COMMITTEE_SIZE + 128)
    """
    epoch = Epoch(get_current_epoch(state) + 1)
    MAX_RANDOM_BYTE = 2**8 - 1
    active_validator_indices = get_active_validator_indices(state, epoch)
    active_validator_count = uint64(len(active_validator_indices))
    seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
    i = 0
    sync_committee_indices: List[ValidatorIndex] = []
    while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
```
@ -285,25 +285,25 @@ def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[Val
```python
    return sync_committee_indices
```
#### `get_next_sync_committee`

```python
def get_next_sync_committee(state: BeaconState) -> SyncCommittee:
    """
    Return the *next* sync committee for a given ``state``.

    ``SyncCommittee`` contains an aggregate pubkey that enables
    resource-constrained clients to save some computation when verifying
    the sync committee's signature.

    ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_next_sync_committee_indices``
    returns duplicate indices. Implementations must take care when handling
    optimizations relating to aggregation and verification in the presence of duplicates.

    Note: This function should only be called at sync committee period boundaries by ``process_sync_committee_updates``
    as ``get_next_sync_committee_indices`` is not stable within a given period.
    """
    indices = get_next_sync_committee_indices(state)
    pubkeys = [state.validators[index].pubkey for index in indices]
    aggregate_pubkey = bls.AggregatePKs(pubkeys)
    return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
```
@ -668,7 +668,7 @@ def process_sync_committee_updates(state: BeaconState) -> None:
```python
    next_epoch = get_current_epoch(state) + Epoch(1)
    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
        state.current_sync_committee = state.next_sync_committee
        state.next_sync_committee = get_next_sync_committee(state)
```
## Initialize state for pure Altair testnets and test vectors
@ -713,8 +713,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
```python
    state.genesis_validators_root = hash_tree_root(state.validators)

    # [New in Altair] Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    return state
```


@ -38,7 +38,11 @@ Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since
### Upgrading the state

If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `ALTAIR_FORK_EPOCH * SLOTS_PER_EPOCH`.
Care must be taken when transitioning through the fork boundary as implementations will need a modified state transition function that deviates from the Phase 0 spec.
In particular, the outer `state_transition` function defined in the Phase 0 spec will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead the logic must be within `process_slots`.
```python
def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
```
@ -80,8 +84,10 @@ def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
```python
        # Inactivity
        inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
    )

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at the fork boundary
    post.current_sync_committee = get_next_sync_committee(post)
    post.next_sync_committee = get_next_sync_committee(post)

    return post
```
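To illustrate the note above, here is a minimal, non-normative sketch of how a client might hook the upgrade into slot processing. `process_slots_with_fork` is a hypothetical wrapper, and a production client would dispatch slot processing per fork rather than reuse the Phase 0 helpers verbatim:

```python
def process_slots_with_fork(state: phase0.BeaconState, slot: Slot) -> BeaconState:
    # [Sketch] Advance one slot at a time so the fork boundary is observed
    # even when the fork slot itself is skipped (i.e. contains no block).
    while state.slot < slot:
        phase0.process_slots(state, Slot(state.slot + 1))
        if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH:
            state = upgrade_to_altair(state)
    return state
```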


@ -106,9 +106,18 @@ The following validations MUST pass before forwarding the `signed_contribution_a
```python
def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
    # Committees assigned to `slot` sign for `slot - 1`
    # This creates the exceptional logic below when transitioning between sync committee periods
    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
        sync_committee = state.current_sync_committee
    else:
        sync_committee = state.next_sync_committee

    # Return pubkeys for the subcommittee index
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
    i = subcommittee_index * sync_subcommittee_size
    return sync_committee.pubkeys[i:i + sync_subcommittee_size]
```
- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
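For intuition on the period-boundary branch in `get_sync_subcommittee_pubkeys` above, a small worked example (values assume the mainnet configuration `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256` and `SLOTS_PER_EPOCH = 32`):

```python
# [Illustrative] At the last slot of a sync committee period, the committee
# assigned to the *next* slot signs for this slot's head, so the next
# committee's pubkeys are the relevant ones.
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256  # mainnet (assumed)
SLOTS_PER_EPOCH = 32                    # mainnet (assumed)

state_slot = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH - 1  # last slot of period 0
current_period = (state_slot // SLOTS_PER_EPOCH) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD          # 0
next_slot_period = ((state_slot + 1) // SLOTS_PER_EPOCH) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD  # 1
assert current_period != next_slot_period  # -> state.next_sync_committee is used
```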


@ -143,6 +143,11 @@ A validator determines beacon committee assignments and beacon block proposal du
To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.
*Note*: Being assigned to a sync committee for a given `slot` means that the validator produces and broadcasts signatures for `slot - 1` for inclusion in `slot`.
This means that, when assigned to an `epoch`, sync committee signatures must be produced and broadcast for slots in the range `[compute_start_slot_at_epoch(epoch) - 1, compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH - 1)`
rather than for the range `[compute_start_slot_at_epoch(epoch), compute_start_slot_at_epoch(epoch) + SLOTS_PER_EPOCH)`.
To reduce complexity during the Altair fork, sync committees are not expected to produce signatures for `compute_epoch_at_slot(ALTAIR_FORK_EPOCH) - 1`.
```python
def compute_sync_committee_period(epoch: Epoch) -> uint64:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
```
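A non-normative sketch of the shifted signing range from the note above; `sync_committee_signing_slots` is a hypothetical helper, while `compute_start_slot_at_epoch` is the Phase 0 accessor:

```python
def sync_committee_signing_slots(epoch: Epoch) -> Sequence[Slot]:
    # [Sketch] Members assigned to `epoch` sign for `slot - 1`, so the slots
    # they produce signatures for are shifted back by one from the epoch's slots.
    start = compute_start_slot_at_epoch(epoch)
    return [Slot(slot) for slot in range(start - 1, start + SLOTS_PER_EPOCH - 1)]
```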
@ -172,7 +177,6 @@ At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and
Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.

*Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.

A validator should plan for future sync committee assignments by noting which sync committee periods they are selected for participation.
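As a sketch of that advice, assignments can be served from the cached state fields; `sync_committee_for_epoch` is a hypothetical helper, not a spec function:

```python
def sync_committee_for_epoch(state: BeaconState, epoch: Epoch) -> SyncCommittee:
    # [Sketch] Only the current and next periods are available in a given state;
    # anything else would require recomputation that the state does not cache.
    current_period = compute_sync_committee_period(get_current_epoch(state))
    query_period = compute_sync_committee_period(epoch)
    if query_period == current_period:
        return state.current_sync_committee
    elif query_period == current_period + 1:
        return state.next_sync_committee
    raise ValueError("epoch is outside the current and next sync committee periods")
```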
@ -224,12 +228,12 @@ def process_sync_committee_contributions(block: BeaconBlock,
                                         contributions: Set[SyncCommitteeContribution]) -> None:
    sync_aggregate = SyncAggregate()
    signatures = []
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT

    for contribution in contributions:
        subcommittee_index = contribution.subcommittee_index
        for index, participated in enumerate(contribution.aggregation_bits):
            if participated:
                participant_index = sync_subcommittee_size * subcommittee_index + index
                sync_aggregate.sync_committee_bits[participant_index] = True
        signatures.append(contribution.signature)
@ -261,12 +265,12 @@ This process occurs each slot.
##### Prepare sync committee signature

If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every `slot` in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` for the previous slot (`slot - 1`) according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of `slot - 1`.

This logic is triggered upon the same conditions as when producing an attestation.
Meaning, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.
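A minimal sketch of that broadcast trigger, assuming `SECONDS_PER_SLOT` from the configuration; the helper name and the wall-clock arithmetic are illustrative, not normative:

```python
def sync_signature_deadline(genesis_time: uint64, slot: Slot) -> uint64:
    # [Sketch] Produce the signature at the earlier of: receiving a valid block
    # for `slot`, or this deadline one-third of the way into the slot.
    slot_start = genesis_time + slot * SECONDS_PER_SLOT
    return slot_start + SECONDS_PER_SLOT // 3
```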
`get_sync_committee_signature(state, block_root, validator_index, privkey)` assumes the parameter `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.
```python
def get_sync_committee_signature(state: BeaconState,
```
@ -286,17 +290,20 @@ def get_sync_committee_signature(state: BeaconState,
The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.

The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
`subnet_id` can be computed via `compute_subnets_for_sync_committee(state, validator_index)` where `state` is a `BeaconState` during the matching sync committee period.

*Note*: This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.
```python
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
    next_slot_epoch = compute_epoch_at_slot(Slot(state.slot + 1))
    if compute_sync_committee_period(get_current_epoch(state)) == compute_sync_committee_period(next_slot_epoch):
        sync_committee = state.current_sync_committee
    else:
        sync_committee = state.next_sync_committee

    target_pubkey = state.validators[validator_index].pubkey
    sync_committee_indices = [index for index, pubkey in enumerate(sync_committee.pubkeys) if pubkey == target_pubkey]
    return [
        uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
        for index in sync_committee_indices
    ]
```
@ -359,7 +366,7 @@ Set `contribution.subcommittee_index` to the index for the subcommittee index co
###### Aggregation bits

Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
An aggregator finds the index in the sync committee (as determined by a reverse pubkey lookup on `state.current_sync_committee.pubkeys`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.

For example, a validator with index `2044` might be pseudo-randomly sampled to sync committee index `135`. This sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.
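The mapping arithmetic behind that example, assuming the mainnet values `SYNC_COMMITTEE_SIZE = 512` and `SYNC_COMMITTEE_SUBNET_COUNT = 4`:

```python
SYNC_COMMITTEE_SIZE = 512        # mainnet (assumed)
SYNC_COMMITTEE_SUBNET_COUNT = 4  # mainnet (assumed)

sync_committee_index = 135
subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT  # 128
subcommittee_index = sync_committee_index // subcommittee_size          # 1
position_in_bitvector = sync_committee_index % subcommittee_size        # 7
assert (subcommittee_index, position_in_bitvector) == (1, 7)
```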


@ -93,6 +93,8 @@ It consists of four main sections:
- [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc)
- [Why do we allow empty responses in block requests?](#why-do-we-allow-empty-responses-in-block-requests)
- [Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?](#why-does-beaconblocksbyrange-let-the-server-choose-which-branch-to-send-blocks-from)
- [Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?](#why-are-blocksbyrange-requests-only-required-to-be-served-for-the-latest-min_epochs_for_block_requests-epochs)
- [Why must the proposer signature be checked when backfilling blocks in the database?](#why-must-the-proposer-signature-be-checked-when-backfilling-blocks-in-the-database)
- [What's the effect of empty slots on the sync algorithm?](#whats-the-effect-of-empty-slots-on-the-sync-algorithm)
- [Discovery](#discovery)
  - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
@ -171,6 +173,7 @@ This section outlines constants that are used in this spec.
|---|---|---|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
| `MAX_CHUNK_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5s` | The maximum time to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10s` | The maximum time for complete response transfer. |
@ -179,7 +182,6 @@ This section outlines constants that are used in this spec.
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages |
| `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages |
## MetaData

Clients MUST locally store the following `MetaData`:
@ -565,6 +567,8 @@ The response code can have one of the following values, encoded as a single unsi
  The response payload adheres to the `ErrorMessage` schema (described below).
- 2: **ServerError** -- the responder encountered an error while processing the request.
  The response payload adheres to the `ErrorMessage` schema (described below).
- 3: **ResourceUnavailable** -- the responder does not have the requested resource.
  The response payload adheres to the `ErrorMessage` schema (described below).

Clients MAY use response codes above `128` to indicate alternative, erroneous request-specific responses.
@ -745,10 +749,27 @@ The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.

Clients MUST keep a record of signed blocks seen on the epoch range
`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blocks on this range.

Peers that are unable to reply to block requests within the
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` epoch range MAY get descored or disconnected at any time.

*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local block database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS`
to be fully compliant with `BlocksByRange` requests. To safely perform such a
backfill of blocks to the recent state, the node MUST validate both (1) the
proposer signatures and (2) that the blocks form a valid chain up to the most
recent block referenced in the weak subjectivity state.

*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.

Clients MUST respond with at least the first block that exists in the range, if they have it,
and no more than `MAX_REQUEST_BLOCKS` blocks.

The following blocks, where they exist, MUST be sent in consecutive order.
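A minimal sketch of the MUST-serve range check above; the helper names are hypothetical, while `GENESIS_EPOCH` and `MIN_EPOCHS_FOR_BLOCK_REQUESTS` are the constants defined in this document:

```python
def must_serve_epoch_range(current_epoch: Epoch) -> Tuple[Epoch, Epoch]:
    # [Sketch] Inclusive epoch range a node is required to serve blocks for.
    low = Epoch(max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS))
    return low, current_epoch


def is_block_request_in_required_range(request_epoch: Epoch, current_epoch: Epoch) -> bool:
    low, high = must_serve_epoch_range(current_epoch)
    return low <= request_epoch <= high
```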
@ -1393,6 +1414,45 @@ To avoid this race condition, we allow the responding side to choose which branc
The requesting client then goes on to validate the blocks and incorporate them in their own database
-- because they follow the same rules, they should at this point arrive at the same canonical chain.
### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs?
Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network
the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire
beacon state and then a simple block sync from there to the head. We expect the latter to be the dominant UX strategy.
These checkpoints *in the worst case* (i.e. very large validator set and maximal allowed safety decay) must be from the
most recent `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs, and thus a user must be able to block sync to the head from this starting point.
Thus, this defines the epoch range outside which nodes may prune blocks, and
the epoch range that a new node syncing from a checkpoint must backfill.
`MIN_EPOCHS_FOR_BLOCK_REQUESTS` is calculated using the arithmetic from `compute_weak_subjectivity_period` found in the
[weak subjectivity guide](./weak-subjectivity.md). Specifically to find this max epoch range, we use the worst case event of a very large validator size
(`>= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT`).
```python
MIN_EPOCHS_FOR_BLOCK_REQUESTS = (
MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+ MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
)
```
Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months).
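To make the arithmetic concrete, a quick check using the mainnet values (assumed here: `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256`, `CHURN_LIMIT_QUOTIENT = 2**16`, 32 slots per epoch, 12 seconds per slot):

```python
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256  # mainnet (assumed)
CHURN_LIMIT_QUOTIENT = 2**16               # mainnet (assumed)
MAX_SAFETY_DECAY = 100

min_epochs = MIN_VALIDATOR_WITHDRAWABILITY_DELAY + MAX_SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
assert min_epochs == 33024

seconds = min_epochs * 32 * 12  # epochs * slots/epoch * seconds/slot
print(seconds / 86400)          # ~146.8 days, i.e. roughly 5 months
```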
### Why must the proposer signature be checked when backfilling blocks in the database?
When backfilling blocks in a database from a known safe block/state (e.g. when starting from a weak subjectivity state),
the node not only must ensure the `BeaconBlock`s form a chain to the known safe block,
but also must check that the proposer signature is valid in the `SignedBeaconBlock` wrapper.
This is because the signature is not part of the `BeaconBlock` hash chain, and
thus could be corrupted by an attacker serving valid `BeaconBlock`s but invalid
signatures contained in `SignedBeaconBlock`.
Although in this particular use case this does not represent a decay in safety
(due to the assumptions of starting at a weak subjectivity checkpoint), it
would represent invalid historic data and could be unwittingly transmitted to
additional nodes.
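A hedged sketch of such a backfill check; `verify_proposer_signature` stands in for a client's signature check (which needs the proposer pubkey and fork domain, elided here), and the hash-chain test is the part the block roots alone provide:

```python
def verify_backfill_chain(anchor_parent_root: Root, blocks: Sequence[SignedBeaconBlock]) -> bool:
    # [Sketch] `blocks` run backwards from the trusted anchor toward genesis.
    expected_root = anchor_parent_root
    for signed_block in blocks:
        block = signed_block.message
        if hash_tree_root(block) != expected_root:
            return False  # does not link into the known-safe chain
        if not verify_proposer_signature(signed_block):  # hypothetical helper
            return False  # signatures sit outside the hash chain, so check explicitly
        expected_root = block.parent_root
    return True
```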
### What's the effect of empty slots on the sync algorithm?

When syncing one can only tell that a slot has been skipped on a particular branch


@ -136,7 +136,9 @@ A brief reference for what these values look like in practice ([reference script
## Weak Subjectivity Sync

Clients should allow users to input a Weak Subjectivity Checkpoint at startup,
and guarantee that any successful sync leads to the given Weak Subjectivity Checkpoint along the canonical chain.
If such a sync is not possible, the client should treat this as a critical and irrecoverable failure.
### Weak Subjectivity Sync Procedure


@ -7,7 +7,6 @@ from eth2spec.test.helpers.block_processing import run_block_processing_to
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    transition_to,
)
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,
@ -50,9 +49,9 @@ def get_committee_indices(spec, state, duplicates=False):
""" """
state = state.copy() state = state.copy()
current_epoch = spec.get_current_epoch(state) current_epoch = spec.get_current_epoch(state)
randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR randao_index = (current_epoch + 1) % spec.EPOCHS_PER_HISTORICAL_VECTOR
while True: while True:
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state)) committee = spec.get_next_sync_committee_indices(state)
if duplicates: if duplicates:
if len(committee) != len(set(committee)): if len(committee) != len(set(committee)):
return committee return committee
@ -62,23 +61,32 @@ def get_committee_indices(spec, state, duplicates=False):
        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
def compute_committee_indices(spec, state, committee):
    """
    Given a ``committee``, calculate and return the related indices
    """
    all_pubkeys = [v.pubkey for v in state.validators]
    committee_indices = [all_pubkeys.index(pubkey) for pubkey in committee.pubkeys]
    return committee_indices
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_missing_participant(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(2020)
    random_participant = rng.choice(committee_indices)

    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one participant whose signature was included.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[index != random_participant for index in committee_indices],
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee_indices,  # full committee signs
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
@ -88,31 +96,38 @@ def test_invalid_signature_missing_participant(spec, state):
@spec_state_test
@always_bls
def test_invalid_signature_extra_participant(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(3030)
    random_participant = rng.choice(committee_indices)

    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one signature even though the block claims the entire committee participated.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            [index for index in committee_indices if index != random_participant],
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
def compute_sync_committee_inclusion_reward(spec,
                                            state,
                                            participant_index,
                                            committee_indices,
                                            committee_bits):
    total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
    total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
    max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
    max_slot_rewards = spec.Gwei(
        max_epoch_rewards * len(included_indices)
        // len(committee_indices) // spec.SLOTS_PER_EPOCH
    )

    # Compute the participant and proposer sync rewards
    committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
@ -121,23 +136,23 @@ def compute_sync_committee_inclusion_reward(spec, state, participant_index, comm
    return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)
def compute_sync_committee_participant_reward(spec, state, participant_index, committee_indices, committee_bits):
    included_indices = [index for index, bit in zip(committee_indices, committee_bits) if bit]
    multiplicities = Counter(included_indices)

    inclusion_reward = compute_sync_committee_inclusion_reward(
        spec, state, participant_index, committee_indices, committee_bits,
    )
    return spec.Gwei(inclusion_reward * multiplicities[participant_index])
def compute_sync_committee_proposer_reward(spec, state, committee_indices, committee_bits):
    proposer_reward = 0
    for index, bit in zip(committee_indices, committee_bits):
        if not bit:
            continue
        inclusion_reward = compute_sync_committee_inclusion_reward(
            spec, state, index, committee_indices, committee_bits,
        )
        proposer_reward_denominator = (
            (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
@ -148,15 +163,15 @@ def compute_sync_committee_proposer_reward(spec, state, committee, committee_bit
    return proposer_reward
def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indices, committee_bits, proposer_index):
    for index in range(len(post_state.validators)):
        reward = 0
        if index in committee_indices:
            reward += compute_sync_committee_participant_reward(
                spec,
                pre_state,
                index,
                committee_indices,
                committee_bits,
            )
@ -164,14 +179,14 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee, comm
            reward += compute_sync_committee_proposer_reward(
                spec,
                pre_state,
                committee_indices,
                committee_bits,
            )

        assert post_state.balances[index] == pre_state.balances[index] + reward


def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
    pre_state = state.copy()

    block = build_empty_block_for_next_slot(spec, state)
@ -181,7 +196,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
            spec,
            state,
            block.slot - 1,
            [index for index, bit in zip(committee_indices, committee_bits) if bit],
        )
    )
@ -191,7 +206,7 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
        spec,
        pre_state,
        state,
        committee_indices,
        committee_bits,
        block.proposer_index,
    )
@ -201,60 +216,60 @@ def run_successful_sync_committee_test(spec, state, committee, committee_bits):
@with_configs([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
    committee_indices = get_committee_indices(spec, state, duplicates=False)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

    # Preconditions of this test case
    assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
    assert committee_size == len(set(committee_indices))

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@with_configs([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee(spec, state):
    committee_indices = get_committee_indices(spec, state, duplicates=True)
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

    # Preconditions of this test case
    assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
    assert committee_size > len(set(committee_indices))

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_not_full_participants(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    rng = random.Random(1010)
    committee_bits = [rng.choice([True, False]) for _ in committee_indices]

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_empty_participants(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)
    committee_bits = [False for _ in committee_indices]

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_past_block(spec, state):
    committee_indices = compute_committee_indices(spec, state, state.current_sync_committee)

    blocks = []
    for _ in range(2):
@ -262,12 +277,12 @@ def test_invalid_signature_past_block(spec, state):
        block = build_empty_block_for_next_slot(spec, state)
        # Valid sync committee signature here...
        block.body.sync_aggregate = spec.SyncAggregate(
            sync_committee_bits=[True] * len(committee_indices),
            sync_committee_signature=compute_aggregate_sync_committee_signature(
                spec,
                state,
                block.slot - 1,
                committee_indices,
            )
        )
@ -277,12 +292,12 @@ def test_invalid_signature_past_block(spec, state):
    invalid_block = build_empty_block_for_next_slot(spec, state)
    # Invalid signature from a slot other than the previous
    invalid_block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            invalid_block.slot - 2,
            committee_indices,
        )
    )
@ -307,19 +322,18 @@ def test_invalid_signature_previous_committee(spec, state):
    transition_to(spec, state, slot_in_future_sync_committee_period)

    # Use the previous sync committee to produce the signature.
    # Ensure that the pubkey sets are different.
    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
    committee_indices = compute_committee_indices(spec, state, old_sync_committee)

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee_indices,
        )
    )
@ -345,15 +359,13 @@ def test_valid_signature_future_committee(spec, state):
    transition_to(spec, state, slot_in_future_sync_committee_period)

    sync_committee = state.current_sync_committee
    next_sync_committee = state.next_sync_committee

    assert next_sync_committee != sync_committee
    assert sync_committee != old_current_sync_committee
    assert sync_committee != old_next_sync_committee

    committee_indices = compute_committee_indices(spec, state, sync_committee)

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
@ -367,43 +379,3 @@ def test_valid_signature_future_committee(spec, state):
    )

    yield from run_sync_committee_processing(spec, state, block)


@with_altair_and_later
@spec_state_test
def test_sync_committee_is_only_computed_at_epoch_boundary(spec, state):
    """
    Sync committees can only be computed at sync committee period boundaries.
    Ensure a client respects the committee in the state (assumed to be derived
    in the correct way).
    """
    current_epoch = spec.get_current_epoch(state)

    # use a "synthetic" committee to simulate the situation
    # where ``spec.get_sync_committee`` at the sync committee
    # period epoch boundary would have diverged some epochs into the
    # period; ``aggregate_pubkey`` is not relevant to this test
    pubkeys = []
    committee_indices = []
    i = 0
    active_validator_count = len(spec.get_active_validator_indices(state, current_epoch))
    while len(pubkeys) < spec.SYNC_COMMITTEE_SIZE:
        v = state.validators[i % active_validator_count]
        if spec.is_active_validator(v, current_epoch):
            pubkeys.append(v.pubkey)
            committee_indices.append(i)
        i += 1

    synthetic_committee = spec.SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=spec.BLSPubkey())
    state.current_sync_committee = synthetic_committee

    assert spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD > 3
    for _ in range(3):
        next_epoch(spec, state)

    committee = get_committee_indices(spec, state)
    assert committee != committee_indices

    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)


@ -39,8 +39,7 @@ def run_sync_committees_progress_test(spec, state):
    # Can compute the third committee having computed final balances in the last epoch
    # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
    third_sync_committee = spec.get_next_sync_committee(state)

    assert state.current_sync_committee == second_sync_committee
    assert state.next_sync_committee == third_sync_committee


@ -18,7 +18,8 @@ from eth2spec.test.context import (
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
    all_pubkeys = [v.pubkey for v in state.validators]
    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
    participants = random.sample(committee, int(len(committee) * fraction_full))

    yield 'pre', state


@ -0,0 +1,244 @@
from eth2spec.test.context import fork_transition_test
from eth2spec.test.helpers.constants import PHASE0, ALTAIR
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block


def _state_transition_and_sign_block_at_slot(spec, state):
    """
    Cribbed from ``transition_unsigned_block`` helper
    where the early parts of the state transition have already
    been applied to ``state``.
    Used to produce a block during an irregular state transition.
    """
    block = build_empty_block(spec, state)

    assert state.latest_block_header.slot < block.slot
    assert state.slot == block.slot
    spec.process_block(state, block)
    block.state_root = state.hash_tree_root()
    return sign_block(spec, state, block)
def _all_blocks(_):
    return True


def _skip_slots(*slots):
    """
    Skip making a block if its slot is
    passed as an argument to this filter
    """
    def f(state_at_prior_slot):
        return state_at_prior_slot.slot + 1 not in slots
    return f


def _no_blocks(_):
    return False


def _only_at(slot):
    """
    Only produce a block if its slot is ``slot``.
    """
    def f(state_at_prior_slot):
        return state_at_prior_slot.slot + 1 == slot
    return f
def _state_transition_across_slots(spec, state, to_slot, block_filter=_all_blocks):
    assert state.slot < to_slot
    while state.slot < to_slot:
        should_make_block = block_filter(state)
        if should_make_block:
            block = build_empty_block_for_next_slot(spec, state)
            signed_block = state_transition_and_sign_block(spec, state, block)
            yield signed_block
        else:
            next_slot(spec, state)


def _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=True):
    spec.process_slots(state, state.slot + 1)

    assert state.slot % spec.SLOTS_PER_EPOCH == 0
    assert spec.compute_epoch_at_slot(state.slot) == fork_epoch

    state = post_spec.upgrade_to_altair(state)

    assert state.fork.epoch == fork_epoch
    assert state.fork.previous_version == post_spec.GENESIS_FORK_VERSION
    assert state.fork.current_version == post_spec.ALTAIR_FORK_VERSION

    if with_block:
        return state, _state_transition_and_sign_block_at_slot(post_spec, state)
    else:
        return state, None
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_normal_transition(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    """
    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
    producing blocks for every slot along the way.
    """
    yield "pre", state

    assert spec.get_current_epoch(state) < fork_epoch

    # regular state transition until fork:
    to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
    blocks = []
    blocks.extend([
        pre_tag(block) for block in
        _state_transition_across_slots(spec, state, to_slot)
    ])

    # irregular state transition to handle fork:
    state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
    blocks.append(post_tag(block))

    # continue regular state transition with new spec into next epoch
    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
    blocks.extend([
        post_tag(block) for block in
        _state_transition_across_slots(post_spec, state, to_slot)
    ])

    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1

    slots_with_blocks = [block.message.slot for block in blocks]
    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
    assert set(range(1, state.slot + 1)) == set(slots_with_blocks)

    yield "blocks", blocks
    yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_missing_first_post_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    """
    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
    producing blocks for every slot along the way except for the first block
    of the new fork.
    """
    yield "pre", state

    assert spec.get_current_epoch(state) < fork_epoch

    # regular state transition until fork:
    to_slot = fork_epoch * spec.SLOTS_PER_EPOCH - 1
    blocks = []
    blocks.extend([
        pre_tag(block) for block in
        _state_transition_across_slots(spec, state, to_slot)
    ])

    # irregular state transition to handle fork:
    state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)

    # continue regular state transition with new spec into next epoch
    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
    blocks.extend([
        post_tag(block) for block in
        _state_transition_across_slots(post_spec, state, to_slot)
    ])

    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1

    slots_with_blocks = [block.message.slot for block in blocks]
    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
    expected_slots = set(range(1, state.slot + 1)).difference(set([fork_epoch * spec.SLOTS_PER_EPOCH]))
    assert expected_slots == set(slots_with_blocks)

    yield "blocks", blocks
    yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_missing_last_pre_fork_block(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    """
    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
    producing blocks for every slot along the way except for the last block
    of the old fork.
    """
    yield "pre", state

    assert spec.get_current_epoch(state) < fork_epoch

    # regular state transition until fork:
    last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
    to_slot = last_slot_of_pre_fork
    blocks = []
    blocks.extend([
        pre_tag(block) for block in
        _state_transition_across_slots(spec, state, to_slot, block_filter=_skip_slots(last_slot_of_pre_fork))
    ])

    # irregular state transition to handle fork:
    state, block = _do_altair_fork(state, spec, post_spec, fork_epoch)
    blocks.append(post_tag(block))

    # continue regular state transition with new spec into next epoch
    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
    blocks.extend([
        post_tag(block) for block in
        _state_transition_across_slots(post_spec, state, to_slot)
    ])

    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1

    slots_with_blocks = [block.message.slot for block in blocks]
    assert len(set(slots_with_blocks)) == len(slots_with_blocks)
    expected_slots = set(range(1, state.slot + 1)).difference(set([last_slot_of_pre_fork]))
    assert expected_slots == set(slots_with_blocks)

    yield "blocks", blocks
    yield "post", state
@fork_transition_test(PHASE0, ALTAIR, fork_epoch=2)
def test_transition_only_blocks_post_fork(state, fork_epoch, spec, post_spec, pre_tag, post_tag):
    """
    Transition from the initial ``state`` to the epoch after the ``fork_epoch``,
    skipping blocks for every slot along the way except for the first block
    in the ending epoch.
    """
    yield "pre", state

    assert spec.get_current_epoch(state) < fork_epoch

    # regular state transition until fork:
    last_slot_of_pre_fork = fork_epoch * spec.SLOTS_PER_EPOCH - 1
    to_slot = last_slot_of_pre_fork
    blocks = []
    blocks.extend([
        pre_tag(block) for block in
        _state_transition_across_slots(spec, state, to_slot, block_filter=_no_blocks)
    ])

    # irregular state transition to handle fork:
    state, _ = _do_altair_fork(state, spec, post_spec, fork_epoch, with_block=False)

    # continue regular state transition with new spec into next epoch
    to_slot = post_spec.SLOTS_PER_EPOCH + state.slot
    last_slot = (fork_epoch + 1) * post_spec.SLOTS_PER_EPOCH
    blocks.extend([
        post_tag(block) for block in
        _state_transition_across_slots(post_spec, state, to_slot, block_filter=_only_at(last_slot))
    ])

    assert state.slot % post_spec.SLOTS_PER_EPOCH == 0
    assert post_spec.compute_epoch_at_slot(state.slot) == fork_epoch + 1

    slots_with_blocks = [block.message.slot for block in blocks]
    assert len(slots_with_blocks) == 1
    assert slots_with_blocks[0] == last_slot

    yield "blocks", blocks
    yield "post", state

View File

@ -46,7 +46,8 @@ def test_process_light_client_update_not_updated(spec, state):
        body_root=signed_block.message.body.hash_tree_root(),
    )
    # Sync committee signing the header
    all_pubkeys = [v.pubkey for v in state.validators]
    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
@ -111,7 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
    )
    # Sync committee signing the finalized_block_header
    all_pubkeys = [v.pubkey for v in state.validators]
    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
@ -190,7 +192,8 @@ def test_process_light_client_update_finality_updated(spec, state):
    )
    # Sync committee signing the finalized_block_header
    all_pubkeys = [v.pubkey for v in state.validators]
    committee = [all_pubkeys.index(pubkey) for pubkey in state.current_sync_committee.pubkeys]
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,

View File

@ -8,8 +8,12 @@ from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.test.context import (
    with_altair_and_later,
    with_configs,
    with_state,
)
from eth2spec.test.helpers.constants import (
    MINIMAL,
)

rng = random.Random(1337)
@ -91,6 +95,7 @@ def _get_sync_committee_signature(
@only_with_bls()
@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_process_sync_committee_contributions(phases, spec, state):
    # skip over slots at genesis
@ -143,20 +148,63 @@ def _subnet_for_sync_committee_index(spec, i):
    return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)


def _get_expected_subnets_by_pubkey(sync_committee_members):
    expected_subnets_by_pubkey = defaultdict(list)
    for (subnet, pubkey) in sync_committee_members:
        expected_subnets_by_pubkey[pubkey].append(subnet)
    return expected_subnets_by_pubkey


@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee(state, spec, phases):
    # Transition to the head of the next period
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
    assert (
        spec.compute_sync_committee_period(spec.get_current_epoch(state))
        == spec.compute_sync_committee_period(next_slot_epoch)
    )
    some_sync_committee_members = list(
        (
            _subnet_for_sync_committee_index(spec, i),
            pubkey,
        )
        # use current_sync_committee
        for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
    )
    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)

    for _, pubkey in some_sync_committee_members:
        validator_index = _validator_index_for_pubkey(state, pubkey)
        subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
        expected_subnets = expected_subnets_by_pubkey[pubkey]
        assert subnets == expected_subnets


@with_altair_and_later
@with_configs([MINIMAL], reason="too slow")
@with_state
def test_compute_subnets_for_sync_committee_slot_period_boundary(state, spec, phases):
    # Transition to the end of the period
    transition_to(spec, state, spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1)

    next_slot_epoch = spec.compute_epoch_at_slot(state.slot + 1)
    assert (
        spec.compute_sync_committee_period(spec.get_current_epoch(state))
        != spec.compute_sync_committee_period(next_slot_epoch)
    )
    some_sync_committee_members = list(
        (
            _subnet_for_sync_committee_index(spec, i),
            pubkey,
        )
        # use next_sync_committee
        for i, pubkey in enumerate(state.next_sync_committee.pubkeys)
    )
    expected_subnets_by_pubkey = _get_expected_subnets_by_pubkey(some_sync_committee_members)

    for _, pubkey in some_sync_committee_members:
        validator_index = _validator_index_for_pubkey(state, pubkey)

View File

@ -11,7 +11,7 @@ from .helpers.constants import (
    ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
)
from .helpers.genesis import create_genesis_state
from .utils import vector_test, with_meta_tags, build_transition_test

from random import Random
from typing import Any, Callable, Sequence, TypedDict, Protocol
@ -383,3 +383,38 @@ def is_post_merge(spec):
with_altair_and_later = with_phases([ALTAIR])  # TODO: include Merge, but not until Merge work is rebased.
with_merge_and_later = with_phases([MERGE])
def fork_transition_test(pre_fork_name, post_fork_name, fork_epoch=None):
    """
    A decorator to construct a "transition" test from one fork of the eth2 spec
    to another.

    The decorator assumes a transition from the `pre_fork_name` fork to the
    `post_fork_name` fork. The user can supply a `fork_epoch` at which the
    fork occurs; otherwise, the test itself must compute one (yielding it to
    the generator) if more custom behavior is desired.

    A test using this decorator should expect to receive as parameters:
    `state`: the default state constructed for the `pre_fork_name` fork
        according to the `with_state` decorator.
    `fork_epoch`: the `fork_epoch` provided to this decorator, if given.
    `spec`: the version of the eth2 spec corresponding to `pre_fork_name`.
    `post_spec`: the version of the eth2 spec corresponding to `post_fork_name`.
    `pre_tag`: a function to tag data as belonging to `pre_fork_name` fork.
        Used to discriminate data during consumption of the generated spec tests.
    `post_tag`: a function to tag data as belonging to `post_fork_name` fork.
        Used to discriminate data during consumption of the generated spec tests.
    """
    def _wrapper(fn):
        @with_phases([pre_fork_name], other_phases=[post_fork_name])
        @spec_test
        @with_state
        def _adapter(*args, **kwargs):
            wrapped = build_transition_test(fn,
                                            pre_fork_name,
                                            post_fork_name,
                                            fork_epoch=fork_epoch)
            return wrapped(*args, **kwargs)
        return _adapter
    return _wrapper

View File

@ -69,9 +69,8 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
    if spec.fork not in FORKS_BEFORE_ALTAIR:
        # Fill in sync committees
        # Note: A duplicate committee is assigned for the current and next committee at genesis
        state.current_sync_committee = spec.get_next_sync_committee(state)
        state.next_sync_committee = spec.get_next_sync_committee(state)

    return state

View File

@ -1,3 +1,4 @@
import inspect

from typing import Dict, Any
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize
@ -93,3 +94,50 @@ def with_meta_tags(tags: Dict[str, Any]):
            yield k, 'meta', v
        return entry
    return runner
def build_transition_test(fn, pre_fork_name, post_fork_name, fork_epoch=None):
    """
    Handles the inner plumbing to generate `transition_test`s.
    See that decorator in `context.py` for more information.
    """
    def _adapter(*args, **kwargs):
        post_spec = kwargs["phases"][post_fork_name]

        # Count how many blocks the test tags as pre-fork so that the index of
        # the last pre-fork block can be emitted as the `fork_block` meta field.
        pre_fork_counter = 0

        def pre_tag(obj):
            nonlocal pre_fork_counter
            pre_fork_counter += 1
            return obj

        def post_tag(obj):
            return obj

        yield "post_fork", "meta", post_fork_name

        has_fork_epoch = False
        if fork_epoch:
            kwargs["fork_epoch"] = fork_epoch
            has_fork_epoch = True
            yield "fork_epoch", "meta", fork_epoch

        # massage args to handle an optional custom state using
        # `with_custom_state` decorator
        expected_args = inspect.getfullargspec(fn)
        if "phases" not in expected_args.kwonlyargs:
            kwargs.pop("phases", None)

        for part in fn(*args,
                       post_spec=post_spec,
                       pre_tag=pre_tag,
                       post_tag=post_tag,
                       **kwargs):
            if part[0] == "fork_epoch":
                has_fork_epoch = True
            yield part
        assert has_fork_epoch

        if pre_fork_counter > 0:
            yield "fork_block", "meta", pre_fork_counter - 1

    return _adapter

View File

@ -0,0 +1,72 @@
# Transition testing
Transition tests cover processing the chain across a fork boundary.

Each test case contains a `post_fork` key in its `meta.yaml` that indicates the target fork, which also fixes the fork the test begins in.
Clients should assume forks happen sequentially in the following manner:
0. `phase0`
1. `altair`
For example, if a test case has `post_fork` of `altair`, the test consumer should assume the test begins in `phase0` and use that specification to process the initial state and any blocks up until the fork epoch. After the fork happens, the test consumer should use the specification according to the `altair` fork to process the remaining data.
## Test case format
### `meta.yaml`
```yaml
post_fork: string -- String name of the spec after the fork.
fork_epoch: int -- The epoch at which the fork takes place.
fork_block: int -- Optional. The `<index>` of the last block on the initial fork.
blocks_count: int -- The number of blocks processed in this test.
```
*Note*: There may be a fork transition function to run at the `fork_epoch`.
Refer to the specs for the relevant fork for further details.
### `pre.ssz_snappy`
An SSZ-snappy encoded `BeaconState` according to the specification of
the initial fork, the state before running the block transitions.
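To load such a file, something along these lines should work (a sketch, assuming the `python-snappy` package; decoding the resulting bytes with an SSZ implementation of the relevant `BeaconState` is left to the client):

```python
import snappy  # provided by the python-snappy package

# Read and decompress the snappy-compressed SSZ bytes of the pre-state.
# The resulting bytes should be decoded as the initial fork's BeaconState.
with open("pre.ssz_snappy", "rb") as f:
    pre_state_ssz_bytes = snappy.uncompress(f.read())
```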
### `blocks_<index>.ssz_snappy`
A series of files, with `<index>` in range `[0, blocks_count)`.
Blocks must be processed in order, following the main transition function
(i.e. process slot and epoch transitions in between blocks as normal).
Blocks are encoded as `SignedBeaconBlock`s from the relevant spec version
as indicated by the `post_fork` and `fork_block` data in the `meta.yaml`.
As blocks span fork boundaries, a `fork_block` number is given in
the `meta.yaml` to help resolve which blocks belong to which fork.
The `fork_block` is the index in the test data of the **last** block
of the **initial** fork.
To demonstrate, the following diagram shows slots with `_` and blocks
in those slots as `x`. The fork happens at the epoch boundary delineated by the `|`.
```
x x       x x
_ _ _ _ | _ _ _ _
```
The `blocks_count` value in the `meta.yaml` in this case is `4`, and the
`fork_block` value is `1`. If this particular example were
testing the fork from Phase 0 to Altair, blocks with indices `0, 1` represent
`SignedBeaconBlock`s defined in the Phase 0 spec and blocks with indices `2, 3`
represent `SignedBeaconBlock`s defined in the Altair spec.
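For that diagram, the corresponding `meta.yaml` might read as follows (the `fork_epoch` value of `1` is illustrative, assuming the toy configuration of four slots per epoch shown above):

```yaml
post_fork: "altair"
fork_epoch: 1
fork_block: 1
blocks_count: 4
```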
*Note*: If `fork_block` is missing, then all block data should be
interpreted as belonging to the post fork.
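A test consumer can resolve which spec decodes which block along these lines (a minimal sketch; `pre_spec` and `post_spec` are hypothetical stand-ins for the client's own spec objects):

```python
def spec_for_block_index(index, fork_block, pre_spec, post_spec):
    # `fork_block` is the index of the last block of the initial fork;
    # when it is absent, every block belongs to the post fork.
    if fork_block is None or index > fork_block:
        return post_spec
    return pre_spec
```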
### `post.ssz_snappy`
An SSZ-snappy encoded `BeaconState` according to the specification of
the post fork, the state after running the block transitions.
## Condition
The resulting state should match the expected `post` state.

View File

@ -0,0 +1,42 @@
from importlib import reload
from typing import Iterable

from eth2spec.test.helpers.constants import ALTAIR, MINIMAL, MAINNET, PHASE0
from eth2spec.config import config_util
from eth2spec.test.altair.transition import test_transition as test_altair_transition

from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests


def create_provider(tests_src, config_name: str, pre_fork_name: str, post_fork_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        config_util.prepare_config(configs_path, config_name)
        reload(spec_phase0)
        reload(spec_altair)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
        return generate_from_tests(
            runner_name='transition',
            handler_name='core',
            src=tests_src,
            fork_name=post_fork_name,
            phase=pre_fork_name,
        )

    return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)


TRANSITION_TESTS = ((PHASE0, ALTAIR, test_altair_transition),)


if __name__ == "__main__":
    for pre_fork, post_fork, transition_test_module in TRANSITION_TESTS:
        gen_runner.run_generator("transition", [
            create_provider(transition_test_module, MINIMAL, pre_fork, post_fork),
            create_provider(transition_test_module, MAINNET, pre_fork, post_fork),
        ])

View File

@ -0,0 +1,2 @@
pytest>=4.4
../../../[generator]