mirror of https://github.com/status-im/eth2.0-specs.git
synced 2025-02-02 22:03:37 +00:00

Merge remote-tracking branch 'upstream/dev' into custody-group

This commit is contained in: commit eb7518a627

2 .github/workflows/docs.yml (vendored)
@@ -3,7 +3,7 @@ name: Publish docs
on:
  push:
    branches:
      - master
permissions:
  contents: write
jobs:
20 .github/workflows/run-tests.yml (vendored)
@@ -69,6 +69,18 @@ jobs:
      - name: Run linter for test generators
        run: make lint_generators

  whitespace:
    runs-on: [self-hosted-ghr-custom, size-l-x64, profile-consensusSpecs]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Check for trailing whitespace
        run: |
          if git grep -n '[[:blank:]]$'; then
            echo "Trailing whitespace found. Please fix it."
            exit 1
          fi
  pyspec-tests:
    runs-on: [self-hosted-ghr-custom, size-xl-x64, profile-consensusSpecs]
    needs: [lint,codespell,table_of_contents]
@@ -124,4 +136,10 @@ jobs:
      - name: Install pyspec requirements
        run: make install_test
      - name: Run generators with --modcheck
        run: make generate_tests modcheck=true
        run: make generate_tests modcheck=true 2>&1 | tee consensustestgen.log
      - name: Check for errors
        run: |
          if grep -q "\[ERROR\]" consensustestgen.log; then
            echo "There is an error in the log"
            exit 1
          fi
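For contributors who want to run the new trailing-whitespace check locally before pushing, a rough Python equivalent of the `git grep -n '[[:blank:]]$'` step above could look like the sketch below. It is illustrative only (not part of the workflow) and assumes it is run from the repository root with `git` available on PATH.

```python
# Sketch of a local pre-push check, equivalent in spirit to the CI step above.
import subprocess
import sys


def check_trailing_whitespace() -> int:
    files = subprocess.run(
        ["git", "ls-files"], capture_output=True, text=True, check=True
    ).stdout.splitlines()
    status = 0
    for path in files:
        try:
            with open(path, encoding="utf-8", errors="ignore") as f:
                for lineno, line in enumerate(f, start=1):
                    stripped = line.rstrip("\r\n")
                    if stripped != stripped.rstrip(" \t"):
                        print(f"{path}:{lineno}: trailing whitespace")
                        status = 1
        except OSError:
            continue
    return status


if __name__ == "__main__":
    sys.exit(check_trailing_whitespace())
```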
@@ -1,7 +1,7 @@
# Configurations

This directory contains a set of configurations used for testing, testnets, and mainnet.
A client binary may be compiled for a specific `PRESET_BASE`,
and then load different configurations around that preset to participate in different networks or tests.
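As an illustration of that split (a hedged sketch, not client code; it assumes PyYAML and an illustrative layout), a binary compiled for one `PRESET_BASE` would load a runtime configuration file and refuse configs built for a different preset:

```python
# Illustrative only: how a client compiled for one preset might load a config.
import yaml  # assumes PyYAML is installed

COMPILED_PRESET_BASE = "mainnet"  # fixed at build time in this sketch


def load_config(path: str) -> dict:
    with open(path) as f:
        config = yaml.safe_load(f)
    if config.get("PRESET_BASE") != COMPILED_PRESET_BASE:
        raise ValueError(
            f"config targets preset {config.get('PRESET_BASE')!r}, "
            f"but this binary was compiled for {COMPILED_PRESET_BASE!r}"
        )
    return config
```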
Standard configs:

@@ -24,7 +24,7 @@ In this case, the suffix on the new variable may be removed, and the old variabl

A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (variables for special forking slots, etc.), and was not integrated sufficiently in any of the spec tools or implementations.
Instead, the config essentially doubles as a fork definition now, e.g. changing the value for `ALTAIR_FORK_EPOCH` changes the fork.
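A minimal sketch of that idea (an illustrative helper, not part of the spec): the active fork at a given epoch falls out of the `*_FORK_EPOCH` values in the loaded config mapping, so changing one value reschedules the fork.

```python
# Illustrative helper: derive the active fork name from *_FORK_EPOCH config keys.
def active_fork(config: dict, epoch: int) -> str:
    scheduled = sorted(
        (fork_epoch, name[: -len("_FORK_EPOCH")])
        for name, fork_epoch in config.items()
        if name.endswith("_FORK_EPOCH") and fork_epoch is not None
    )
    active = "PHASE0"
    for fork_epoch, name in scheduled:
        if epoch >= fork_epoch:
            active = name
    return active


print(active_fork({"ALTAIR_FORK_EPOCH": 74240, "BELLATRIX_FORK_EPOCH": 144896}, 100000))  # ALTAIR
```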
## Format

Each preset and configuration is a key-value mapping.
@@ -166,7 +166,6 @@ DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128
MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384
SAMPLES_PER_SLOT: 8
CUSTODY_REQUIREMENT: 4
BLOB_SIDECAR_SUBNET_COUNT_EIP7594: 8
MAX_BLOBS_PER_BLOCK_EIP7594: 8
# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_EIP7594`
MAX_REQUEST_BLOB_SIDECARS_EIP7594: 1024
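As a quick sanity check of the derived value above (assuming `MAX_REQUEST_BLOCKS_DENEB = 128`, which is defined elsewhere in the config and not shown in this hunk):

```python
MAX_REQUEST_BLOCKS_DENEB = 128  # assumed, defined outside this hunk
MAX_BLOBS_PER_BLOCK_EIP7594 = 8

assert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_EIP7594 == 1024  # MAX_REQUEST_BLOB_SIDECARS_EIP7594
```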
@@ -165,7 +165,6 @@ DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128
MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384
SAMPLES_PER_SLOT: 8
CUSTODY_REQUIREMENT: 4
BLOB_SIDECAR_SUBNET_COUNT_EIP7594: 8
MAX_BLOBS_PER_BLOCK_EIP7594: 8
# `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_EIP7594`
MAX_REQUEST_BLOB_SIDECARS_EIP7594: 1024
16 docs/docs/templates/beacon-chain-template.md (vendored)
@@ -66,19 +66,3 @@ class CONTAINER_NAME(Container):

### Block processing

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure <FORK_NAME> testing only.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    ...
```
@@ -7,7 +7,7 @@ theme:
    - scheme: default
      primary: black
      toggle:
        icon: material/brightness-7
        name: Switch to dark mode
    - scheme: slate
      primary: black
@@ -2,7 +2,7 @@

# Misc
# ---------------------------------------------------------------
# `uint64(2**16)` (= 65,536)
MAX_STEMS: 65536
# `uint64(33)`
MAX_COMMITMENTS_PER_STEM: 33
@@ -29,8 +29,8 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096
MAX_ATTESTER_SLASHINGS_ELECTRA: 1
# `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA: 8
# `uint64(2**0)` (= 1)
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1
# `uint64(2**1)` (= 2)
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2

# Execution
# ---------------------------------------------------------------
@@ -2,7 +2,7 @@

# Execution
# ---------------------------------------------------------------
# `uint64(2**16)` (= 65,536)
MAX_STEMS: 65536
# `uint64(33)`
MAX_COMMITMENTS_PER_STEM: 33
@@ -29,8 +29,8 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096
MAX_ATTESTER_SLASHINGS_ELECTRA: 1
# `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA: 8
# `uint64(2**0)` (= 1)
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1
# `uint64(2**1)` (= 2)
MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2

# Execution
# ---------------------------------------------------------------
@@ -573,7 +573,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed

    # Any signed custody-slashing should result in at least one slashing.
    # If the custody bits are valid, then the claim itself is slashed.
    malefactor = state.validators[custody_slashing.malefactor_index]
    whistleblower = state.validators[custody_slashing.whistleblower_index]
    domain = get_domain(state, DOMAIN_CUSTODY_BIT_SLASHING, get_current_epoch(state))
    signing_root = compute_signing_root(custody_slashing, domain)
@@ -596,7 +596,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
    # Verify existence and participation of claimed malefactor
    attesters = get_attesting_indices(state, attestation)
    assert custody_slashing.malefactor_index in attesters

    # Verify the malefactor custody key
    epoch_to_sign = get_randao_epoch_for_custody_period(
        get_custody_period_for_validator(custody_slashing.malefactor_index, attestation.data.target.epoch),
@@ -619,7 +619,7 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed
        for attester_index in attesters:
            if attester_index != custody_slashing.malefactor_index:
                increase_balance(state, attester_index, whistleblower_reward)
        # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however.
    else:
        # The claim was false, the custody bit was correct. Slash the whistleblower that induced this work.
        slash_validator(state, custody_slashing.whistleblower_index)
@@ -28,12 +28,10 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

For an introduction about DAS itself, see [the DAS participation spec](sampling.md#data-availability-sampling).
This is not a pre-requisite for the network layer, but will give you valuable context.

For sampling, all nodes need to query for `k` random samples each slot.
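As a toy illustration of that per-slot behaviour (the index space and `k` below are placeholders, not values defined in this document):

```python
import random

SAMPLE_INDEX_SPACE = 2048  # hypothetical number of sample indices
k = 16                     # hypothetical per-slot query count


def pick_samples_for_slot() -> list:
    # Each slot, a node draws k distinct random sample indices to query.
    return random.sample(range(SAMPLE_INDEX_SPACE), k)


print(pick_samples_for_slot())
```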
@@ -55,13 +53,13 @@ The push model does not aim to serve "historical" queries (anything older than t
Historical queries are still required for the unhappy case, where messages are not pushed quickly enough,
and missing samples are not reconstructed by other nodes on the horizontal subnet quickly enough.

The main challenge in supporting historical queries is to target the right nodes,
without concentrating too many requests on a single node, or breaking the network/consensus identity separation.

## DAS Subnets

On a high level, the push-model roles are divided into:
- Sources: create blobs of shard block data, which are transformed into many tiny samples.
- Sinks: continuously look for samples

At full operation, the network has one proposer, per shard, per slot.
@@ -93,15 +91,15 @@ Peers on the horizontal subnet are expected to at least perform regular propagat
Nodes on this same subnet can replicate the sampling efficiently (including a proof for each sample),
and distribute it to any vertical networks that are available to them.

Since the messages are content-addressed (instead of origin-stamped),
multiple publishers of the same samples on a vertical subnet do not hurt performance,
but actually improve it by shortcutting regular propagation on the vertical subnet, and thus lowering the latency to a sample.

### Vertical subnets

Vertical subnets propagate the samples to every peer that is interested.
These interests are randomly sampled and rotate quickly: although not perfect,
sufficient to avoid any significant amount of nodes from being 100% predictable.

As soon as a sample is missing after the expected propagation time window,
@@ -166,7 +164,7 @@ The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and w

#### Vertical subnets: `das_sample_{subnet_index}`

Shard blob samples can be verified with just a 48 byte KZG proof (commitment quotient polynomial),
against the commitment to blob polynomial, specific to that `(shard, slot)` key.

The following validations MUST pass before forwarding the `sample` on the vertical subnet.
@@ -192,7 +190,7 @@ This is to serve other peers that may have missed it.

To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain.

This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../../phase0/p2p-interface.md).

Note that DAS networking uses a different protocol prefix: `/eth2/das/req`
@@ -13,7 +13,6 @@
- [Execution](#execution)
- [Execution payload](#execution-payload)
- [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@@ -77,64 +76,3 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
        excess_blob_gas=payload.excess_blob_gas,
    )
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP7594 testing only.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    fork = Fork(
        previous_version=EIP7594_FORK_VERSION,  # [Modified in EIP7594] for testing only
        current_version=EIP7594_FORK_VERSION,  # [Modified in EIP7594]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
        deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process deposit balance updates
    validator_pubkeys = [v.pubkey for v in state.validators]
    for deposit in state.pending_deposits:
        validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey))
        increase_balance(state, validator_index, deposit.amount)
    state.pending_deposits = []

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(
            balance - balance % EFFECTIVE_BALANCE_INCREMENT, get_max_effective_balance(validator))
        if validator.effective_balance >= MIN_ACTIVATION_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    # Initialize the execution payload header
    state.latest_execution_payload_header = execution_payload_header

    return state
```
@@ -25,10 +25,10 @@ This is the modification of the fork choice accompanying EIP-7594.

```python
def is_data_available(beacon_block_root: Root) -> bool:
    # `retrieve_column_sidecars` is implementation and context dependent, replacing
    # `retrieve_blobs_and_proofs`. For the given block root, it returns all column
    # sidecars to sample, or raises an exception if they are not available.
    # The p2p network does not guarantee sidecar retrieval outside of
    # `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` epochs.
    column_sidecars = retrieve_column_sidecars(beacon_block_root)
    return all(
        verify_data_column_sidecar(column_sidecar)
@@ -8,6 +8,7 @@
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modifications in EIP-7594](#modifications-in-eip-7594)
- [Preset](#preset)
- [Configuration](#configuration)
@@ -40,6 +41,12 @@
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document contains the consensus-layer networking specification for EIP7594.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Modifications in EIP-7594

### Preset
@@ -58,7 +65,6 @@
| `MAX_REQUEST_DATA_COLUMN_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS` | Maximum number of data column sidecars in a single request |
| `MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve data column sidecars |
| `MAX_REQUEST_BLOB_SIDECARS_EIP7594` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK_EIP7594` | Maximum number of blob sidecars in a single request |
| `BLOB_SIDECAR_SUBNET_COUNT_EIP7594` | `2**3` (= 8) | The number of blob sidecar subnets used in the gossipsub protocol |
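The derived values in this table can be sanity-checked with mainnet timing assumptions (12-second slots, 32-slot epochs) and the values `MAX_REQUEST_BLOCKS_DENEB = 128` and `NUMBER_OF_COLUMNS = 128`, which are defined outside this hunk:

```python
SECONDS_PER_SLOT, SLOTS_PER_EPOCH = 12, 32  # mainnet timing assumptions
MAX_REQUEST_BLOCKS_DENEB = 128              # assumed, defined elsewhere
NUMBER_OF_COLUMNS = 128                     # assumed, defined elsewhere

assert MAX_REQUEST_BLOCKS_DENEB * NUMBER_OF_COLUMNS == 16384  # MAX_REQUEST_DATA_COLUMN_SIDECARS
days = 2**12 * SLOTS_PER_EPOCH * SECONDS_PER_SLOT / 86400
print(round(days, 1))  # ~18.2 days for MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS
```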
### Containers
@@ -1,4 +1,4 @@
# EIP-7594 -- Peer Sampling

**Notice**: This document is a work-in-progress for researchers and implementers.
@@ -61,34 +61,34 @@

## Introduction

This is the beacon chain specification of the enshrined proposer builder separation feature.

*Note:* This specification is built upon [Electra](../../electra/beacon-chain.md) and is under active development.

This feature adds new staked consensus participants called *Builders* and new honest validator duties called *payload timeliness attestations*. The slot is divided into **four** intervals. Honest validators gather *signed bids* (a `SignedExecutionPayloadHeader`) from builders and submit their consensus blocks (a `SignedBeaconBlock`) including these bids at the beginning of the slot. At the start of the second interval, honest validators submit attestations just as they did prior to this feature. At the start of the third interval, aggregators aggregate these attestations and the builder broadcasts either a full payload or a message indicating that they are withholding the payload (a `SignedExecutionPayloadEnvelope`). At the start of the fourth interval, some validators selected to be members of the new **Payload Timeliness Committee** (PTC) attest to the presence and timeliness of the builder's payload.
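For intuition on the four intervals, here is a hedged calculation assuming mainnet's 12-second slots together with the `INTERVALS_PER_SLOT = 4` value from the accompanying fork-choice changes:

```python
SECONDS_PER_SLOT = 12    # mainnet assumption
INTERVALS_PER_SLOT = 4   # value from the EIP-7732 fork-choice changes

boundaries = [i * SECONDS_PER_SLOT // INTERVALS_PER_SLOT for i in range(INTERVALS_PER_SLOT)]
# 0 s: block with signed bid, 3 s: attestations, 6 s: aggregation + payload reveal, 9 s: PTC attestations
print(boundaries)  # [0, 3, 6, 9]
```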
At any given slot, the status of the blockchain's head may be one of the following:
- A block from a previous slot (e.g. the current slot's proposer did not submit its block).
- An *empty* block from the current slot (e.g. the proposer submitted a timely block, but the builder did not reveal the payload on time).
- A full block for the current slot (both the proposer and the builder revealed on time).

## Constants

### Payload status
|
||||
| - | - |
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `PAYLOAD_ABSENT` | `uint8(0)` |
|
||||
| `PAYLOAD_PRESENT` | `uint8(1)` |
|
||||
| `PAYLOAD_WITHHELD` | `uint8(2)` |
|
||||
| `PAYLOAD_PRESENT` | `uint8(1)` |
|
||||
| `PAYLOAD_WITHHELD` | `uint8(2)` |
|
||||
| `PAYLOAD_INVALID_STATUS` | `uint8(3)` |
|
||||
|
||||
## Preset
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `PTC_SIZE` | `uint64(2**9)` (=512) # (New in EIP-7732) |
|
||||
|
||||
### Domain types
|
||||
@@ -151,7 +151,7 @@ class SignedExecutionPayloadHeader(Container):
    message: ExecutionPayloadHeader
    signature: BLSSignature
```

#### `ExecutionPayloadEnvelope`

```python
@@ -177,7 +177,7 @@ class SignedExecutionPayloadEnvelope(Container):

#### `BeaconBlockBody`

**Note:** The Beacon Block body is modified to contain a `SignedExecutionPayloadHeader`. The containers `BeaconBlock` and `SignedBeaconBlock` are modified indirectly. The field `execution_requests` is removed from the beacon block body and moved into the signed execution payload envelope.

```python
class BeaconBlockBody(Container):
@@ -203,7 +203,7 @@ class BeaconBlockBody(Container):

#### `ExecutionPayloadHeader`

**Note:** The `ExecutionPayloadHeader` is modified to only contain the block hash of the committed `ExecutionPayload` in addition to the builder's payment information, gas limit and KZG commitments root to verify the inclusion proofs.

```python
class ExecutionPayloadHeader(Container):
@@ -219,7 +219,7 @@ class ExecutionPayloadHeader(Container):

#### `BeaconState`

*Note*: The `BeaconState` is modified to track the last withdrawals honored in the CL. The `latest_execution_payload_header` is modified semantically to refer not to a past committed `ExecutionPayload` but to the builder's bid for the state's current slot. Another addition is to track the last committed block hash and the last slot that was full, that is, one in which both consensus and execution blocks were included.

```python
class BeaconState(Container):
@@ -311,7 +311,7 @@ def remove_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlag

```python
def is_valid_indexed_payload_attestation(
        state: BeaconState,
        indexed_payload_attestation: IndexedPayloadAttestation) -> bool:
    """
    Check if ``indexed_payload_attestation`` is not empty, has sorted and unique indices and has
@@ -335,7 +335,7 @@ def is_valid_indexed_payload_attestation(

#### `is_parent_block_full`

This function returns true if the last committed payload header was fulfilled with a payload; this can only happen when both the beacon block and the payload were present. This function must be called on a beacon state before processing the execution payload header in the block.

```python
def is_parent_block_full(state: BeaconState) -> bool:
@@ -354,8 +354,8 @@ def get_ptc(state: BeaconState, slot: Slot) -> Vector[ValidatorIndex, PTC_SIZE]:
    epoch = compute_epoch_at_slot(slot)
    committees_per_slot = bit_floor(min(get_committee_count_per_slot(state, epoch), PTC_SIZE))
    members_per_committee = PTC_SIZE // committees_per_slot

    validator_indices: List[ValidatorIndex] = []
    for idx in range(committees_per_slot):
        beacon_committee = get_beacon_committee(state, slot, CommitteeIndex(idx))
        validator_indices += beacon_committee[:members_per_committee]
@@ -390,7 +390,7 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
#### `get_payload_attesting_indices`

```python
def get_payload_attesting_indices(state: BeaconState, slot: Slot,
                                  payload_attestation: PayloadAttestation) -> Set[ValidatorIndex]:
    """
    Return the set of attesting indices corresponding to ``payload_attestation``.
@@ -402,7 +402,7 @@ def get_payload_attesting_indices(state: BeaconState, slot: Slot,
#### `get_indexed_payload_attestation`

```python
def get_indexed_payload_attestation(state: BeaconState, slot: Slot,
                                    payload_attestation: PayloadAttestation) -> IndexedPayloadAttestation:
    """
    Return the indexed payload attestation corresponding to ``payload_attestation``.
@@ -442,7 +442,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:

##### Modified `process_withdrawals`

**Note:** This is modified to take only the `state` as parameter. Withdrawals are deterministic given the beacon state; any execution payload that has the corresponding block as parent beacon block is required to honor these withdrawals in the execution layer. This function must be called before `process_execution_payload_header` as this latter function affects validator balances.

```python
def process_withdrawals(state: BeaconState) -> None:
@@ -481,7 +481,7 @@ def process_withdrawals(state: BeaconState) -> None:
##### New `verify_execution_payload_header_signature`

```python
def verify_execution_payload_header_signature(state: BeaconState,
                                              signed_header: SignedExecutionPayloadHeader) -> bool:
    # Check the signature
    builder = state.validators[signed_header.message.builder_index]
@@ -516,7 +516,7 @@ def process_execution_payload_header(state: BeaconState, block: BeaconBlock) ->
    decrease_balance(state, builder_index, amount)
    increase_balance(state, block.proposer_index, amount)

    # Cache the signed execution payload header
    state.latest_execution_payload_header = header
```
@@ -557,7 +557,7 @@ def process_payload_attestation(state: BeaconState, payload_attestation: Payload
    data = payload_attestation.data
    assert data.beacon_block_root == state.latest_block_header.parent_root
    # Check that the attestation is for the previous slot
    assert data.slot + 1 == state.slot

    # Verify signature
    indexed_payload_attestation = get_indexed_payload_attestation(state, data.slot, payload_attestation)
@@ -658,11 +658,11 @@ def verify_execution_payload_envelope_signature(
*Note*: `process_execution_payload` is now an independent check in state transition. It is called when importing a signed execution payload proposed by the builder of the current slot.

```python
def process_execution_payload(state: BeaconState,
                              signed_envelope: SignedExecutionPayloadEnvelope,
                              execution_engine: ExecutionEngine, verify: bool = True) -> None:
    # Verify signature
    if verify:
        assert verify_execution_payload_envelope_signature(state, signed_envelope)
    envelope = signed_envelope.message
    payload = envelope.payload
@@ -670,7 +670,7 @@ def process_execution_payload(state: BeaconState,
    previous_state_root = hash_tree_root(state)
    if state.latest_block_header.state_root == Root():
        state.latest_block_header.state_root = previous_state_root

    # Verify consistency with the beacon block
    assert envelope.beacon_block_root == hash_tree_root(state.latest_block_header)
@@ -679,14 +679,14 @@ def process_execution_payload(state: BeaconState,
    assert envelope.builder_index == committed_header.builder_index
    assert committed_header.blob_kzg_commitments_root == hash_tree_root(envelope.blob_kzg_commitments)

    if not envelope.payload_withheld:
        # Verify the withdrawals root
        assert hash_tree_root(payload.withdrawals) == state.latest_withdrawals_root

        # Verify the gas_limit
        assert committed_header.gas_limit == payload.gas_limit

        assert committed_header.block_hash == payload.block_hash
        # Verify consistency of the parent hash with respect to the previous execution payload
        assert payload.parent_hash == state.latest_block_hash
        # Verify prev_randao
@@ -696,7 +696,7 @@ def process_execution_payload(state: BeaconState,
        # Verify commitments are under limit
        assert len(envelope.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
        # Verify the execution payload is valid
        versioned_hashes = [kzg_commitment_to_versioned_hash(commitment)
                            for commitment in envelope.blob_kzg_commitments]
        requests = envelope.execution_requests
        assert execution_engine.verify_and_notify_new_payload(
@@ -722,6 +722,6 @@ def process_execution_payload(state: BeaconState,
        state.latest_full_slot = state.slot

    # Verify the state root
    if verify:
        assert envelope.state_root == hash_tree_root(state)
```
@@ -16,20 +16,20 @@ This is an accompanying document which describes the expected actions of a "buil

## Introduction

With the EIP-7732 Fork, the protocol includes new staked participants of the protocol called *Builders*. While Builders are a subset of the validator set, they have extra attributions that are optional. Validators may opt to not be builders, and as such we collect in this document the set of guidelines for those validators that want to act as builders.

## Builders attributions

Builders can submit bids to produce execution payloads. They can broadcast these bids in the form of `SignedExecutionPayloadHeader` objects; these objects encode a commitment to reveal an execution payload in exchange for a payment. When their bids are chosen by the corresponding proposer, builders are expected to broadcast an accompanying `SignedExecutionPayloadEnvelope` object honoring the commitment.

Thus, builders' tasks are divided in two: submitting bids and submitting payloads.

### Constructing the payload bid

Builders can broadcast a payload bid for the current or the next slot's proposer to include. They produce a `SignedExecutionPayloadHeader` as follows (a condensed sketch of these steps appears after the list).

1. Set `header.parent_block_hash` to the current head of the execution chain (this can be obtained from the beacon state as `state.last_block_hash`).
2. Set `header.parent_block_root` to be the head of the consensus chain (this can be obtained from the beacon state as `hash_tree_root(state.latest_block_header)`). The `parent_block_root` and `parent_block_hash` must be compatible, in the sense that they both should come from the same `state` by the method described in this and the previous point.
3. Construct an execution payload. This can be performed with an external execution engine with a call to `engine_getPayloadV4`.
4. Set `header.block_hash` to be the block hash of the constructed payload, that is `payload.block_hash`.
5. Set `header.gas_limit` to be the gas limit of the constructed payload, that is `payload.gas_limit`.
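A condensed sketch of steps 1-5, assuming the pyspec execution context. The remaining header fields shown here (`builder_index`, `slot`, `value`, `blob_kzg_commitments_root`) are the ones the builder fills in from its own bid and blobs bundle, and the sketch uses the `state.latest_block_hash` field name as it appears in the state-transition changes above; treat the exact field list as an assumption, not a normative container definition.

```python
def build_payload_bid_header(state: BeaconState,
                             payload: ExecutionPayload,
                             builder_index: ValidatorIndex,
                             value: Gwei,
                             slot: Slot,
                             blob_kzg_commitments_root: Root) -> ExecutionPayloadHeader:
    # Steps 1-2: anchor the bid to the current execution and consensus heads.
    # Steps 3-5: commit to the constructed payload's block hash and gas limit.
    return ExecutionPayloadHeader(
        parent_block_hash=state.latest_block_hash,
        parent_block_root=hash_tree_root(state.latest_block_header),
        block_hash=payload.block_hash,
        gas_limit=payload.gas_limit,
        builder_index=builder_index,
        slot=slot,
        value=value,
        blob_kzg_commitments_root=blob_kzg_commitments_root,
    )
```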
@@ -48,13 +48,13 @@ def get_execution_payload_header_signature(
    return bls.Sign(privkey, signing_root)
```

The builder then assembles `signed_execution_payload_header = SignedExecutionPayloadHeader(message=header, signature=signature)` and broadcasts it on the `execution_payload_header` global gossip topic.

### Constructing the `BlobSidecar`s

[Modified in EIP-7732]

The `BlobSidecar` container is modified indirectly because the constant `KZG_COMMITMENT_INCLUSION_PROOF_DEPTH` is modified. The function `get_blob_sidecars` is modified because the KZG commitments are no longer included in the beacon block but rather in the `ExecutionPayloadEnvelope`; the builder has to send the commitments as parameters to this function.

```python
def get_blob_sidecars(signed_block: SignedBeaconBlock,
@@ -100,19 +100,19 @@ def get_blob_sidecars(signed_block: SignedBeaconBlock,

### Constructing the execution payload envelope

When the proposer publishes a valid `SignedBeaconBlock` containing a signed commitment by the builder, the builder is later expected to broadcast the corresponding `SignedExecutionPayloadEnvelope` that fulfills this commitment. See below for a special case of an *honestly withheld payload*.

To construct the `execution_payload_envelope` the builder must perform the following steps; we alias `header` to be the committed `ExecutionPayloadHeader` in the beacon block.

1. Set the `payload` field to be the `ExecutionPayload` constructed when creating the corresponding bid. This payload **MUST** have the same block hash as `header.block_hash`.
2. Set the `builder_index` field to be the validator index of the builder performing these steps. This field **MUST** be `header.builder_index`.
3. Set `beacon_block_root` to be the `hash_tree_root` of the corresponding beacon block.
4. Set `blob_kzg_commitments` to be the `commitments` field of the blobs bundle constructed when constructing the bid. This field **MUST** have a `hash_tree_root` equal to `header.blob_kzg_commitments_root`.
5. Set `payload_withheld` to `False`.

After setting these parameters, the builder should run `process_execution_payload(state, signed_envelope, verify=False)` and this function should not trigger an exception.

6. Set `state_root` to `hash_tree_root(state)`.

After preparing the `envelope` the builder should sign the envelope using:
```python
def get_execution_payload_envelope_signature(
@@ -121,8 +121,8 @@ def get_execution_payload_envelope_signature(
    signing_root = compute_signing_root(envelope, domain)
    return bls.Sign(privkey, signing_root)
```
The builder then assembles `signed_execution_payload_envelope = SignedExecutionPayloadEnvelope(message=envelope, signature=signature)` and broadcasts it on the `execution_payload` global gossip topic.

### Honest payload withheld messages

An honest builder that has seen a `SignedBeaconBlock` referencing its signed bid, but that block was not timely and thus is not the head of the builder's chain, may choose to withhold their execution payload. For this the builder should simply act as if it were building an empty payload, without any transactions, withdrawals, etc. The `payload.block_hash` may not be equal to `header.block_hash`. The builder may then set `payload_withheld` to `True`. If the PTC sees this message and votes for it, validators will attribute a *withholding boost* to the builder, which would increase the forkchoice weight of the parent block, favoring it and preventing the builder from being charged for the bid by not revealing.
@@ -44,15 +44,15 @@ This is the modification of the fork choice accompanying the EIP-7732 upgrade.

| Name | Value |
| -------------------- | ----------- |
| `PAYLOAD_TIMELY_THRESHOLD` | `PTC_SIZE / 2` (=`uint64(256)`) |
| `INTERVALS_PER_SLOT` | `4` # [modified in EIP-7732] |
| `PROPOSER_SCORE_BOOST` | `20` # [modified in EIP-7732] |
| `PAYLOAD_WITHHOLD_BOOST` | `40` |
| `PAYLOAD_REVEAL_BOOST` | `40` |
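A quick consistency check of the threshold against the `PTC_SIZE` preset quoted in the beacon-chain changes (`uint64(2**9)` = 512):

```python
PTC_SIZE = 2**9  # from the EIP-7732 beacon-chain preset

PAYLOAD_TIMELY_THRESHOLD = PTC_SIZE // 2
assert PAYLOAD_TIMELY_THRESHOLD == 256
```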
## Containers

### New `ChildNode`

Auxiliary class to consider `(block, slot, bool)` LMD voting

```python
@@ -75,7 +75,7 @@ class LatestMessage(object):
```

### Modified `update_latest_messages`

**Note:** the function `update_latest_messages` is updated to use the attestation slot instead of target. Notice that this function is only called on validated attestations and validators cannot attest twice in the same epoch without equivocating. Notice also that target epoch number and slot number are validated on `validate_on_attestation`.

```python
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
@@ -87,8 +87,8 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn
            store.latest_messages[i] = LatestMessage(slot=slot, root=beacon_block_root)
```

### Modified `Store`

**Note:** `Store` is modified to track the intermediate states of "empty" consensus blocks, that is, those consensus blocks for which the corresponding execution payload has not been revealed or has not been included on chain.

```python
@dataclass
@@ -114,7 +114,7 @@ class Store(object):
    ptc_vote: Dict[Root, Vector[uint8, PTC_SIZE]] = field(default_factory=dict)  # [New in EIP-7732]
```

### Modified `get_forkchoice_store`

```python
def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store:
@@ -162,8 +162,8 @@ def notify_ptc_messages(store: Store, state: BeaconState, payload_attestations:
                store,
                PayloadAttestationMessage(
                    validator_index=idx,
                    data=payload_attestation.data,
                    signature=BLSSignature(),
                    is_from_block=True
                )
            )
@@ -174,7 +174,7 @@

```python
def is_payload_present(store: Store, beacon_block_root: Root) -> bool:
    """
    Return whether the execution payload for the beacon block with root ``beacon_block_root`` was voted as present
    by the PTC
    """
    # The beacon block root must be known
@@ -192,15 +192,15 @@ def is_parent_node_full(store: Store, block: BeaconBlock) -> bool:
    return parent_block_hash == message_block_hash
```

### Modified `get_ancestor`

**Note:** `get_ancestor` is modified to return whether the chain is based on an *empty* or *full* block.

```python
def get_ancestor(store: Store, root: Root, slot: Slot) -> ChildNode:
    """
    Returns the beacon block root, the slot and the payload status of the ancestor of the beacon block
    with ``root`` at ``slot``. If the beacon block with ``root`` is already at ``slot`` or we are
    requesting an ancestor "in the future" it returns its PTC status instead of the actual payload content.
    """
    block = store.blocks[root]
    if block.slot <= slot:
@@ -235,7 +235,7 @@ def is_supporting_vote(store: Store, node: ChildNode, message: LatestMessage) ->
    """
    if node.root == message.root:
        # an attestation for a given root always counts for that root regardless if full or empty
        # as long as the attestation happened after the requested slot.
        return node.slot <= message.slot
    message_block = store.blocks[message.root]
    if node.slot >= message_block.slot:
@@ -245,7 +245,7 @@ def is_supporting_vote(store: Store, node: ChildNode, message: LatestMessage) ->
```

### New `compute_proposer_boost`

This is a helper to compute the proposer boost. It applies the proposer boost to any ancestor of the proposer boost root taking into account the payload presence. There is one exception: if the requested node has the same root and slot as the block with the proposer boost root, then the proposer boost is applied to both empty and full versions of the node.

```python
def compute_proposer_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei:
    if store.proposer_boost_root == Root():
@@ -283,7 +283,7 @@ def compute_withhold_boost(store: Store, state: BeaconState, node: ChildNode) ->
```

### New `compute_reveal_boost`

This is a similar helper to the last two; the only difference is that the reveal boost is only applied to the full version of the node when querying for the same slot as the revealed payload.

```python
def compute_reveal_boost(store: Store, state: BeaconState, node: ChildNode) -> Gwei:
@@ -302,7 +302,7 @@ def compute_reveal_boost(store: Store, state: BeaconState, node: ChildNode) -> G

### Modified `get_weight`

**Note:** `get_weight` is modified to only count votes for descending chains that support the status of a triple `Root, Slot, bool`, where the `bool` indicates if the block was full or not. `Slot` is needed for a correct implementation of `(Block, Slot)` voting.

```python
def get_weight(store: Store, node: ChildNode) -> Gwei:
@@ -326,7 +326,7 @@ def get_weight(store: Store, node: ChildNode) -> Gwei:
    return attestation_score + proposer_score + builder_reveal_score + builder_withhold_score
```

### Modified `get_head`

**Note:** `get_head` is modified to use the new `get_weight` function. It returns the `ChildNode` object corresponding to the head block.
@@ -343,13 +343,13 @@ def get_head(store: Store) -> ChildNode:
    while True:
        children = [
            ChildNode(root=root, slot=block.slot, is_payload_present=present) for (root, block) in blocks.items()
            if block.parent_root == best_child.root and block.slot > best_child.slot and
            (best_child.root == justified_root or is_parent_node_full(store, block) == best_child.is_payload_present)
            for present in (True, False) if root in store.execution_payload_states or not present
        ]
        if len(children) == 0:
            return best_child
        # if we have children we consider the current head advanced as a possible head
        highest_child_slot = max(child.slot for child in children)
        children += [
            ChildNode(root=best_child.root, slot=best_child.slot + 1, is_payload_present=best_child.is_payload_present)
@@ -360,10 +360,10 @@ def get_head(store: Store) -> ChildNode:
        # Ties are then broken by favoring full blocks
        # Ties then broken by favoring block with lexicographically higher root
        new_best_child = max(children, key=lambda child: (
            get_weight(store, child),
            blocks[child.root].slot,
            is_payload_present(store, child.root),
            child.is_payload_present,
            child.root
        )
        )
@@ -376,7 +376,7 @@ def get_head(store: Store) -> ChildNode:

### Modified `on_block`

*Note*: The handler `on_block` is modified to consider the pre `state` of the given consensus beacon block depending not only on the parent block root, but also on the parent blockhash. In addition we delay the checking of blob data availability until the processing of the execution payload.

```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
@@ -449,14 +449,14 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:

### New `on_execution_payload`

The handler `on_execution_payload` is called when the node receives a `SignedExecutionPayloadEnvelope` to sync.

```python
def on_execution_payload(store: Store, signed_envelope: SignedExecutionPayloadEnvelope) -> None:
    """
    Run ``on_execution_payload`` upon receiving a new execution payload.
    """
    envelope = signed_envelope.message
    # The corresponding beacon block root needs to be known
    assert envelope.beacon_block_root in store.block_states
@@ -472,7 +472,7 @@ def on_execution_payload(store: Store, signed_envelope: SignedExecutionPayloadEn

    # Add new state for this payload to the store
    store.execution_payload_states[envelope.beacon_block_root] = state
```

### `seconds_into_slot`
@@ -497,7 +497,7 @@ def on_tick_per_slot(store: Store, time: uint64) -> None:
    # If this is a new slot, reset store.proposer_boost_root
    if current_slot > previous_slot:
        store.proposer_boost_root = Root()
    else:
        # Reset the payload boost if this is the attestation time
        if seconds_into_slot(store) >= SECONDS_PER_SLOT // INTERVALS_PER_SLOT:
            store.payload_withhold_boost_root = Root()
@@ -534,7 +534,7 @@ def on_payload_attestation_message(
    assert data.slot == get_current_slot(store)
    # Verify the signature
    assert is_valid_indexed_payload_attestation(
        state,
        IndexedPayloadAttestation(
            attesting_indices=[ptc_message.validator_index],
            data=data,
@@ -545,7 +545,7 @@ def on_payload_attestation_message(
    ptc_index = ptc.index(ptc_message.validator_index)
    ptc_vote = store.ptc_vote[data.beacon_block_root]
    ptc_vote[ptc_index] = data.payload_status

    # Only update payload boosts with attestations from a block if the block is for the current slot and it's early
    if is_from_block and data.slot + 1 != get_current_slot(store):
        return
@@ -61,7 +61,7 @@ def compute_fork_version(epoch: Epoch) -> Version:

### Fork trigger

TBD. This fork is defined for testing purposes; the EIP may be combined with other
consensus-layer upgrades.
For now, we assume the condition will be triggered at epoch `EIP7732_FORK_EPOCH`.
@@ -1,10 +1,9 @@
# EIP-7732 -- Networking

This document contains the consensus-layer networking specification for EIP7732.

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modification in EIP-7732](#modification-in-eip-7732)
- [Preset](#preset)
- [Configuration](#configuration)
@@ -28,6 +27,12 @@ This document contains the consensus-layer networking specification for EIP7732.

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction

This document contains the consensus-layer networking specification for EIP7732.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Modification in EIP-7732

### Preset
@@ -67,7 +72,7 @@ class BlobSidecar(Container):

##### Modified `verify_blob_sidecar_inclusion_proof`

`verify_blob_sidecar_inclusion_proof` is modified in EIP-7732 to account for the fact that the KZG commitments are included in the `ExecutionPayloadEnvelope` and no longer in the beacon block body.

```python
def verify_blob_sidecar_inclusion_proof(blob_sidecar: BlobSidecar) -> bool:
@@ -121,7 +126,7 @@ EIP-7732 introduces new global topics for execution header, execution payload an

[Modified in EIP-7732]

The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in [the Beacon Chain changes](./beacon-chain.md).

There are no new validations for this topic. However, all validations with regards to the `ExecutionPayload` are removed:
@ -137,7 +142,7 @@ There are no new validations for this topic. However, all validations with regar
|
||||
And instead the following validations are set in place with the alias `header = signed_execution_payload_header.message`:
|
||||
|
||||
- If `execution_payload` verification of block's execution payload parent by an execution node **is complete**:
|
||||
- [REJECT] The block's execution payload parent (defined by `header.parent_block_hash`) passes all validation.
|
||||
- [REJECT] The block's execution payload parent (defined by `header.parent_block_hash`) passes all validation.
|
||||
- [REJECT] The block's parent (defined by `block.parent_root`) passes validation.
|
||||
|
||||
###### `execution_payload`
|
||||
@ -148,12 +153,12 @@ The following validations MUST pass before forwarding the `signed_execution_payl
|
||||
|
||||
- _[IGNORE]_ The envelope's block root `envelope.block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue payload for processing once the block is retrieved).
|
||||
- _[IGNORE]_ The node has not seen another valid `SignedExecutionPayloadEnvelope` for this block root from this builder.
|
||||
|
||||
Let `block` be the block with `envelope.beacon_block_root`.
|
||||
|
||||
Let `block` be the block with `envelope.beacon_block_root`.
|
||||
Let `header` alias `block.body.signed_execution_payload_header.message` (notice that this can be obtained from the `state.signed_execution_payload_header`)
|
||||
- _[REJECT]_ `block` passes validation.
|
||||
- _[REJECT]_ `envelope.builder_index == header.builder_index`
|
||||
- if `envelope.payload_withheld == False` then
|
||||
- _[REJECT]_ `block` passes validation.
|
||||
- _[REJECT]_ `envelope.builder_index == header.builder_index`
|
||||
- if `envelope.payload_withheld == False` then
|
||||
- _[REJECT]_ `payload.block_hash == header.block_hash`
|
||||
- _[REJECT]_ The builder signature, `signed_execution_payload_envelope.signature`, is valid with respect to the builder's public key.
|
||||
|
||||
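For illustration, the rules above can be read as a single gate function. This is a non-normative sketch: `seen_blocks` (a map from block root to block), `seen_envelopes` (a set of `(block_root, builder_index)` pairs) and `is_valid_builder_signature` are assumed client-side helpers, not spec definitions.

```python
from enum import Enum

class GossipResult(Enum):
    ACCEPT = "accept"
    IGNORE = "ignore"
    REJECT = "reject"

def validate_execution_payload_gossip(signed_envelope, seen_blocks, seen_envelopes, is_valid_builder_signature):
    """Sketch of the `execution_payload` topic checks; helper arguments are assumptions."""
    envelope = signed_envelope.message
    block = seen_blocks.get(envelope.beacon_block_root)
    if block is None:
        return GossipResult.IGNORE  # MAY queue the payload until the block is retrieved
    if (envelope.beacon_block_root, envelope.builder_index) in seen_envelopes:
        return GossipResult.IGNORE  # first valid envelope per block root and builder wins
    header = block.body.signed_execution_payload_header.message
    if envelope.builder_index != header.builder_index:
        return GossipResult.REJECT
    if not envelope.payload_withheld and envelope.payload.block_hash != header.block_hash:
        return GossipResult.REJECT
    if not is_valid_builder_signature(signed_envelope):
        return GossipResult.REJECT
    seen_envelopes.add((envelope.beacon_block_root, envelope.builder_index))
    return GossipResult.ACCEPT
```
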
@ -163,14 +168,14 @@ This topic is used to propagate signed payload attestation message.

The following validations MUST pass before forwarding the `payload_attestation_message` on the network, assuming the alias `data = payload_attestation_message.data`:

- _[IGNORE]_ The message's slot is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e. `data.slot == current_slot`.
- _[REJECT]_ The message's payload status is a valid status, i.e. `data.payload_status < PAYLOAD_INVALID_STATUS`.
- _[IGNORE]_ The `payload_attestation_message` is the first valid message received from the validator with index `payload_attestation_message.validator_index`.
- _[IGNORE]_ The message's block `data.beacon_block_root` has been seen (via both gossip and non-gossip sources) (a client MAY queue attestation for processing once the block is retrieved. Note a client might want to request payload after).
- _[REJECT]_ The message's block `data.beacon_block_root` passes validation.
- _[REJECT]_ The message's validator index is within the payload committee in `get_ptc(state, data.slot)`. The `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice.
- _[REJECT]_ The message's signature of `payload_attestation_message.signature` is valid with respect to the validator index.

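A rough, non-normative reading of the checks that differ from the envelope topic (status range, per-validator duplicate suppression and PTC membership); `seen_ptc_messages` is assumed to be a per-slot set of validator indices (e.g. a `defaultdict(set)`) and `head_state_at` an assumed helper returning the fork-choice head state advanced to the current slot.

```python
def validate_payload_attestation_gossip(msg, current_slot, seen_ptc_messages, head_state_at):
    """Sketch of the `payload_attestation_message` rules; helpers are assumptions."""
    data = msg.data
    if data.slot != current_slot:
        return "IGNORE"
    if data.payload_status >= PAYLOAD_INVALID_STATUS:
        return "REJECT"
    if msg.validator_index in seen_ptc_messages[data.slot]:
        return "IGNORE"  # only the first valid message per validator is forwarded
    state = head_state_at(current_slot)
    if msg.validator_index not in get_ptc(state, data.slot):
        return "REJECT"
    # Block-root and signature checks are omitted in this sketch.
    seen_ptc_messages[data.slot].add(msg.validator_index)
    return "ACCEPT"
```
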
###### `execution_payload_header`

This topic is used to propagate signed bids as `SignedExecutionPayloadHeader`.

@ -182,8 +187,8 @@ The following validations MUST pass before forwarding the `signed_execution_payl
- _[REJECT]_ The signed builder bid, `header.builder_index` is a valid, active, and non-slashed builder index in state.
- _[IGNORE]_ The signed builder bid value, `header.value`, is less than or equal to the builder's balance in state, i.e. `MIN_BUILDER_BALANCE + header.value < state.builder_balances[header.builder_index]`.
- _[IGNORE]_ `header.parent_block_hash` is the block hash of a known execution payload in fork choice.
- _[IGNORE]_ `header.parent_block_root` is the hash tree root of a known beacon block in fork choice.
- _[IGNORE]_ `header.slot` is the current slot or the next slot.
- _[REJECT]_ The builder signature, `signed_execution_payload_header_envelope.signature`, is valid with respect to the `header_envelope.builder_index`.

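A corresponding non-normative sketch for the bid topic; `is_active_builder`, `verify_bid_signature` and the two `known_*` sets are assumed client-side helpers and caches.

```python
def validate_builder_bid_gossip(signed_header, state, current_slot,
                                known_payload_hashes, known_block_roots,
                                is_active_builder, verify_bid_signature):
    """Sketch of the `execution_payload_header` (builder bid) checks above."""
    header = signed_header.message
    if not is_active_builder(state, header.builder_index):
        return "REJECT"
    if MIN_BUILDER_BALANCE + header.value >= state.builder_balances[header.builder_index]:
        return "IGNORE"  # bid is not covered by the builder's balance
    if header.parent_block_hash not in known_payload_hashes:
        return "IGNORE"
    if header.parent_block_root not in known_block_roots:
        return "IGNORE"
    if header.slot not in (current_slot, current_slot + 1):
        return "IGNORE"
    if not verify_bid_signature(state, signed_header):
        return "REJECT"
    return "ACCEPT"
```
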
### The Req/Resp domain

@ -220,7 +225,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` |
| `EIP7732_FORK_VERSION` | `eip7732.SignedBeaconBlock` |

##### BlobSidecarsByRoot v2

@ -1,10 +1,10 @@
# EIP-7732 -- Honest Validator

This document represents the changes and additions to the Honest validator guide included in the EIP-7732 fork.

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
**Table of Contents**

- [Validator assignment](#validator-assignment)
- [Lookahead](#lookahead)
@ -33,7 +33,7 @@ def get_ptc_assignment(
        validator_index: ValidatorIndex) -> Optional[Slot]:
    """
    Returns the slot during the requested epoch in which the validator with index `validator_index`
    is a member of the PTC. Returns None if no assignment is found.
    """
    next_epoch = Epoch(get_current_epoch(state) + 1)
    assert epoch <= next_epoch
@ -49,22 +49,22 @@ def get_ptc_assignment(

[New in EIP-7732]

`get_ptc_assignment` should be called at the start of each epoch to get the assignment for the next epoch (`current_epoch + 1`). A validator should plan for future assignments by noting their assigned PTC slot.

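For example, a validator client might refresh its upcoming PTC duty once per epoch. A minimal sketch, assuming `state` and `my_validator_index` come from the client's own context:

```python
def plan_ptc_duty(state: BeaconState, my_validator_index: ValidatorIndex) -> Optional[Slot]:
    """Sketch: look up next epoch's PTC assignment and note it for scheduling."""
    next_epoch = Epoch(get_current_epoch(state) + 1)
    # Returns None if the validator is not in any PTC during `next_epoch`.
    return get_ptc_assignment(state, next_epoch, my_validator_index)
```
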
## Beacon chain responsibilities

All validator responsibilities remain unchanged other than the following:

- Proposers are no longer required to broadcast `BlobSidecar` objects, as this becomes a builder's duty.
- Some validators are selected per slot to become PTC members. These validators must broadcast `PayloadAttestationMessage` objects during the assigned slot before the deadline of `3 * SECONDS_PER_SLOT // INTERVALS_PER_SLOT` seconds into the slot.

### Attestation

Attestation duties are not changed for validators; however, the attestation deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`.

### Sync Committee participations

Sync committee duties are not changed for validators; however, the submission deadline is implicitly changed by the change in `INTERVALS_PER_SLOT`.

### Block proposal

@ -74,32 +74,32 @@ Validators are still expected to propose `SignedBeaconBlock` at the beginning of

#### Constructing the new `signed_execution_payload_header` field in `BeaconBlockBody`

To obtain `signed_execution_payload_header`, a block proposer building a block on top of a `state` must take the following actions (see the sketch after this list):
* Listen to the `execution_payload_header` gossip global topic and save an accepted `signed_execution_payload_header` from a builder. The proposer MAY obtain these signed messages by other off-protocol means.
* The `signed_execution_payload_header` must satisfy the verification conditions found in `process_execution_payload_header`, that is:
  - The header signature must be valid.
  - The builder balance can cover the header value.
  - The header slot is for the proposal block slot.
  - The header parent block hash equals the state's `latest_block_hash`.
  - The header parent block root equals the current block's `parent_root`.
* Select one bid and set `body.signed_execution_payload_header = signed_execution_payload_header`.

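A minimal sketch of the selection step, assuming `saved_bids` is the local cache of gossip-accepted bids and `passes_header_checks` stands in for the `process_execution_payload_header` conditions listed above:

```python
def select_execution_payload_header(saved_bids, state, proposal_slot, parent_root, passes_header_checks):
    """Sketch: pick the highest-value bid satisfying the header checks above."""
    candidates = [
        signed_header for signed_header in saved_bids
        if passes_header_checks(state, signed_header, proposal_slot, parent_root)
    ]
    if not candidates:
        return None  # e.g. fall back to a locally built or off-protocol bid
    return max(candidates, key=lambda signed_header: signed_header.message.value)
```
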
#### Constructing the new `payload_attestations` field in `BeaconBlockBody`

Up to `MAX_PAYLOAD_ATTESTATIONS` aggregate payload attestations can be included in the block. The validator will have to (see the sketch after this list):
* Listen to the `payload_attestation_message` gossip global topic.
* The payload attestations added must satisfy the verification conditions found in payload attestation gossip validation and payload attestation processing. This means:
  - The `data.beacon_block_root` corresponds to `block.parent_root`.
  - The slot of the parent block is exactly one slot before the proposing slot.
  - The signature of the payload attestation data message verifies correctly.
* The proposer needs to aggregate all payload attestations with the same data into a given `PayloadAttestation` object. For this it needs to fill the `aggregation_bits` field by using the relative position of the validator indices with respect to the PTC that is obtained from `get_ptc(state, block_slot - 1)`.
* The proposer should only include payload attestations that are consistent with the current block they are proposing. That is, if the previous block had a payload, they should only include attestations with `payload_status = PAYLOAD_PRESENT`. Proposers are penalized for attestations that are not consistent with their view.

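One non-normative way to read the aggregation step, assuming `messages` holds the gossip-collected `PayloadAttestationMessage` objects for the parent block and that `PayloadAttestation`, `PTC_SIZE` and `get_ptc` are as defined in the Beacon Chain changes:

```python
def aggregate_payload_attestations(state, messages, block_slot):
    """Sketch: group messages by PayloadAttestationData and set the aggregation bit
    at each validator's position within the parent slot's PTC."""
    ptc = get_ptc(state, Slot(block_slot - 1))
    by_data = {}
    for message in messages:
        by_data.setdefault(hash_tree_root(message.data), []).append(message)
    attestations = []
    for group in by_data.values():
        bits = [False] * PTC_SIZE
        for message in group:
            bits[ptc.index(message.validator_index)] = True
        attestations.append(PayloadAttestation(
            aggregation_bits=Bitvector[PTC_SIZE](*bits),
            data=group[0].data,
            signature=bls.Aggregate([message.signature for message in group]),
        ))
    return attestations[:MAX_PAYLOAD_ATTESTATIONS]
```
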
#### Blob sidecars

The blob sidecars are no longer broadcast by the validator, and thus their construction is not necessary. This deprecates the corresponding sections from the honest validator guide in the Electra fork, moving them, albeit with some modifications, to the [honest Builder guide](./builder.md).

### Payload timeliness attestation

Some validators are selected to submit payload timeliness attestations. Validators should call `get_ptc_assignment` at the beginning of an epoch to be prepared to submit their PTC attestations during the next epoch.

A validator should create and broadcast the `payload_attestation_message` to the global execution attestation subnet no later than `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds after the start of `slot`.

@ -109,9 +109,9 @@ If a validator is in the payload attestation committee for the current slot (as
according to the logic in `get_payload_attestation_message` below and broadcast it not after `SECONDS_PER_SLOT * 3 / INTERVALS_PER_SLOT` seconds since the start of the slot, to the global `payload_attestation_message` pubsub topic.

The validator creates `payload_attestation_message` as follows (see the sketch at the end of this section):
* If the validator has not seen any beacon block for the assigned slot, do not submit a payload attestation. It will be ignored anyway.
* Set `data.beacon_block_root` to be the HTR of the beacon block seen for the assigned slot.
* Set `data.slot` to be the assigned slot.
* Set `data.payload_status` as follows:
  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = False`, set to `PAYLOAD_PRESENT`.
  - If a `SignedExecutionPayloadEnvelope` has been seen referencing the block `data.beacon_block_root` and the envelope has `payload_withheld = True`, set to `PAYLOAD_WITHHELD`.
@ -119,7 +119,7 @@ The validator creates `payload_attestation_message` as follows:
* Set `payload_attestation_message.validator_index = validator_index` where `validator_index` is the validator chosen to submit. The private key mapping to `state.validators[validator_index].pubkey` is used to sign the payload timeliness attestation.
* Sign the `payload_attestation_message.data` using the helper `get_payload_attestation_message_signature`.

Notice that the attester only signs the `PayloadAttestationData` and not the `validator_index` field in the message. Proposers need to aggregate these attestations as described above.

```python
def get_payload_attestation_message_signature(
@ -129,6 +129,6 @@ def get_payload_attestation_message_signature(
    return bls.Sign(privkey, signing_root)
```

**Remark** Validators do not need to check the full validity of the `ExecutionPayload` contained within the envelope, but the checks in the [P2P guide](./p2p-interface.md) should pass for the `SignedExecutionPayloadEnvelope`.

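Putting the steps above together, message construction might look roughly as follows. This is a non-normative sketch: `seen_block_root_for_slot` and `seen_envelope_for` are assumed local views of gossip, `PAYLOAD_ABSENT` is the no-envelope status from the Beacon Chain changes, and the argument shape of the signing helper (abbreviated in the excerpt above) is assumed.

```python
def build_payload_attestation_message(state, assigned_slot, validator_index, privkey,
                                      seen_block_root_for_slot, seen_envelope_for):
    """Sketch of the PTC duty: return a PayloadAttestationMessage, or None to skip."""
    block_root = seen_block_root_for_slot(assigned_slot)
    if block_root is None:
        return None  # no beacon block seen for the slot: do not attest
    envelope = seen_envelope_for(block_root)
    if envelope is None:
        payload_status = PAYLOAD_ABSENT  # assumed status for the no-envelope case
    elif envelope.payload_withheld:
        payload_status = PAYLOAD_WITHHELD
    else:
        payload_status = PAYLOAD_PRESENT
    data = PayloadAttestationData(
        beacon_block_root=block_root,
        slot=assigned_slot,
        payload_status=payload_status,
    )
    return PayloadAttestationMessage(
        validator_index=validator_index,
        data=data,
        # Argument shape of the helper is abbreviated in the excerpt above; assumed here.
        signature=get_payload_attestation_message_signature(state, data, privkey),
    )
```
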
@ -82,7 +82,7 @@ The following values are (non-configurable) constants used throughout the specif

| - | - | - |
| `MAX_SHARDS` | `uint64(2**12)` (= 4,096) | Theoretical max shard count (used to determine data structure sizes) |
| `ACTIVE_SHARDS` | `uint64(2**8)` (= 256) | Initial shard count |
| `MAX_PROPOSER_BLOCKS_BETWEEN_BUILDER_BLOCKS` | `uint64(2**4)` (= 16) | TODO: Need to define what happens if there were more blocks without builder blocks |

### Time parameters

@ -100,7 +100,7 @@ With the introduction of builder blocks the number of slots per epoch is doubled

## Configuration

Note: Some preset variables may become run-time configurable for testnets, but default to a preset while the spec is unstable.
E.g. `ACTIVE_SHARDS` and `SAMPLES_PER_BLOB`.

### Time parameters

@ -129,12 +129,12 @@ class BuilderBlockBid(Container):
    bid: Gwei  # Block builder bid paid to proposer

    validator_index: ValidatorIndex  # Validator index for this bid

    # Block builders use an Eth1 address -- need signature as
    # block bid and data gas base fees will be charged to this address
    signature_y_parity: bool
    signature_r: uint256
    signature_s: uint256
```

#### `BuilderBlockBidWithRecipientAddress`

@ -156,7 +156,7 @@ class ShardedCommitmentsContainer(Container):

    # The sizes of the blocks encoded in the commitments (last builder and all beacon blocks since)
    included_block_sizes: List[uint64, MAX_PROPOSER_BLOCKS_BETWEEN_BUILDER_BLOCKS + 1]

    # Number of commitments that are for sharded data (no blocks)
    included_sharded_data_commitments: uint64

@ -192,7 +192,7 @@ class BeaconState(bellatrix.BeaconState):
class BuilderBlockData(Container):
    execution_payload: ExecutionPayload
    sharded_commitments_container: ShardedCommitmentsContainer
```

#### `BeaconBlockBody`

@ -203,7 +203,7 @@ def low_degree_check(commitments: List[KZGCommitment]):
        coefs.append( - (r_to_K - 1) * bls_modular_inverse(K * roots[i * (K - 1) % K] * (r - roots[i])) % BLS_MODULUS)
    for i in range(d + 1):
        coefs[i] = (coefs[i] + B(r) * bls_modular_inverse(Bprime(r) * (r - roots[i]))) % BLS_MODULUS

    assert elliptic_curve_lincomb(commitments, coefs) == bls.inf_G1()
```

@ -279,7 +279,7 @@ def interpolate_polynomial(xs: List[BLSFieldElement], ys: List[BLSFieldElement])
            summand, [weight_adjustment, ((BLS_MODULUS - weight_adjustment) * xs[i])]
        )
        r = add_polynomials(r, summand)

    return r
```

@ -300,7 +300,7 @@ def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x:
        return r

    def Aprime(z):
        return field_elements_per_blob * pow(z, field_elements_per_blob - 1, BLS_MODULUS)

    r = 0
    inverses = [bls_modular_inverse(z - x) for z in roots]
@ -312,7 +312,7 @@ def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x:

## KZG Operations

We are using the KZG10 polynomial commitment scheme (Kate, Zaverucha and Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf).

### Elliptic curve helper functions

@ -387,7 +387,7 @@ def verify_kzg_multiproof(commitment: KZGCommitment,
```python
def verify_degree_proof(commitment: KZGCommitment, degree_bound: uint64, proof: KZGCommitment):
    """
    Verifies that the commitment is of polynomial degree < degree_bound.
    """

    assert (

@ -25,7 +25,6 @@
- [`BeaconBlockBody`](#beaconblockbody)
- [Deposits](#deposits)
- [`get_beacon_proposer_index`](#get_beacon_proposer_index)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -96,7 +95,7 @@ def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
| --------------------- | ------------------------------------------------------------------------------- |
| `BLS_G1_GENERATOR` | `BLSG1Point('0x97f1d3a73197d7942695638c4fa9ac0fc3688c4f9774b905a14e3a3f171bac586c55e83ff97a1aeffb3af00adb22c6bb') # noqa: E501` |
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
| `CURDLEPROOFS_CRS` | TBD |

### Curdleproofs and opening proofs

@ -423,7 +422,7 @@ def add_validator_to_registry(state: BeaconState,
    # [New in Whisk]
    k = get_unique_whisk_k(state, ValidatorIndex(len(state.validators) - 1))
    state.whisk_trackers.append(get_initial_tracker(k))
    state.whisk_k_commitments.append(get_k_commitment(k))
```

### `get_beacon_proposer_index`

@ -436,25 +435,3 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
    assert state.latest_block_header.slot == state.slot  # sanity check `process_block_header` has been called
    return state.latest_block_header.proposer_index
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified purely for Whisk testing.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    state_capella = capella.initialize_beacon_state_from_eth1(
        eth1_block_hash,
        eth1_timestamp,
        deposits,
        execution_payload_header,
    )
    state = upgrade_to_whisk(state_capella)
    state.fork.previous_version = WHISK_FORK_VERSION
    state.fork.current_version = WHISK_FORK_VERSION
    return state
```

@ -54,7 +54,6 @@
- [Slashings](#slashings)
- [Participation flags updates](#participation-flags-updates)
- [Sync committee updates](#sync-committee-updates)
- [Initialize state for pure Altair testnets and test vectors](#initialize-state-for-pure-altair-testnets-and-test-vectors)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -672,52 +671,3 @@ def process_sync_committee_updates(state: BeaconState) -> None:
    state.current_sync_committee = state.next_sync_committee
    state.next_sync_committee = get_next_sync_committee(state)
```

## Initialize state for pure Altair testnets and test vectors

This helper function is only for initializing the state for pure Altair testnets and tests.

*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the previous and current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit]) -> BeaconState:
    fork = Fork(
        previous_version=ALTAIR_FORK_VERSION, # [Modified in Altair] for testing only
        current_version=ALTAIR_FORK_VERSION, # [Modified in Altair]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # [New in Altair] Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    return state
```

@ -71,7 +71,7 @@ For light clients, the following validations MUST additionally pass before forwa

Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored.

The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`.
The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.beacon.slot))`.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

@ -99,7 +99,7 @@ For light clients, the following validations MUST additionally pass before forwa

Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored.

The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`.
The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.beacon.slot))`.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

@ -1,18 +1,12 @@
# Altair -- Networking

This document contains the networking specification for Altair.
This document should be viewed as additive to the [document from Phase 0](../phase0/p2p-interface.md) and will be referred to as the "Phase 0 document" hereafter.
Readers should understand the Phase 0 document and use it as a basis to understand the changes outlined in this document.

Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery domain. Some Phase 0 features will be deprecated, but not removed immediately.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modifications in Altair](#modifications-in-altair)
- [MetaData](#metadata)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
@ -39,6 +33,14 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document contains the networking specification for Altair.
This document should be viewed as additive to the [document from Phase 0](../phase0/p2p-interface.md) and will be referred to as the "Phase 0 document" hereafter.
Readers should understand the Phase 0 document and use it as a basis to understand the changes outlined in this document.

Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery domain. Some Phase 0 features will be deprecated, but not removed immediately.

## Modifications in Altair

### MetaData

@ -44,7 +44,6 @@
- [`process_execution_payload`](#process_execution_payload)
- [Epoch processing](#epoch-processing)
- [Slashings](#slashings)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -441,62 +440,3 @@ def process_slashings(state: BeaconState) -> None:
        penalty = penalty_numerator // total_balance * increment
        decrease_balance(state, ValidatorIndex(index), penalty)
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Bellatrix testing only.
Modifications include:
1. Use `BELLATRIX_FORK_VERSION` as the previous and current fork version.
2. Utilize the Bellatrix `BeaconBlockBody` when constructing the initial `latest_block_header`.
3. Initialize `latest_execution_payload_header`.
   If `execution_payload_header == ExecutionPayloadHeader()`, then the Merge has not yet occurred.
   Else, the Merge starts from genesis and the transition is incomplete.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    fork = Fork(
        previous_version=BELLATRIX_FORK_VERSION, # [Modified in Bellatrix] for testing only
        current_version=BELLATRIX_FORK_VERSION, # [Modified in Bellatrix]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    # [New in Bellatrix] Initialize the execution payload header
    # If empty, will initialize a chain that has not yet gone through the Merge transition
    state.latest_execution_payload_header = execution_payload_header

    return state
```

@ -1,17 +1,12 @@
# Bellatrix -- Networking

This document contains the networking specification for the Bellatrix.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. This document should be viewed as additive to the documents from [Phase 0](../phase0/p2p-interface.md) and from [Altair](../altair/p2p-interface.md)
and will be referred to as the "Phase 0 document" and "Altair document" respectively, hereafter.
Readers should understand the Phase 0 and Altair documents and use them as a basis to understand the changes outlined in this document.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modifications in Bellatrix](#modifications-in-bellatrix)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
@ -32,6 +27,14 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document contains the networking specification for Bellatrix.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. This document should be viewed as additive to the documents from [Phase 0](../phase0/p2p-interface.md) and from [Altair](../altair/p2p-interface.md)
and will be referred to as the "Phase 0 document" and "Altair document" respectively, hereafter.
Readers should understand the Phase 0 and Altair documents and use them as a basis to understand the changes outlined in this document.

## Modifications in Bellatrix

### The gossip domain: gossipsub

@ -146,7 +149,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
place at Phase 0, so `GOSSIP_MAX_SIZE` has increased to 10 MiB on the network.
At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
filled entirely with data at a cost of 16 gas per byte can create a valid
`ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for

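The ~2 MiB figure above follows from simple arithmetic (illustrative only, using the gas values quoted in the paragraph):

```python
GAS_LIMIT = 30_000_000        # approximate mainnet block gas limit quoted above
CALLDATA_GAS_PER_BYTE = 16    # cost of a non-zero calldata byte

max_tx_data_bytes = GAS_LIMIT // CALLDATA_GAS_PER_BYTE
print(max_tx_data_bytes)                   # 1_875_000 bytes
print(max_tx_data_bytes / (1024 * 1024))   # ~1.79 MiB, i.e. roughly 2 MiB
```
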
@ -38,7 +38,6 @@
- [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Modified `process_operations`](#modified-process_operations)
- [New `process_bls_to_execution_change`](#new-process_bls_to_execution_change)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -484,58 +483,3 @@ def process_bls_to_execution_change(state: BeaconState,
        + address_change.to_execution_address
    )
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Capella testing only.
Modifications include:
1. Use `CAPELLA_FORK_VERSION` as the previous and current fork version.
2. Utilize the Capella `BeaconBlockBody` when constructing the initial `latest_block_header`.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    fork = Fork(
        previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only
        current_version=CAPELLA_FORK_VERSION, # [Modified in Capella]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    # Initialize the execution payload header
    state.latest_execution_payload_header = execution_payload_header

    return state
```

@ -1,15 +1,12 @@
# Capella -- Networking

This document contains the networking specification for Capella.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

### Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modifications in Capella](#modifications-in-capella)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
@ -25,6 +22,11 @@ The specification of these changes continues in the same format as the network s
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document contains the networking specification for Capella.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Modifications in Capella

@ -110,7 +110,7 @@ Validator balances are withdrawn periodically via an automatic process. For exit
There is one prerequisite for this automated process:
the validator's withdrawal credentials pointing to an execution layer address, i.e. having an `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.

If a validator has a `BLS_WITHDRAWAL_PREFIX` withdrawal credential prefix, to participate in withdrawals the validator must
create a one-time message to change their withdrawal credential from the version authenticated with a BLS key to the
version compatible with the execution layer. This message -- a `BLSToExecutionChange` -- is available starting in Capella

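For orientation, building and signing such a message looks roughly like the sketch below. It is non-normative: the container fields and `DOMAIN_BLS_TO_EXECUTION_CHANGE` follow the Capella beacon chain spec, while `withdrawal_bls_pubkey`, `withdrawal_bls_privkey` and `my_execution_address` are assumed inputs held by the staker.

```python
def build_bls_to_execution_change(state, validator_index, withdrawal_bls_pubkey,
                                  withdrawal_bls_privkey, my_execution_address):
    """Sketch: create a one-time SignedBLSToExecutionChange for `validator_index`."""
    message = BLSToExecutionChange(
        validator_index=validator_index,
        from_bls_pubkey=withdrawal_bls_pubkey,
        to_execution_address=my_execution_address,
    )
    domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE,
                            genesis_validators_root=state.genesis_validators_root)
    signing_root = compute_signing_root(message, domain)
    return SignedBLSToExecutionChange(
        message=message,
        signature=bls.Sign(withdrawal_bls_privkey, signing_root),
    )
```
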
@ -42,7 +42,6 @@
- [Modified `process_voluntary_exit`](#modified-process_voluntary_exit)
- [Epoch processing](#epoch-processing)
- [Registry updates](#registry-updates)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@ -466,59 +465,3 @@ def process_registry_updates(state: BeaconState) -> None:
        validator = state.validators[index]
        validator.activation_epoch = compute_activation_exit_epoch(get_current_epoch(state))
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only.

The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type
when initializing the first body-root.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit],
                                      execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
                                      ) -> BeaconState:
    fork = Fork(
        previous_version=DENEB_FORK_VERSION, # [Modified in Deneb] for testing only
        current_version=DENEB_FORK_VERSION, # [Modified in Deneb]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # Fill in sync committees
    # Note: A duplicate committee is assigned for the current and next committee at genesis
    state.current_sync_committee = get_next_sync_committee(state)
    state.next_sync_committee = get_next_sync_committee(state)

    # Initialize the execution payload header
    # If empty, will initialize a chain that has not yet gone through the Merge transition
    state.latest_execution_payload_header = execution_payload_header

    return state
```

@ -1,15 +1,12 @@
# Deneb -- Networking

This document contains the consensus-layer networking specification for Deneb.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Modifications in Deneb](#modifications-in-deneb)
- [Constant](#constant)
- [Preset](#preset)
@ -42,6 +39,12 @@ The specification of these changes continues in the same format as the network s
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document contains the consensus-layer networking specification for Deneb.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Modifications in Deneb

### Constant
@ -188,6 +191,16 @@ The following validations MUST pass before forwarding the `blob_sidecar` on the
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_header.parent_root`/`block_header.slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message.

The gossip `ForkDigestValue` is determined based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))`.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------------|---------------------|
| `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` |

##### Attestation subnets

###### `beacon_attestation_{subnet_id}`
@ -242,7 +255,7 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[1]: # (eth2spec: skip)
[0]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
@ -264,14 +277,6 @@ Clients SHOULD NOT respond with blocks that fail the beacon chain state transiti

*[New in Deneb:EIP4844]*

The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[1]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |

Request Content:

```
@ -310,6 +315,16 @@ Clients SHOULD include a sidecar in the response as soon as it passes the gossip
Clients SHOULD NOT respond with sidecars related to blocks that fail gossip validation rules.
Clients SHOULD NOT respond with sidecars related to blocks that fail the beacon chain state transition.

For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------------|---------------------|
| `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` |

###### Blob retrieval via local execution layer client

In addition to `BlobSidecarsByRoot` requests, recent blobs MAY be retrieved by querying the Execution Layer (i.e. via `engine_getBlobsV1`).
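A rough, non-normative sketch of such a lookup, assuming the execution client exposes `engine_getBlobsV1` over authenticated JSON-RPC and takes a list of blob versioned hashes; `ENGINE_URL` and `jwt_token` are placeholders:

```python
import json
import urllib.request

ENGINE_URL = "http://localhost:8551"  # placeholder engine API endpoint

def get_blobs_from_local_el(versioned_hashes, jwt_token):
    """Sketch: ask the local execution client for blobs by versioned hash."""
    payload = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "engine_getBlobsV1",
        "params": [versioned_hashes],  # 32-byte hashes as 0x-prefixed hex strings
    }
    request = urllib.request.Request(
        ENGINE_URL,
        data=json.dumps(payload).encode(),
        headers={"Content-Type": "application/json",
                 "Authorization": f"Bearer {jwt_token}"},
    )
    with urllib.request.urlopen(request) as response:
        result = json.load(response)["result"]
    # Each entry is either null (blob unknown to the EL) or a blob-and-proof object.
    return result
```
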
@ -325,14 +340,6 @@ When clients use the local execution layer to retrieve blobs, they MUST behave a

*[New in Deneb:EIP4844]*

The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[1]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------|-------------------------------|
| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` |

Request Content:
```
(
@ -399,6 +406,16 @@ Clients MUST respond with blob sidecars that are consistent from a single chain

After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request.

For each `response_chunk`, a `ForkDigest`-context based on `compute_fork_version(compute_epoch_at_slot(blob_sidecar.signed_block_header.message.slot))` is used to select the fork namespace of the Response type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
|--------------------------------|---------------------|
| `DENEB_FORK_VERSION` and later | `deneb.BlobSidecar` |

## Design decision rationale

### Why are blobs relayed as a sidecar, separate from beacon blocks?

@ -438,7 +438,7 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment],
                  for commitment, y in zip(commitments, ys)]
    C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys]
    C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers)

    return bls.pairing_check([
        [bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2_MONOMIAL[1]))],
        [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()]

@ -31,8 +31,8 @@
|
||||
- [`DepositRequest`](#depositrequest)
|
||||
- [`WithdrawalRequest`](#withdrawalrequest)
|
||||
- [`ConsolidationRequest`](#consolidationrequest)
|
||||
- [`SingleAttestation`](#singleattestation)
|
||||
- [`ExecutionRequests`](#executionrequests)
|
||||
- [`SingleAttestation`](#singleattestation)
|
||||
- [Modified Containers](#modified-containers)
|
||||
- [`AttesterSlashing`](#attesterslashing)
|
||||
- [`BeaconBlockBody`](#beaconblockbody)
|
||||
@ -107,7 +107,6 @@
|
||||
- [Execution layer consolidation requests](#execution-layer-consolidation-requests)
|
||||
- [New `is_valid_switch_to_compounding_request`](#new-is_valid_switch_to_compounding_request)
|
||||
- [New `process_consolidation_request`](#new-process_consolidation_request)
|
||||
- [Testing](#testing)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@ -130,14 +129,14 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `UNSET_DEPOSIT_REQUESTS_START_INDEX` | `uint64(2**64 - 1)` | *[New in Electra:EIP6110]* |
|
||||
| `FULL_EXIT_REQUEST_AMOUNT` | `uint64(0)` | *[New in Electra:EIP7002]* |
|
||||
| `UNSET_DEPOSIT_REQUESTS_START_INDEX` | `uint64(2**64 - 1)` | *[New in Electra:EIP6110]* Value which indicates no start index has been assigned |
|
||||
| `FULL_EXIT_REQUEST_AMOUNT` | `uint64(0)` | *[New in Electra:EIP7002]* Withdrawal amount used to signal a full validator exit |
|
||||
|
||||
### Withdrawal prefixes
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `COMPOUNDING_WITHDRAWAL_PREFIX` | `Bytes1('0x02')` |
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `COMPOUNDING_WITHDRAWAL_PREFIX` | `Bytes1('0x02')` | *[New in Electra:EIP7251]* Withdrawal credential prefix for a compounding validator |
|
||||
|
||||
### Execution layer triggered requests
|
||||
|
||||
@ -151,17 +150,17 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
|
||||
### Gwei values
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MIN_ACTIVATION_BALANCE` | `Gwei(2**5 * 10**9)` (= 32,000,000,000) |
|
||||
| `MAX_EFFECTIVE_BALANCE_ELECTRA` | `Gwei(2**11 * 10**9)` (= 2048,000,000,000) |
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `MIN_ACTIVATION_BALANCE` | `Gwei(2**5 * 10**9)` (= 32,000,000,000) | *[New in Electra:EIP7251]* Minimum balance for a validator to become active |
|
||||
| `MAX_EFFECTIVE_BALANCE_ELECTRA` | `Gwei(2**11 * 10**9)` (= 2048,000,000,000) | *[New in Electra:EIP7251]* Maximum effective balance for a compounding validator |
|
||||
|
||||
### Rewards and penalties
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA` | `uint64(2**12)` (= 4,096) |
|
||||
| `WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA` | `uint64(2**12)` (= 4,096) |
|
||||
| `MIN_SLASHING_PENALTY_QUOTIENT_ELECTRA` | `uint64(2**12)` (= 4,096) |
|
||||
| `WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA` | `uint64(2**12)` (= 4,096) |
|
||||
|
||||
### State list lengths
|
||||
|
||||
@ -175,16 +174,16 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MAX_ATTESTER_SLASHINGS_ELECTRA` | `2**0` (= 1) | *[New in Electra:EIP7549]* |
|
||||
| `MAX_ATTESTATIONS_ELECTRA` | `2**3` (= 8) | *[New in Electra:EIP7549]* |
|
||||
| `MAX_ATTESTER_SLASHINGS_ELECTRA` | `2**0` (= 1) |
|
||||
| `MAX_ATTESTATIONS_ELECTRA` | `2**3` (= 8) |
|
||||
|
||||
### Execution
|
||||
|
||||
| Name | Value | Description |
|
||||
| - | - | - |
|
||||
| `MAX_DEPOSIT_REQUESTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | *[New in Electra:EIP6110]* Maximum number of deposit receipts allowed in each payload |
|
||||
| `MAX_DEPOSIT_REQUESTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | *[New in Electra:EIP6110]* Maximum number of execution layer deposit requests in each payload |
|
||||
| `MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD` | `uint64(2**4)` (= 16)| *[New in Electra:EIP7002]* Maximum number of execution layer withdrawal requests in each payload |
|
||||
| `MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD` | `uint64(1)` (= 1) | *[New in Electra:EIP7251]* Maximum number of execution layer consolidation requests in each payload |
|
||||
| `MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD` | `uint64(2**1)` (= 2) | *[New in Electra:EIP7251]* Maximum number of execution layer consolidation requests in each payload |
|
||||
|
||||
### Withdrawals processing
|
||||
|
||||
@ -280,16 +279,6 @@ class ConsolidationRequest(Container):
|
||||
target_pubkey: BLSPubkey
|
||||
```
|
||||
|
||||
#### `SingleAttestation`
|
||||
|
||||
```python
|
||||
class SingleAttestation(Container):
|
||||
committee_index: CommitteeIndex
|
||||
attester_index: ValidatorIndex
|
||||
data: AttestationData
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
#### `ExecutionRequests`
|
||||
|
||||
*Note*: This container holds requests from the execution layer that are received in [
|
||||
@ -303,6 +292,16 @@ class ExecutionRequests(Container):
|
||||
consolidations: List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD] # [New in Electra:EIP7251]
|
||||
```
|
||||
|
||||
#### `SingleAttestation`
|
||||
|
||||
```python
|
||||
class SingleAttestation(Container):
|
||||
committee_index: CommitteeIndex
|
||||
attester_index: ValidatorIndex
|
||||
data: AttestationData
|
||||
signature: BLSSignature
|
||||
```
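
For orientation, a hedged sketch of how a client might populate the container above for an unaggregated attestation; every surrounding variable name here is illustrative and not taken from the spec:

```python
# Illustrative construction of a SingleAttestation; inputs are assumed to exist in scope.
single_attestation = spec.SingleAttestation(
    committee_index=committee_index,   # committee the validator was assigned to
    attester_index=validator_index,    # the attesting validator's index
    data=attestation_data,             # the AttestationData being attested
    signature=attestation_signature,   # BLS signature over the attestation data
)
```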
|
||||
|
||||
### Modified Containers
|
||||
|
||||
#### `AttesterSlashing`
|
||||
@ -1643,8 +1642,8 @@ def process_consolidation_request(
|
||||
if not (has_correct_credential and is_correct_source_address):
|
||||
return
|
||||
|
||||
# Verify that target has execution withdrawal credentials
|
||||
if not has_execution_withdrawal_credential(target_validator):
|
||||
# Verify that target has compounding withdrawal credentials
|
||||
if not has_compounding_withdrawal_credential(target_validator):
|
||||
return
|
||||
|
||||
# Verify the source and the target are active
|
||||
@ -1676,75 +1675,4 @@ def process_consolidation_request(
|
||||
source_index=source_index,
|
||||
target_index=target_index
|
||||
))
|
||||
|
||||
# Churn any target excess active balance of target and raise its max
|
||||
if has_eth1_withdrawal_credential(target_validator):
|
||||
switch_to_compounding_validator(state, target_index)
|
||||
```
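
The hunk above tightens the target check from any execution withdrawal credential to a compounding one. For context, the compounding predicate amounts to checking the `0x02` prefix on the target's credentials; a sketch of that check follows (refer to the Electra spec for the normative definition):

```python
# Sketch of the predicate referenced above; not the authoritative spec text.
COMPOUNDING_WITHDRAWAL_PREFIX = b'\x02'

def has_compounding_withdrawal_credential_sketch(withdrawal_credentials: bytes) -> bool:
    return withdrawal_credentials[:1] == COMPOUNDING_WITHDRAWAL_PREFIX
```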
|
||||
|
||||
## Testing
|
||||
|
||||
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Electra testing only.
|
||||
Modifications include:
|
||||
1. Use `ELECTRA_FORK_VERSION` as the previous and current fork version.
|
||||
2. Utilize the Electra `BeaconBlockBody` when constructing the initial `latest_block_header`.
|
||||
3. *[New in Electra:EIP6110]* Add `deposit_requests_start_index` variable to the genesis state initialization.
|
||||
4. *[New in Electra:EIP7251]* Initialize new fields to support increasing the maximum effective balance.
|
||||
|
||||
```python
|
||||
def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||
eth1_timestamp: uint64,
|
||||
deposits: Sequence[Deposit],
|
||||
execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader()
|
||||
) -> BeaconState:
|
||||
fork = Fork(
|
||||
previous_version=ELECTRA_FORK_VERSION, # [Modified in Electra:EIP6110] for testing only
|
||||
current_version=ELECTRA_FORK_VERSION, # [Modified in Electra:EIP6110]
|
||||
epoch=GENESIS_EPOCH,
|
||||
)
|
||||
state = BeaconState(
|
||||
genesis_time=eth1_timestamp + GENESIS_DELAY,
|
||||
fork=fork,
|
||||
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
|
||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||
randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
|
||||
deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX, # [New in Electra:EIP6110]
|
||||
)
|
||||
|
||||
# Process deposits
|
||||
leaves = list(map(lambda deposit: deposit.data, deposits))
|
||||
for index, deposit in enumerate(deposits):
|
||||
deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
|
||||
state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
|
||||
process_deposit(state, deposit)
|
||||
|
||||
# Process deposit balance updates
|
||||
validator_pubkeys = [v.pubkey for v in state.validators]
|
||||
for deposit in state.pending_deposits:
|
||||
validator_index = ValidatorIndex(validator_pubkeys.index(deposit.pubkey))
|
||||
increase_balance(state, validator_index, deposit.amount)
|
||||
state.pending_deposits = []
|
||||
|
||||
# Process activations
|
||||
for index, validator in enumerate(state.validators):
|
||||
balance = state.balances[index]
|
||||
# [Modified in Electra:EIP7251]
|
||||
validator.effective_balance = min(
|
||||
balance - balance % EFFECTIVE_BALANCE_INCREMENT, get_max_effective_balance(validator))
|
||||
if validator.effective_balance >= MIN_ACTIVATION_BALANCE:
|
||||
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
||||
validator.activation_epoch = GENESIS_EPOCH
|
||||
|
||||
# Set genesis validators root for domain separation and chain versioning
|
||||
state.genesis_validators_root = hash_tree_root(state.validators)
|
||||
|
||||
# Fill in sync committees
|
||||
# Note: A duplicate committee is assigned for the current and next committee at genesis
|
||||
state.current_sync_committee = get_next_sync_committee(state)
|
||||
state.next_sync_committee = get_next_sync_committee(state)
|
||||
|
||||
# Initialize the execution payload header
|
||||
state.latest_execution_payload_header = execution_payload_header
|
||||
|
||||
return state
|
||||
```
|
||||
|
@ -1,15 +1,12 @@
|
||||
# Electra -- Networking
|
||||
|
||||
This document contains the consensus-layer networking specification for Electra.
|
||||
|
||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Modifications in Electra](#modifications-in-electra)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
@ -21,6 +18,12 @@ The specification of these changes continues in the same format as the network s
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document contains the consensus-layer networking specification for Electra.
|
||||
|
||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||
|
||||
## Modifications in Electra
|
||||
|
||||
### The gossip domain: gossipsub
|
||||
|
@ -1,19 +1,11 @@
|
||||
# Phase 0 -- Networking
|
||||
|
||||
This document contains the networking specification for Phase 0.
|
||||
|
||||
It consists of four main sections:
|
||||
|
||||
1. A specification of the network fundamentals.
|
||||
2. A specification of the three network interaction *domains* of the proof-of-stake consensus layer: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
|
||||
3. The rationale and further explanation for the design choices made in the previous two sections.
|
||||
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which clients are being developed.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Network fundamentals](#network-fundamentals)
|
||||
- [Transport](#transport)
|
||||
- [Encryption and identification](#encryption-and-identification)
|
||||
@ -115,6 +107,17 @@ It consists of four main sections:
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document contains the networking specification for Phase 0.
|
||||
|
||||
It consists of four main sections:
|
||||
|
||||
1. A specification of the network fundamentals.
|
||||
2. A specification of the three network interaction *domains* of the proof-of-stake consensus layer: (a) the gossip domain, (b) the discovery domain, and (c) the Req/Resp domain.
|
||||
3. The rationale and further explanation for the design choices made in the previous two sections.
|
||||
4. An analysis of the maturity/state of the libp2p features required by this spec across the languages in which clients are being developed.
|
||||
|
||||
## Network fundamentals
|
||||
|
||||
This section outlines the specification for the networking stack in Ethereum consensus-layer clients.
|
||||
@ -960,7 +963,8 @@ The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the
|
||||
The ENR MAY contain the following entries:
|
||||
|
||||
- An IPv4 address (`ip` field) and/or IPv6 address (`ip6` field).
|
||||
- A TCP port (`tcp` field) representing the local libp2p listening port.
|
||||
- A TCP port (`tcp` field) representing the local libp2p TCP listening port.
|
||||
- A QUIC port (`quic` field) representing the local libp2p QUIC (UDP) listening port.
|
||||
- A UDP port (`udp` field) representing the local discv5 listening port.
|
||||
|
||||
Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778).
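
As a purely illustrative view (real ENRs are signed, RLP-encoded records; every value below is made up), the optional entries listed above map to key-value pairs along these lines:

```python
# Illustrative ENR entries only; see EIP-778 for the actual record format.
optional_enr_entries = {
    "ip": "192.0.2.1",   # IPv4 address
    "tcp": 9000,         # libp2p TCP listening port
    "quic": 9001,        # libp2p QUIC (UDP) listening port
    "udp": 9000,         # discv5 listening port
}
```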
|
||||
|
@ -86,9 +86,9 @@ A detailed analysis of the calculation of the weak subjectivity period is made i
|
||||
```python
|
||||
def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
"""
|
||||
Returns the weak subjectivity period for the current ``state``.
|
||||
Returns the weak subjectivity period for the current ``state``.
|
||||
This computation takes into account the effect of:
|
||||
- validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
|
||||
- validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
|
||||
- validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
|
||||
A detailed calculation can be found at:
|
||||
https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
|
||||
@ -113,7 +113,7 @@ def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
ws_period += (
|
||||
3 * N * D * t // (200 * Delta * (T - t))
|
||||
)
|
||||
|
||||
|
||||
return ws_period
|
||||
```
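
To make the increment above concrete, here is a rough arithmetic sketch with assumed inputs (none of these values are taken from mainnet or from this spec; `N`, `t`, `T`, `D`, and `Delta` follow the meanings used in the function above):

```python
# Illustrative arithmetic only; every input below is an assumption.
N = 1_000_000    # active validators
t = 20           # average effective balance, in ETH
T = 32           # maximum effective balance, in ETH
D = 10           # safety decay
Delta = 16 * 32  # balance top-up limit per epoch (MAX_DEPOSITS * SLOTS_PER_EPOCH)

increment = 3 * N * D * t // (200 * Delta * (T - t))
print(increment)  # 488 additional epochs on top of the base withdrawability delay
```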
|
||||
|
||||
|
@ -71,7 +71,7 @@ Note that the generalized index has the convenient property that the two childre
|
||||
```python
|
||||
def merkle_tree(leaves: Sequence[Bytes32]) -> Sequence[Bytes32]:
|
||||
"""
|
||||
Return an array representing the tree nodes by generalized index:
|
||||
Return an array representing the tree nodes by generalized index:
|
||||
[0, 1, 2, 3, 4, 5, 6, 7], where each layer is a power of 2. The 0 index is ignored. The 1 index is the root.
|
||||
The result will be twice the size as the padded bottom layer for the input leaves.
|
||||
"""
|
||||
|
@ -326,7 +326,7 @@ optimistic blocks (and vice-versa).
|
||||
|
||||
### Why sync optimistically?
|
||||
|
||||
Most execution engines use state sync as a default sync mechanism on Ethereum Mainnet
|
||||
Most execution engines use state sync as a default sync mechanism on Ethereum Mainnet
|
||||
because executing blocks from genesis takes several weeks on commodity hardware.
|
||||
|
||||
State sync requires the knowledge of the current head of the chain to converge eventually.
|
||||
|
@ -15,7 +15,7 @@ Use an OS that has Python 3.8 or above. For example, Debian 11 (bullseye)
|
||||
git clone https://github.com/ethereum/consensus-specs.git
|
||||
cd consensus-specs
|
||||
```
|
||||
3. Create the specifications and tests:
|
||||
3. Create the specifications and tests:
|
||||
```sh
|
||||
make install_test
|
||||
make pyspec
|
||||
@ -32,7 +32,7 @@ To read more about creating the environment, [see here](core/pyspec/README.md).
|
||||
. venv/bin/activate
|
||||
```
|
||||
2. Run a sanity check test against Altair fork:
|
||||
```sh
|
||||
```sh
|
||||
cd tests/core/pyspec
|
||||
python -m pytest -k test_empty_block_transition --fork altair eth2spec
|
||||
```
|
||||
@ -49,21 +49,21 @@ To read more about creating the environment, [see here](core/pyspec/README.md).
|
||||
|
||||
=============================== warnings summary ===============================
|
||||
../../../venv/lib/python3.9/site-packages/cytoolz/compatibility.py:2
|
||||
/home/qbzzt1/consensus-specs/venv/lib/python3.9/site-packages/cytoolz/compatibility.py:2:
|
||||
DeprecationWarning: The toolz.compatibility module is no longer needed in Python 3 and has
|
||||
been deprecated. Please import these utilities directly from the standard library. This
|
||||
/home/qbzzt1/consensus-specs/venv/lib/python3.9/site-packages/cytoolz/compatibility.py:2:
|
||||
DeprecationWarning: The toolz.compatibility module is no longer needed in Python 3 and has
|
||||
been deprecated. Please import these utilities directly from the standard library. This
|
||||
module will be removed in a future release.
|
||||
warnings.warn("The toolz.compatibility module is no longer "
|
||||
|
||||
-- Docs: https://docs.pytest.org/en/stable/warnings.html
|
||||
================ 3 passed, 626 deselected, 1 warning in 16.81s =================
|
||||
================ 3 passed, 626 deselected, 1 warning in 16.81s =================
|
||||
```
|
||||
|
||||
|
||||
## The "Hello, World" of Consensus Spec Tests
|
||||
|
||||
One of the `test_empty_block_transition` tests is implemented by a function with the same
|
||||
name located in
|
||||
name located in
|
||||
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py).
|
||||
To learn how consensus spec tests are written, let's go over the code:
|
||||
|
||||
@ -94,10 +94,10 @@ This type of test receives two parameters:
|
||||
|
||||
```python
|
||||
pre_slot = state.slot
|
||||
```
|
||||
```
|
||||
|
||||
A slot is a unit of time (every 12 seconds in mainnet), for which a specific validator (selected randomly but in a
|
||||
deterministic manner) is a proposer. The proposer can propose a block during that slot.
|
||||
deterministic manner) is a proposer. The proposer can propose a block during that slot.
|
||||
|
||||
```python
|
||||
pre_eth1_votes = len(state.eth1_data_votes)
|
||||
@ -151,7 +151,7 @@ More `yield` statements. The output of a consensus test is:
|
||||
|
||||
# Check that the new parent root is correct
|
||||
assert spec.get_block_root_at_slot(state, pre_slot) == signed_block.message.parent_root
|
||||
|
||||
|
||||
# Random data changed
|
||||
assert spec.get_randao_mix(state, spec.get_current_epoch(state)) != pre_mix
|
||||
```
|
||||
@ -160,16 +160,16 @@ Finally we assertions that test the transition was legitimate. In this case we h
|
||||
|
||||
1. One item was added to `eth1_data_votes`
|
||||
2. The new block's `parent_root` is the same as the block in the previous location
|
||||
3. The random data that every block includes was changed.
|
||||
3. The random data that every block includes was changed.
|
||||
|
||||
|
||||
## New Tests
|
||||
|
||||
The easiest way to write a new test is to copy and modify an existing one. For example,
|
||||
let's write a test where the first slot of the beacon chain is empty (because the assigned
|
||||
let's write a test where the first slot of the beacon chain is empty (because the assigned
|
||||
proposer is offline, for example), and then there's an empty block in the second slot.
|
||||
|
||||
We already know how to accomplish most of what we need for this test, but the only way we know
|
||||
We already know how to accomplish most of what we need for this test, but the only way we know
|
||||
to advance the state is `state_transition_and_sign_block`, a function that also puts a block
|
||||
into the slot. So let's see if the function's definition tells us how to advance the state without
|
||||
a block.
|
||||
@ -180,7 +180,7 @@ First, we need to find out where the function is located. Run:
|
||||
find . -name '*.py' -exec grep 'def state_transition_and_sign_block' {} \; -print
|
||||
```
|
||||
|
||||
And you'll find that the function is defined in
|
||||
And you'll find that the function is defined in
|
||||
[`eth2spec/test/helpers/state.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/state.py). Looking
|
||||
in that file, we see that the second function is:
|
||||
|
||||
@ -209,7 +209,7 @@ This looks like exactly what we need. So we add this call before we create the e
|
||||
.
|
||||
```
|
||||
|
||||
That's it. Our new test works (copy `test_empty_block_transition`, rename it, add the `next_slot` call, and then run it to
|
||||
That's it. Our new test works (copy `test_empty_block_transition`, rename it, add the `next_slot` call, and then run it to
|
||||
verify this).
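
Putting those pieces together, the new test could look roughly like this. This is a sketch: the decorators and helpers mirror the ones used by `test_empty_block_transition`, but the function name is invented and the repo's actual test body includes additional assertions (eth1 votes, parent root, RANDAO) omitted here:

```python
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.state import next_slot, state_transition_and_sign_block


@with_all_phases
@spec_state_test
def test_empty_block_transition_after_skipped_slot(spec, state):
    # Skip the first slot entirely, as if the assigned proposer were offline
    next_slot(spec, state)

    yield 'pre', state

    # Then propose an empty block in the following slot
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
```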
|
||||
|
||||
|
||||
@ -218,7 +218,7 @@ verify this).
|
||||
|
||||
It is important to make sure that the system rejects invalid input, so our next step is to deal with cases where the protocol
|
||||
is supposed to reject something. To see such a test, look at `test_prev_slot_block_transition` (in the same
|
||||
file we used previously,
|
||||
file we used previously,
|
||||
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/phase0/sanity/test_blocks.py)).
|
||||
|
||||
```python
|
||||
@ -249,7 +249,7 @@ Transition to the new slot, which naturally has a different proposer.
|
||||
```
|
||||
|
||||
Specify that the function `transition_unsigned_block` will cause an assertion error.
|
||||
You can see this function in
|
||||
You can see this function in
|
||||
[`~/consensus-specs/tests/core/pyspec/eth2spec/test/helpers/block.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/block.py),
|
||||
and one of the tests is that the block must be for this slot:
|
||||
> ```python
|
||||
@ -265,14 +265,14 @@ be called later.
|
||||
```
|
||||
|
||||
Set the block's state root to the current state hash tree root, which identifies this block as
|
||||
belonging to this slot (even though it was created for the previous slot).
|
||||
belonging to this slot (even though it was created for the previous slot).
|
||||
|
||||
```python
|
||||
```python
|
||||
signed_block = sign_block(spec, state, block, proposer_index=proposer_index)
|
||||
```
|
||||
|
||||
Notice that `proposer_index` is the variable we set earlier, *before* we advanced
|
||||
the slot with `spec.process_slots(state, state.slot + 1)`. It is not the proposer
|
||||
the slot with `spec.process_slots(state, state.slot + 1)`. It is not the proposer
|
||||
for the current state.
|
||||
|
||||
```python
|
||||
@ -296,8 +296,8 @@ includes the block hash of the proposed new head of the execution layer.
|
||||
|
||||
For every slot there is also a randomly selected committee of validators that needs to vote whether
|
||||
the new consensus layer block is valid, which requires the proposed head of the execution chain to
|
||||
also be a valid block. These votes are called [attestations](https://notes.ethereum.org/@hww/aggregation#112-Attestation),
|
||||
and they are sent as independent messages. The proposer for a block is able to include attestations from previous slots,
|
||||
also be a valid block. These votes are called [attestations](https://notes.ethereum.org/@hww/aggregation#112-Attestation),
|
||||
and they are sent as independent messages. The proposer for a block is able to include attestations from previous slots,
|
||||
which is how they get on chain to form consensus, reward honest validators, etc.
|
||||
|
||||
[You can see a simple successful attestation test here](https://github.com/ethereum/consensus-specs/blob/926e5a3d722df973b9a12f12c015783de35cafa9/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py#L26-L30):
|
||||
@ -326,8 +326,8 @@ To see an attestion "from the inside" we need to follow it.
|
||||
> ```
|
||||
>
|
||||
> Only two parameters, `spec` and `state` are required. However, there are four other parameters that can affect
|
||||
> the attestation created by this function.
|
||||
>
|
||||
> the attestation created by this function.
|
||||
>
|
||||
>
|
||||
> ```python
|
||||
> # If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
|
||||
@ -345,10 +345,10 @@ To see an attestion "from the inside" we need to follow it.
|
||||
> attestation_data = build_attestation_data(
|
||||
> spec, state, slot=slot, index=index
|
||||
> )
|
||||
> ```
|
||||
> ```
|
||||
>
|
||||
> Build the actual attestation. You can see this function
|
||||
> [here](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L53-L85)
|
||||
> Build the actual attestation. You can see this function
|
||||
> [here](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L53-L85)
|
||||
> to see the exact data in an attestation.
|
||||
>
|
||||
> ```python
|
||||
@ -358,17 +358,17 @@ To see an attestion "from the inside" we need to follow it.
|
||||
> attestation_data.index,
|
||||
> )
|
||||
> ```
|
||||
>
|
||||
>
|
||||
> This is the committee that is supposed to approve or reject the proposed block.
|
||||
>
|
||||
> ```python
|
||||
>
|
||||
>
|
||||
> ```python
|
||||
>
|
||||
> committee_size = len(beacon_committee)
|
||||
> aggregation_bits = Bitlist[spec.MAX_VALIDATORS_PER_COMMITTEE](*([0] * committee_size))
|
||||
> ```
|
||||
>
|
||||
>
|
||||
> There's a bit for every committee member to see if it approves or not.
|
||||
>
|
||||
>
|
||||
> ```python
|
||||
> attestation = spec.Attestation(
|
||||
> aggregation_bits=aggregation_bits,
|
||||
@ -376,15 +376,15 @@ To see an attestion "from the inside" we need to follow it.
|
||||
> )
|
||||
> # fill the attestation with (optionally filtered) participants, and optionally sign it
|
||||
> fill_aggregate_attestation(spec, state, attestation, signed=signed, filter_participant_set=filter_participant_set)
|
||||
>
|
||||
> return attestation
|
||||
>
|
||||
> return attestation
|
||||
> ```
|
||||
|
||||
```python
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
```
|
||||
|
||||
Attestations have to appear after the block they attest for, so we advance
|
||||
Attestations have to appear after the block they attest for, so we advance
|
||||
`spec.MIN_ATTESTATION_INCLUSION_DELAY` slots before creating the block that includes the attestation.
|
||||
Currently a single block is sufficient, but that may change in the future.
|
||||
|
||||
@ -392,7 +392,7 @@ Currently a single block is sufficient, but that may change in the future.
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
```
|
||||
|
||||
[This function](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L13-L50)
|
||||
[This function](https://github.com/ethereum/consensus-specs/blob/30fe7ba1107d976100eb0c3252ca7637b791e43a/tests/core/pyspec/eth2spec/test/helpers/attestations.py#L13-L50)
|
||||
processes the attestation and returns the result.
|
||||
|
||||
|
||||
@ -419,15 +419,15 @@ In the last line you can see two conditions being asserted:
|
||||
arrive too early.
|
||||
2. `state.slot <= data.slot + SLOTS_PER_EPOCH` which verifies that the attestation doesn't
|
||||
arrive too late.
|
||||
|
||||
|
||||
This is how the consensus layer tests deal with edge cases, by asserting the conditions required for the
|
||||
values to be legitimate. In the case of these particular conditions, they are tested
|
||||
values to be legitimate. In the case of these particular conditions, they are tested
|
||||
[here](https://github.com/ethereum/consensus-specs/blob/926e5a3d722df973b9a12f12c015783de35cafa9/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py#L87-L104).
|
||||
One test checks what happens if the attestation is too early, and another if it is too late.
|
||||
|
||||
However, it is not enough to ensure we reject invalid blocks. It is also necessary to ensure we accept all valid blocks. You saw earlier
|
||||
a test (`test_success`) that tested that being `MIN_ATTESTATION_INCLUSION_DELAY` after the data for which we attest is enough.
|
||||
Now we'll write a similar test that verifies that being `SLOTS_PER_EPOCH` away is still valid. To do this, we modify the
|
||||
a test (`test_success`) that tested that being `MIN_ATTESTATION_INCLUSION_DELAY` after the data for which we attest is enough.
|
||||
Now we'll write a similar test that verifies that being `SLOTS_PER_EPOCH` away is still valid. To do this, we modify the
|
||||
`test_after_epoch_slots` function. We need two changes:
|
||||
|
||||
1. Call `transition_to_slot_via_block` with one less slot to advance
|
||||
@ -445,7 +445,7 @@ def test_almost_after_epoch_slots(spec, state):
|
||||
transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
```
|
||||
```
|
||||
|
||||
Add this function to the file `consensus-specs/tests/core/pyspec/eth2spec/test/phase0/block_processing/test_process_attestation.py`,
|
||||
and run the test against Altair fork:
|
||||
@ -463,7 +463,7 @@ You should see it ran successfully (although you might get a warning, you can ig
|
||||
|
||||
So far we've ran tests against the formal specifications. This is a way to check the specifications
|
||||
are what we expect, but it doesn't actually check the beacon chain clients. The way these tests get applied
|
||||
by clients is that every few weeks
|
||||
by clients is that every few weeks
|
||||
[new test specifications are released](https://github.com/ethereum/consensus-spec-tests/releases),
|
||||
in a format [documented here](https://github.com/ethereum/consensus-specs/tree/dev/tests/formats).
|
||||
All the consensus layer clients implement test-runners that consume the test vectors in this standard format.
|
||||
|
@ -1 +1 @@
|
||||
1.5.0-alpha.9
|
||||
1.5.0-alpha.10
|
||||
|
@ -19,5 +19,5 @@ spec.config = spec.Configuration(**config_util.load_config_file(Path('mytestnet.
|
||||
```
|
||||
|
||||
Note: previously the testnet config files included both preset and runtime-configuration data.
|
||||
The new config loader is compatible with this: all config vars are loaded from the file,
|
||||
but those that have become presets can be ignored.
|
||||
The new config loader is compatible with this: all config vars are loaded from the file,
|
||||
but those that have become presets can be ignored.
|
||||
|
@ -17,10 +17,10 @@ Options:
|
||||
If true, all cases will run regardless, and files will be overwritten.
|
||||
Other existing files are not deleted.
|
||||
|
||||
-c CONFIGS_PATH -- The directory to load configs for pyspec from. A config is a simple key-value yaml file.
|
||||
-c CONFIGS_PATH -- The directory to load configs for pyspec from. A config is a simple key-value yaml file.
|
||||
Use `../../configs/` when running from the root dir of a generator, and requiring the standard spec configs.
|
||||
|
||||
[-l [CONFIG_LIST [CONFIG_LIST ...]]] -- Optional. Define which configs to run.
|
||||
[-l [CONFIG_LIST [CONFIG_LIST ...]]] -- Optional. Define which configs to run.
|
||||
Test providers loading other configs will be ignored. If none are specified, no config will be ignored.
|
||||
```
|
||||
|
||||
@ -45,10 +45,10 @@ The yielding pattern is:
|
||||
Test part output kinds:
|
||||
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz_snappy` file.
|
||||
- `data`: value is expected to be any Python object that can be dumped as YAML. Output is written to `<key name>.yaml`
|
||||
- `meta`: these key-value pairs are collected into a dict, and then collectively written to a metadata
|
||||
- `meta`: these key-value pairs are collected into a dict, and then collectively written to a metadata
|
||||
file named `meta.yaml`, if anything is yielded with `meta` empty.
|
||||
|
||||
The `vector_test()` decorator can detect pyspec SSZ types, and output them both as `data` and `ssz`, for the test consumer to choose.
|
||||
|
||||
Note that the yielded outputs are processed before the test continues. It is safe to yield information that later mutates,
|
||||
Note that the yielded outputs are processed before the test continues. It is safe to yield information that later mutates,
|
||||
as the output will already be encoded to yaml or ssz bytes. This avoids the need to deep-copy the whole object.
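
As a hedged sketch of the yielding pattern (the names and the exact two- versus three-element yield forms below are illustrative; the format description above is authoritative), a test part might emit its outputs like this:

```python
def example_test_case(spec, state, signed_block, post_state_bytes):
    yield 'description', 'meta', 'an example case'  # collected into meta.yaml
    yield 'pre', state                              # SSZ container: can be emitted as both data and ssz
    yield 'signed_block', signed_block              # SSZ container: likewise
    yield 'post', 'ssz', post_state_bytes           # raw bytes: written to post.ssz_snappy
```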
|
||||
|
@ -1,122 +0,0 @@
|
||||
from eth2spec.test.context import (
|
||||
BELLATRIX,
|
||||
single_phase,
|
||||
spec_test,
|
||||
with_presets,
|
||||
with_phases,
|
||||
with_bellatrix_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import MINIMAL
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
prepare_full_genesis_deposits,
|
||||
)
|
||||
from eth2spec.test.helpers.genesis import (
|
||||
get_sample_genesis_execution_payload_header,
|
||||
)
|
||||
|
||||
|
||||
def eth1_init_data(eth1_block_hash, eth1_timestamp):
|
||||
yield 'eth1', {
|
||||
'eth1_block_hash': '0x' + eth1_block_hash.hex(),
|
||||
'eth1_timestamp': int(eth1_timestamp),
|
||||
}
|
||||
|
||||
|
||||
@with_phases([BELLATRIX])
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_initialize_pre_transition_no_param(spec):
|
||||
deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
deposits, deposit_root, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
deposit_count,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
eth1_block_hash = b'\x12' * 32
|
||||
eth1_timestamp = spec.config.MIN_GENESIS_TIME
|
||||
|
||||
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
|
||||
yield 'deposits', deposits
|
||||
|
||||
# initialize beacon_state *without* an execution_payload_header
|
||||
yield 'execution_payload_header', 'meta', False
|
||||
state = spec.initialize_beacon_state_from_eth1(eth1_block_hash, eth1_timestamp, deposits)
|
||||
|
||||
assert not spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_bellatrix_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_initialize_pre_transition_empty_payload(spec):
|
||||
deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
deposits, deposit_root, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
deposit_count,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
eth1_block_hash = b'\x12' * 32
|
||||
eth1_timestamp = spec.config.MIN_GENESIS_TIME
|
||||
|
||||
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
|
||||
yield 'deposits', deposits
|
||||
|
||||
# initialize beacon_state *with* an *empty* execution_payload_header
|
||||
yield 'execution_payload_header', 'meta', True
|
||||
execution_payload_header = spec.ExecutionPayloadHeader()
|
||||
state = spec.initialize_beacon_state_from_eth1(
|
||||
eth1_block_hash,
|
||||
eth1_timestamp,
|
||||
deposits,
|
||||
execution_payload_header=execution_payload_header,
|
||||
)
|
||||
|
||||
assert not spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'execution_payload_header', execution_payload_header
|
||||
|
||||
yield 'state', state
|
||||
|
||||
|
||||
@with_bellatrix_and_later
|
||||
@spec_test
|
||||
@single_phase
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_initialize_post_transition(spec):
|
||||
deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||
deposits, deposit_root, _ = prepare_full_genesis_deposits(
|
||||
spec,
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
deposit_count,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
eth1_block_hash = b'\x12' * 32
|
||||
eth1_timestamp = spec.config.MIN_GENESIS_TIME
|
||||
|
||||
yield from eth1_init_data(eth1_block_hash, eth1_timestamp)
|
||||
yield 'deposits', deposits
|
||||
|
||||
# initialize beacon_state *with* an execution_payload_header
|
||||
yield 'execution_payload_header', 'meta', True
|
||||
genesis_execution_payload_header = get_sample_genesis_execution_payload_header(spec)
|
||||
state = spec.initialize_beacon_state_from_eth1(
|
||||
eth1_block_hash,
|
||||
eth1_timestamp,
|
||||
deposits,
|
||||
execution_payload_header=genesis_execution_payload_header,
|
||||
)
|
||||
|
||||
yield 'execution_payload_header', genesis_execution_payload_header
|
||||
|
||||
assert spec.is_merge_transition_complete(state)
|
||||
|
||||
yield 'state', state
|
@ -36,5 +36,3 @@ def test_networking(spec):
|
||||
spec.config.MAX_REQUEST_BLOB_SIDECARS_EIP7594 ==
|
||||
spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.config.MAX_BLOBS_PER_BLOCK_EIP7594
|
||||
)
|
||||
# Start with the same size, but `BLOB_SIDECAR_SUBNET_COUNT` could potentially increase later.
|
||||
assert spec.config.BLOB_SIDECAR_SUBNET_COUNT_EIP7594 == spec.config.MAX_BLOBS_PER_BLOCK_EIP7594
|
||||
|
@ -11,6 +11,7 @@ from eth2spec.test.context import (
|
||||
)
|
||||
from eth2spec.test.helpers.withdrawals import (
|
||||
set_eth1_withdrawal_credential_with_balance,
|
||||
set_compounding_withdrawal_credential_with_balance,
|
||||
set_compounding_withdrawal_credential,
|
||||
)
|
||||
|
||||
@ -47,8 +48,8 @@ def test_basic_consolidation_in_current_consolidation_epoch(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set earliest consolidation epoch to the expected exit epoch
|
||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||
@ -96,59 +97,7 @@ def test_basic_consolidation_with_excess_target_balance(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Set earliest consolidation epoch to the expected exit epoch
|
||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||
state.earliest_consolidation_epoch = expected_exit_epoch
|
||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||
# Set the consolidation balance to consume equal to churn limit
|
||||
state.consolidation_balance_to_consume = consolidation_churn_limit
|
||||
|
||||
# Add excess balance
|
||||
state.balances[target_index] = state.balances[target_index] + spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
|
||||
yield from run_consolidation_processing(spec, state, consolidation)
|
||||
|
||||
# Check consolidation churn is decremented correctly
|
||||
assert (
|
||||
state.consolidation_balance_to_consume
|
||||
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
||||
)
|
||||
# Check exit epoch
|
||||
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||
|
||||
|
||||
@with_electra_and_later
|
||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||
@with_custom_state(
|
||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||
threshold_fn=default_activation_threshold,
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_basic_consolidation_with_excess_target_balance_and_compounding_credentials(spec, state):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for consolidation
|
||||
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||
|
||||
# Set source to eth1 credentials
|
||||
source_address = b"\x22" * 20
|
||||
set_eth1_withdrawal_credential_with_balance(
|
||||
spec, state, source_index, address=source_address
|
||||
)
|
||||
# Make consolidation with source address
|
||||
consolidation = spec.ConsolidationRequest(
|
||||
source_address=source_address,
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set earliest consolidation epoch to the expected exit epoch
|
||||
@ -202,8 +151,8 @@ def test_basic_consolidation_in_new_consolidation_epoch(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
yield from run_consolidation_processing(spec, state, consolidation)
|
||||
|
||||
@ -247,8 +196,8 @@ def test_basic_consolidation_with_preexisting_churn(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set earliest consolidation epoch to the expected exit epoch
|
||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||
@ -296,8 +245,8 @@ def test_basic_consolidation_with_insufficient_preexisting_churn(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set earliest consolidation epoch to the first available epoch
|
||||
state.earliest_consolidation_epoch = spec.compute_activation_exit_epoch(
|
||||
@ -337,7 +286,7 @@ def test_basic_consolidation_with_compounding_credentials(spec, state):
|
||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||
|
||||
# Set source to eth1 credentials
|
||||
# Set source to compounding credentials
|
||||
source_address = b"\x22" * 20
|
||||
set_compounding_withdrawal_credential(
|
||||
spec, state, source_index, address=source_address
|
||||
@ -396,8 +345,8 @@ def test_consolidation_churn_limit_balance(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set source effective balance to consolidation churn limit
|
||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||
@ -446,8 +395,8 @@ def test_consolidation_balance_larger_than_churn_limit(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set source effective balance to 2 * consolidation churn limit
|
||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||
@ -495,8 +444,8 @@ def test_consolidation_balance_through_two_churn_epochs(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# Set target to compounding credentials
|
||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||
|
||||
# Set source balance higher to 3 * consolidation churn limit
|
||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||
@ -625,7 +574,7 @@ def test_incorrect_exceed_pending_consolidations_limit(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert len(state.pending_consolidations) == spec.PENDING_CONSOLIDATIONS_LIMIT
|
||||
@ -660,7 +609,7 @@ def test_incorrect_not_enough_consolidation_churn_available(spec, state):
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert spec.get_consolidation_churn_limit(state) <= spec.MIN_ACTIVATION_BALANCE
|
||||
@ -694,7 +643,7 @@ def test_incorrect_exited_source(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# exit source
|
||||
spec.initiate_validator_exit(state, source_index)
|
||||
@ -731,7 +680,7 @@ def test_incorrect_exited_target(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
# exit target
|
||||
spec.initiate_validator_exit(state, 1)
|
||||
|
||||
@ -767,7 +716,7 @@ def test_incorrect_inactive_source(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# set source validator as not yet activated
|
||||
state.validators[source_index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||
@ -804,7 +753,7 @@ def test_incorrect_inactive_target(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# set target validator as not yet activated
|
||||
state.validators[1].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||
@ -839,7 +788,7 @@ def test_incorrect_no_source_execution_withdrawal_credential(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert not spec.has_execution_withdrawal_credential(state.validators[source_index])
|
||||
@ -857,7 +806,7 @@ def test_incorrect_no_source_execution_withdrawal_credential(spec, state):
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_incorrect_no_target_execution_withdrawal_credential(spec, state):
|
||||
def test_incorrect_target_with_bls_credential(spec, state):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for consolidation
|
||||
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
# Set up a correct consolidation, but target does not have
|
||||
@ -883,6 +832,39 @@ def test_incorrect_no_target_execution_withdrawal_credential(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_electra_and_later
|
||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||
@with_custom_state(
|
||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||
threshold_fn=default_activation_threshold,
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_incorrect_target_with_eth1_credential(spec, state):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for consolidation
|
||||
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
# Set up an otherwise correct consolidation
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||
source_address = b"\x22" * 20
|
||||
set_eth1_withdrawal_credential_with_balance(
|
||||
spec, state, source_index, address=source_address
|
||||
)
|
||||
consolidation = spec.ConsolidationRequest(
|
||||
source_address=source_address,
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
|
||||
# Set target to eth1 credentials
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
yield from run_consolidation_processing(
|
||||
spec, state, consolidation, success=False
|
||||
)
|
||||
|
||||
|
||||
@with_electra_and_later
|
||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||
@with_custom_state(
|
||||
@ -908,7 +890,7 @@ def test_incorrect_incorrect_source_address(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert not state.validators[source_index].withdrawal_credentials[12:] == consolidation.source_address
|
||||
@ -943,7 +925,7 @@ def test_incorrect_unknown_source_pubkey(spec, state):
|
||||
source_pubkey=b"\x00" * 48,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert not state.validators[source_index].pubkey == consolidation.source_pubkey
|
||||
@ -978,7 +960,7 @@ def test_incorrect_unknown_target_pubkey(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=b"\x00" * 48,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert not state.validators[target_index].pubkey == consolidation.target_pubkey
|
||||
@ -1013,7 +995,7 @@ def test_incorrect_source_has_pending_withdrawal(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Create pending withdrawal
|
||||
pending_withdrawal = spec.PendingPartialWithdrawal(
|
||||
@ -1052,7 +1034,7 @@ def test_incorrect_source_not_active_long_enough(spec, state):
|
||||
source_pubkey=state.validators[source_index].pubkey,
|
||||
target_pubkey=state.validators[target_index].pubkey,
|
||||
)
|
||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
set_compounding_withdrawal_credential_with_balance(spec, state, target_index)
|
||||
|
||||
# Check the return condition
|
||||
assert current_epoch < state.validators[source_index].activation_epoch + spec.config.SHARD_COMMITTEE_PERIOD
|
||||
@ -1228,7 +1210,7 @@ def run_consolidation_processing(spec, state, consolidation, success=True):
|
||||
pre_exit_epoch_source = source_validator.exit_epoch
|
||||
pre_exit_epoch_target = target_validator.exit_epoch
|
||||
pre_pending_consolidations = state.pending_consolidations.copy()
|
||||
pre_target_withdrawal_credentials = target_validator.withdrawal_credentials
|
||||
pre_source_balance = state.balances[source_index]
|
||||
pre_target_balance = state.balances[target_index]
|
||||
else:
|
||||
pre_state = state.copy()
|
||||
@ -1266,23 +1248,9 @@ def run_consolidation_processing(spec, state, consolidation, success=True):
|
||||
target_index=target_index,
|
||||
)
|
||||
assert state.pending_consolidations == pre_pending_consolidations + [expected_new_pending_consolidation]
|
||||
# Check excess balance is queued if the target switched to compounding
|
||||
if pre_target_withdrawal_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX:
|
||||
post_target_withdrawal_credentials = (
|
||||
spec.COMPOUNDING_WITHDRAWAL_PREFIX + pre_target_withdrawal_credentials[1:]
|
||||
)
|
||||
assert state.validators[target_index].withdrawal_credentials == post_target_withdrawal_credentials
|
||||
assert state.balances[target_index] == spec.MIN_ACTIVATION_BALANCE
|
||||
if pre_target_balance > spec.MIN_ACTIVATION_BALANCE:
|
||||
assert len(state.pending_deposits) == 1
|
||||
pending_deposit = state.pending_deposits[0]
|
||||
assert pending_deposit.pubkey == target_validator.pubkey
|
||||
assert pending_deposit.withdrawal_credentials == post_target_withdrawal_credentials
|
||||
assert pending_deposit.amount == (pre_target_balance - spec.MIN_ACTIVATION_BALANCE)
|
||||
assert pending_deposit.signature == spec.G2_POINT_AT_INFINITY
|
||||
assert pending_deposit.slot == spec.GENESIS_SLOT
|
||||
else:
|
||||
assert state.balances[target_index] == pre_target_balance
|
||||
# Check no balance move happened
|
||||
assert state.balances[source_index] == pre_source_balance
|
||||
assert state.balances[target_index] == pre_target_balance
|
||||
else:
|
||||
assert pre_state == state
|
||||
|
||||
|
@ -13,6 +13,92 @@ from eth2spec.test.helpers.withdrawals import (
|
||||
set_eth1_withdrawal_credential_with_balance,
|
||||
set_compounding_withdrawal_credential,
|
||||
)
|
||||
#
|
||||
# Run processing
|
||||
#
|
||||
|
||||
|
||||
def run_withdrawal_request_processing(
|
||||
spec, state, withdrawal_request, valid=True, success=True
|
||||
):
|
||||
"""
|
||||
Run ``process_withdrawal_request``, yielding:
|
||||
- pre-state ('pre')
|
||||
- withdrawal_request ('withdrawal_request')
|
||||
- post-state ('post').
|
||||
If ``valid == False``, run expecting ``AssertionError``
|
||||
If ``success == False``, it doesn't initiate exit successfully
|
||||
"""
|
||||
yield "pre", state
|
||||
yield "withdrawal_request", withdrawal_request
|
||||
|
||||
if not valid:
|
||||
expect_assertion_error(
|
||||
lambda: spec.process_withdrawal_request(
|
||||
state, withdrawal_request
|
||||
)
|
||||
)
|
||||
yield "post", None
|
||||
return
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
spec.process_withdrawal_request(
|
||||
state, withdrawal_request
|
||||
)
|
||||
|
||||
yield "post", state
|
||||
|
||||
if not success:
|
||||
# No-op
|
||||
assert pre_state == state
|
||||
else:
|
||||
validator_index = get_validator_index_by_pubkey(
|
||||
state, withdrawal_request.validator_pubkey
|
||||
)
|
||||
pre_exit_epoch = pre_state.validators[validator_index].exit_epoch
|
||||
pre_pending_partial_withdrawals = pre_state.pending_partial_withdrawals.copy()
|
||||
pre_balance = pre_state.balances[validator_index]
|
||||
pre_effective_balance = pre_state.validators[validator_index].effective_balance
|
||||
assert state.balances[validator_index] == pre_balance
|
||||
assert (
|
||||
state.validators[validator_index].effective_balance == pre_effective_balance
|
||||
)
|
||||
# Full exit request
|
||||
if withdrawal_request.amount == spec.FULL_EXIT_REQUEST_AMOUNT:
|
||||
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
|
||||
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||
assert spec.get_pending_balance_to_withdraw(state, validator_index) == 0
|
||||
assert state.pending_partial_withdrawals == pre_pending_partial_withdrawals
|
||||
# Partial withdrawal request
|
||||
else:
|
||||
expected_amount_to_withdraw = compute_amount_to_withdraw(
|
||||
spec, pre_state, validator_index, withdrawal_request.amount
|
||||
)
|
||||
assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
|
||||
expected_withdrawable_epoch = (
|
||||
state.earliest_exit_epoch
|
||||
+ spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
)
|
||||
expected_partial_withdrawal = spec.PendingPartialWithdrawal(
|
||||
index=validator_index,
|
||||
amount=expected_amount_to_withdraw,
|
||||
withdrawable_epoch=expected_withdrawable_epoch,
|
||||
)
|
||||
assert (
|
||||
state.pending_partial_withdrawals
|
||||
== pre_pending_partial_withdrawals + [expected_partial_withdrawal]
|
||||
)
|
||||
|
||||
|
||||
def compute_amount_to_withdraw(spec, state, index, amount):
|
||||
pending_balance_to_withdraw = spec.get_pending_balance_to_withdraw(state, index)
|
||||
return min(
|
||||
state.balances[index]
|
||||
- spec.MIN_ACTIVATION_BALANCE
|
||||
- pending_balance_to_withdraw,
|
||||
amount,
|
||||
)
|
||||
|
||||
|
||||
# Modified tests from 7002. Just testing EL-triggered exits, not partial withdrawals
|
||||
@ -887,12 +973,11 @@ def test_incorrect_inactive_validator(spec, state):
|
||||
validator_index = rng.choice(spec.get_active_validator_indices(state, current_epoch))
|
||||
validator_pubkey = state.validators[validator_index].pubkey
|
||||
address = b"\x22" * 20
|
||||
incorrect_address = b"\x33" * 20
|
||||
set_eth1_withdrawal_credential_with_balance(
|
||||
spec, state, validator_index, address=address
|
||||
)
|
||||
withdrawal_request = spec.WithdrawalRequest(
|
||||
source_address=incorrect_address,
|
||||
source_address=address,
|
||||
validator_pubkey=validator_pubkey,
|
||||
amount=spec.FULL_EXIT_REQUEST_AMOUNT,
|
||||
)
|
||||
@ -904,90 +989,3 @@ def test_incorrect_inactive_validator(spec, state):
|
||||
    yield from run_withdrawal_request_processing(
        spec, state, withdrawal_request, success=False
    )


#
# Run processing
#


def run_withdrawal_request_processing(
    spec, state, withdrawal_request, valid=True, success=True
):
    """
    Run ``process_withdrawal_request``, yielding:
      - pre-state ('pre')
      - withdrawal_request ('withdrawal_request')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    If ``success == False``, it doesn't initiate exit successfully
    """
    yield "pre", state
    yield "withdrawal_request", withdrawal_request

    if not valid:
        expect_assertion_error(
            lambda: spec.process_withdrawal_request(
                state, withdrawal_request
            )
        )
        yield "post", None
        return

    pre_state = state.copy()

    spec.process_withdrawal_request(
        state, withdrawal_request
    )

    yield "post", state

    if not success:
        # No-op
        assert pre_state == state
    else:
        validator_index = get_validator_index_by_pubkey(
            state, withdrawal_request.validator_pubkey
        )
        pre_exit_epoch = pre_state.validators[validator_index].exit_epoch
        pre_pending_partial_withdrawals = pre_state.pending_partial_withdrawals.copy()
        pre_balance = pre_state.balances[validator_index]
        pre_effective_balance = pre_state.validators[validator_index].effective_balance
        assert state.balances[validator_index] == pre_balance
        assert (
            state.validators[validator_index].effective_balance == pre_effective_balance
        )
        # Full exit request
        if withdrawal_request.amount == spec.FULL_EXIT_REQUEST_AMOUNT:
            assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
            assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
            assert spec.get_pending_balance_to_withdraw(state, validator_index) == 0
            assert state.pending_partial_withdrawals == pre_pending_partial_withdrawals
        # Partial withdrawal request
        else:
            expected_amount_to_withdraw = compute_amount_to_withdraw(
                spec, pre_state, validator_index, withdrawal_request.amount
            )
            assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
            expected_withdrawable_epoch = (
                state.earliest_exit_epoch
                + spec.config.MIN_VALIDATOR_WITHDRAWABILITY_DELAY
            )
            expected_partial_withdrawal = spec.PendingPartialWithdrawal(
                index=validator_index,
                amount=expected_amount_to_withdraw,
                withdrawable_epoch=expected_withdrawable_epoch,
            )
            assert (
                state.pending_partial_withdrawals
                == pre_pending_partial_withdrawals + [expected_partial_withdrawal]
            )


def compute_amount_to_withdraw(spec, state, index, amount):
    pending_balance_to_withdraw = spec.get_pending_balance_to_withdraw(state, index)
    return min(
        state.balances[index]
        - spec.MIN_ACTIVATION_BALANCE
        - pending_balance_to_withdraw,
        amount,
    )
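For orientation, here is a hedged sketch of how a test could drive `run_withdrawal_request_processing`. The decorators, helper functions, and field names below are the ones appearing elsewhere in this diff, but the test itself is only an illustration and is not part of the commit.

```python
@with_electra_and_later
@spec_state_test
def test_full_exit_request_example(spec, state):
    # Illustrative only: move past the minimum activation period so the exit
    # can actually be initiated, then submit an EL-triggered full exit request
    # for validator 0 and let the shared helper assert the pre/post conditions.
    state.slot += spec.SLOTS_PER_EPOCH * spec.config.SHARD_COMMITTEE_PERIOD
    validator_index = 0
    address = b"\x22" * 20
    set_eth1_withdrawal_credential_with_balance(
        spec, state, validator_index, address=address
    )
    withdrawal_request = spec.WithdrawalRequest(
        source_address=address,
        validator_pubkey=state.validators[validator_index].pubkey,
        amount=spec.FULL_EXIT_REQUEST_AMOUNT,
    )
    yield from run_withdrawal_request_processing(spec, state, withdrawal_request)
```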
134
tests/core/pyspec/eth2spec/test/electra/sanity/test_slots.py
Normal file
@ -0,0 +1,134 @@
from eth2spec.test.context import (
    spec_state_test,
    with_electra_and_later,
)
from eth2spec.test.helpers.deposits import prepare_pending_deposit
from eth2spec.test.helpers.state import transition_to


def run_epoch_processing(spec, state, pending_deposits=None, pending_consolidations=None):
    if pending_deposits is None:
        pending_deposits = []
    if pending_consolidations is None:
        pending_consolidations = []
    # Transition to the last slot of the epoch
    slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1
    transition_to(spec, state, slot)
    state.pending_deposits = pending_deposits
    state.pending_consolidations = pending_consolidations
    yield 'pre', state
    yield 'slots', 1
    spec.process_slots(state, state.slot + 1)
    yield 'post', state

    assert state.pending_deposits == []
    assert state.pending_consolidations == []


@with_electra_and_later
@spec_state_test
def test_multiple_pending_deposits_same_pubkey(spec, state):
    # Create multiple deposits with the same pubkey
    index = len(state.validators)
    deposit = prepare_pending_deposit(spec, validator_index=index, amount=spec.MIN_ACTIVATION_BALANCE, signed=True)
    pending_deposits = [deposit, deposit]

    yield from run_epoch_processing(spec, state, pending_deposits=pending_deposits)

    # Check deposit balance is applied correctly
    assert state.balances[index] == sum(d.amount for d in pending_deposits)
    assert state.validators[index].effective_balance == spec.MIN_ACTIVATION_BALANCE


@with_electra_and_later
@spec_state_test
def test_multiple_pending_deposits_same_pubkey_compounding(spec, state):
    # Create multiple deposits with the same pubkey and compounding creds
    index = len(state.validators)
    deposit = prepare_pending_deposit(
        spec, validator_index=index, amount=spec.MIN_ACTIVATION_BALANCE, signed=True,
        withdrawal_credentials=(spec.COMPOUNDING_WITHDRAWAL_PREFIX + b'\x00' * 11 + b'\x11' * 20)
    )
    pending_deposits = [deposit, deposit]

    yield from run_epoch_processing(spec, state, pending_deposits=pending_deposits)

    # Check deposit balance is applied correctly
    assert state.balances[index] == sum(d.amount for d in pending_deposits)
    assert state.validators[index].effective_balance == state.balances[index]


@with_electra_and_later
@spec_state_test
def test_multiple_pending_deposits_same_pubkey_below_upward_threshold(spec, state):
    # Create multiple deposits with top up lower than the upward threshold
    index = len(state.validators)
    deposit_0 = prepare_pending_deposit(
        spec, validator_index=index,
        amount=(spec.MIN_ACTIVATION_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT), signed=True
    )
    deposit_1 = prepare_pending_deposit(
        spec, validator_index=index,
        amount=spec.EFFECTIVE_BALANCE_INCREMENT, signed=True
    )
    pending_deposits = [deposit_0, deposit_1]

    yield from run_epoch_processing(spec, state, pending_deposits=pending_deposits)

    # Check deposit balance is applied correctly
    assert state.balances[index] == sum(d.amount for d in pending_deposits)
    assert state.validators[index].effective_balance == deposit_0.amount


@with_electra_and_later
@spec_state_test
def test_multiple_pending_deposits_same_pubkey_above_upward_threshold(spec, state):
    # Create multiple deposits with top up greater than the upward threshold
    index = len(state.validators)
    deposit_0 = prepare_pending_deposit(
        spec, validator_index=index,
        amount=(spec.MIN_ACTIVATION_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT), signed=True
    )
    amount = spec.EFFECTIVE_BALANCE_INCREMENT // spec.HYSTERESIS_QUOTIENT * spec.HYSTERESIS_UPWARD_MULTIPLIER + 1
    deposit_1 = prepare_pending_deposit(spec, validator_index=index, amount=amount, signed=True)
    pending_deposits = [deposit_0, deposit_1]

    yield from run_epoch_processing(spec, state, pending_deposits)

    # Check deposit balance is applied correctly
    balance = state.balances[index]
    assert balance == sum(d.amount for d in pending_deposits)
    assert state.validators[index].effective_balance == balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT


@with_electra_and_later
@spec_state_test
def test_pending_consolidation(spec, state):
    # Create pending consolidation
    current_epoch = spec.get_current_epoch(state)
    source_index = spec.get_active_validator_indices(state, current_epoch)[0]
    target_index = spec.get_active_validator_indices(state, current_epoch)[1]
    # Set withdrawable epoch to current epoch to allow processing
    state.validators[source_index].withdrawable_epoch = current_epoch
    # Set the source withdrawal credential to eth1
    state.validators[source_index].withdrawal_credentials = (
        spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x11" * 20
    )
    # Set the target withdrawal credential to compounding
    state.validators[target_index].withdrawal_credentials = (
        spec.COMPOUNDING_WITHDRAWAL_PREFIX + b"\x00" * 11 + b"\x11" * 20
    )
    pending_consolidations = [spec.PendingConsolidation(source_index=source_index, target_index=target_index)]

    assert state.balances[source_index] == spec.MIN_ACTIVATION_BALANCE
    assert state.validators[source_index].effective_balance == spec.MIN_ACTIVATION_BALANCE
    assert state.balances[target_index] == spec.MIN_ACTIVATION_BALANCE
    assert state.validators[target_index].effective_balance == spec.MIN_ACTIVATION_BALANCE

    yield from run_epoch_processing(spec, state, pending_consolidations=pending_consolidations)

    # Check the consolidation is processed correctly
    assert state.balances[source_index] == 0
    assert state.validators[source_index].effective_balance == 0
    assert state.balances[target_index] == spec.MIN_ACTIVATION_BALANCE * 2
    assert state.validators[target_index].effective_balance == spec.MIN_ACTIVATION_BALANCE * 2
@ -23,11 +23,15 @@ from eth2spec.test.helpers.deposits import (
    prepare_state_and_deposit,
    prepare_deposit_request,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash_for_block,
)
from eth2spec.test.helpers.proposer_slashings import (
    get_valid_proposer_slashing,
)
from eth2spec.test.helpers.forks import (
    get_next_fork_transition,
    is_post_bellatrix,
    is_post_electra,
)
from eth2spec.test.helpers.state import (
@ -57,13 +61,15 @@ class OperationType(Enum):
    CONSOLIDATION_REQUEST = auto()


def _set_operations_by_dict(block, operation_dict):
def _set_operations_by_dict(spec, block, operation_dict):
    for key, value in operation_dict.items():
        # to handle e.g. `execution_requests.deposits` and `deposits`
        obj = block.body
        for attr in key.split('.')[:-1]:
            obj = getattr(obj, attr)
        setattr(obj, key.split('.')[-1], value)
    if is_post_bellatrix(spec):
        block.body.execution_payload.block_hash = compute_el_block_hash_for_block(spec, block)


def _state_transition_and_sign_block_at_slot(spec,
@ -87,7 +93,7 @@ def _state_transition_and_sign_block_at_slot(spec,
        block.body.sync_aggregate = sync_aggregate

    if operation_dict:
        _set_operations_by_dict(block, operation_dict)
        _set_operations_by_dict(spec, block, operation_dict)

    assert state.latest_block_header.slot < block.slot
    assert state.slot == block.slot
@ -403,7 +409,7 @@ def run_transition_with_operation(state,
    if is_right_before_fork:
        # add a block with operation.
        block = build_empty_block_for_next_slot(spec, state)
        _set_operations_by_dict(block, operation_dict)
        _set_operations_by_dict(spec, block, operation_dict)
        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(pre_tag(signed_block))
@ -1,8 +1,9 @@
from eth2spec.test.context import (
    PHASE0,
    single_phase,
    spec_test,
    with_presets,
    with_all_phases,
    with_phases,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.deposits import (
@ -26,7 +27,7 @@ def eth1_init_data(eth1_block_hash, eth1_timestamp):
    }


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -62,7 +63,7 @@ def test_initialize_beacon_state_from_eth1(spec):
    yield 'state', state


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -113,7 +114,7 @@ def test_initialize_beacon_state_some_small_balances(spec):
    yield 'state', state


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -162,7 +163,7 @@ def test_initialize_beacon_state_one_topup_activation(spec):
    yield 'state', state


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -189,7 +190,7 @@ def test_initialize_beacon_state_random_invalid_genesis(spec):
    yield 'state', state


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -1,8 +1,9 @@
from eth2spec.test.context import (
    PHASE0,
    spec_test,
    single_phase,
    with_presets,
    with_all_phases,
    with_phases,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.deposits import (
@ -43,7 +44,7 @@ def run_is_valid_genesis_state(spec, state, valid=True):
    assert is_valid == valid


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -56,7 +57,7 @@ def test_full_genesis_deposits(spec):
    yield from run_is_valid_genesis_state(spec, state)


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -70,7 +71,7 @@ def test_invalid_invalid_timestamp(spec):
    yield from run_is_valid_genesis_state(spec, state, valid=False)


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -84,7 +85,7 @@ def test_extra_balance(spec):
    yield from run_is_valid_genesis_state(spec, state)


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -107,7 +108,7 @@ def test_one_more_validator(spec):
    yield from run_is_valid_genesis_state(spec, state)


@with_all_phases
@with_phases([PHASE0])
@spec_test
@single_phase
@with_presets([MINIMAL], reason="too slow")
@ -50,7 +50,7 @@ Test formats:

## Glossary

- `generator`: a program that outputs one or more test-cases, each organized into a `config > runner > handler > suite` hierarchy.
- `config`: tests are grouped by configuration used for spec presets. In addition to the standard configurations,
- `config`: tests are grouped by configuration used for spec presets. In addition to the standard configurations,
  `general` may be used as a catch-all for tests not restricted to one configuration. (E.g. BLS).
- `type`: the specialization of one single `generator`. E.g. epoch processing.
- `runner`: where a generator is a *"producer"*, this is the *"consumer"*.
@ -59,10 +59,10 @@ Test formats:
  To facilitate this, you specify a `handler`: the runner can deal with the format by using the specified handler.
- `suite`: a directory containing test cases that are coherent. Each `suite` under the same `handler` shares the same format.
  This is an organizational/cosmetic hierarchy layer.
- `case`: a test case, a directory in a `suite`. A case can be anything in general,
- `case`: a test case, a directory in a `suite`. A case can be anything in general,
  but its format should be well-defined in the documentation corresponding to the `type` (and `handler`).
- `case part`: a test case consists of different files, possibly in different formats, to facilitate the specific test case format better.
  Optionally, a `meta.yaml` is included to declare meta-data for the test, e.g. BLS requirements.
  Optionally, a `meta.yaml` is included to declare meta-data for the test, e.g. BLS requirements.
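To make the hierarchy concrete, here is a small, non-normative sketch of how one test case's on-disk path is composed from these layers. The specific names (`minimal`, `operations`, `deposit`, `pyspec_tests`, `new_deposit_under_max`) are illustrative examples only.

```python
# Illustrative only: compose a case directory from the hierarchy layers.
config = "minimal"          # or "mainnet"; "general" for config-independent tests such as BLS
fork = "phase0"
runner = "operations"       # the "consumer" of what a generator produced
handler = "deposit"         # tells the runner which format to apply
suite = "pyspec_tests"      # organizational layer; same format across the handler
case = "new_deposit_under_max"

case_dir = "/".join(["tests", config, fork, runner, handler, suite, case])
print(case_dir)  # tests/minimal/phase0/operations/deposit/pyspec_tests/new_deposit_under_max
```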
## Test format philosophy

@ -70,12 +70,12 @@ Test formats:

The configuration constant types are:
- Never changing: genesis data.
- Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion
- Changing, but reliant on old value: e.g. an epoch time may change, but if you want to do the conversion
  `(genesis data, timestamp) -> epoch number`, you end up needing both constants.
- Changing, but kept around during fork transition: finalization may take a while,
  e.g. an executable has to deal with new deposits and old deposits at the same time. Another example may be economic constants.
- Additional, backwards compatible: new constants are introduced for later phases.
- Changing: there is a very small chance some constant may really be *replaced*.
- Changing: there is a very small chance some constant may really be *replaced*.
  In this off-chance, it is likely better to include it as an additional variable,
  and some clients may simply stop supporting the old one if they do not want to sync from genesis.
  The change of functionality goes through a phase of deprecation of the old constant, and eventually only the new constant is kept around in the config (when old state is not supported anymore).
@ -157,7 +157,7 @@ Between all types of tests, a few formats are common:

##### `meta.yaml`

If present (it is optional), the test is enhanced with extra data to describe usage. Specialized data is described in the documentation of the specific test format.
If present (it is optional), the test is enhanced with extra data to describe usage. Specialized data is described in the documentation of the specific test format.

Common data is documented here:
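As a non-normative aside, a runner can treat the optional `meta.yaml` defensively. The sketch below assumes PyYAML is available, uses `bls_setting` purely as one example of a common key, and the case path is invented.

```python
import os

import yaml  # assumes PyYAML is available in the runner's environment


def load_meta(case_dir):
    """Return the contents of the optional meta.yaml, or {} when absent."""
    path = os.path.join(case_dir, "meta.yaml")
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return yaml.safe_load(f) or {}


# Example (path is hypothetical): fall back to a default when the key is missing.
meta = load_meta("tests/general/phase0/bls/sign/small/sign_case_0")
bls_setting = meta.get("bls_setting", 0)
```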
@ -205,7 +205,7 @@ The basic pattern for test-suite loading and running is:

1. For a specific config, load it first (and only need to do so once),
   then continue with the tests defined in the config folder.
2. Select a fork. Repeat for each fork if running tests for multiple forks.
2. Select a fork. Repeat for each fork if running tests for multiple forks.
3. Select the category and specialization of interest (e.g. `operations > deposits`). Again, repeat for each if running all.
4. Select a test suite. Or repeat for each.
5. Select a test case. Or repeat for each.
@ -213,4 +213,4 @@ The basic pattern for test-suite loading and running is:
7. Run the test, as defined by the test format.

Step 1 may be a step with compile time selection of a configuration, if desired for optimization.
The base requirement is just to use the same set of constants, independent of the loading process.
The base requirement is just to use the same set of constants, independent of the loading process.
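A hedged sketch of steps 2 through 5 above as a nested directory walk; the `tests_root` and `config` arguments and the sorting are assumptions about one particular runner, not requirements of the format.

```python
import os


def iter_test_cases(tests_root, config):
    """Yield (fork, runner, handler, suite, case_dir) following the documented hierarchy."""
    config_dir = os.path.join(tests_root, config)              # step 1: config already loaded
    for fork in sorted(os.listdir(config_dir)):                 # step 2: select a fork
        fork_dir = os.path.join(config_dir, fork)
        for runner in sorted(os.listdir(fork_dir)):             # step 3: category / specialization
            runner_dir = os.path.join(fork_dir, runner)
            for handler in sorted(os.listdir(runner_dir)):
                handler_dir = os.path.join(runner_dir, handler)
                for suite in sorted(os.listdir(handler_dir)):   # step 4: test suite
                    suite_dir = os.path.join(handler_dir, suite)
                    for case in sorted(os.listdir(suite_dir)):  # step 5: test case
                        yield fork, runner, handler, suite, os.path.join(suite_dir, case)
```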
@ -25,7 +25,7 @@ An SSZ-snappy encoded `BeaconState`, the state after applying the epoch sub-transition.

## Condition

A handler of the `epoch_processing` test-runner should process these cases,
A handler of the `epoch_processing` test-runner should process these cases,
calling the corresponding processing implementation (same name, prefixed with `process_`).
This excludes the other parts of the epoch-transition.
The provided pre-state is already transitioned to just before the specific sub-transition of focus of the handler.
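Illustratively, and under the assumption stated above that the handler name matches the spec function suffix, a runner might dispatch like this:

```python
def run_epoch_sub_transition(spec, state, handler_name):
    # e.g. handler_name == "justification_and_finalization" resolves to
    # spec.process_justification_and_finalization(state); other sub-transitions
    # of the epoch transition are deliberately not invoked here.
    process_fn = getattr(spec, "process_" + handler_name)
    process_fn(state)
    return state
```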
@ -90,7 +90,7 @@ The parameter that is required for executing `on_block(store, block)`.
        proofs: array of byte48 hex string -- optional, the proofs of blob commitments.
        valid: bool -- optional, default to `true`.
            If it's `false`, this execution step is expected to be invalid.
    }
}
```

The file is located in the same folder (see below).
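A minimal sketch of honoring the optional `valid` flag when executing such a step. Loading the block, blobs, and store is elided, and treating `AssertionError` as the failure signal is an assumption carried over from the pyspec style used elsewhere in this repository.

```python
def run_on_block_step(spec, store, signed_block, step):
    valid = step.get("valid", True)
    try:
        spec.on_block(store, signed_block)
    except AssertionError:
        # The step was expected to be invalid; anything else is a test failure.
        assert not valid
        return
    assert valid
```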
@ -49,7 +49,7 @@ An SSZ-snappy encoded `Deltas` representing the rewards and penalties returned by the rewards deltas function

## Condition

A handler of the `rewards` test-runner should process these cases,
A handler of the `rewards` test-runner should process these cases,
calling the corresponding rewards deltas function for each set of deltas.

The provided pre-state is ready to be input into each rewards deltas function.
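For one component, the check reduces to comparing the expected `Deltas` against the matching deltas function. The sketch below assumes a phase0-style `get_source_deltas(state)` returning `(rewards, penalties)` and a decoded `Deltas` object with `rewards` and `penalties` fields; other components would swap in their own deltas function.

```python
def check_source_deltas(spec, state, expected_deltas):
    # Call the deltas function that corresponds to source_deltas.ssz_snappy
    # and compare element-wise against the decoded expectation.
    rewards, penalties = spec.get_source_deltas(state)
    assert list(rewards) == list(expected_deltas.rewards)
    assert list(penalties) == list(expected_deltas.penalties)
```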
@ -35,4 +35,4 @@ I.e. `mapping[i]` is the shuffled location of `i`.

## Condition

The resulting list should match the expected output after shuffling the implied input, using the given `seed`.
The output is checked using the `mapping`, based on the shuffling test type (e.g. can be backwards shuffling).
The output is checked using the `mapping`, based on the shuffling test type (e.g. can be backwards shuffling).
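For the forward direction the check can be as small as the sketch below; as noted above, a backwards shuffling test type would apply the mapping in the opposite direction, so this is one possible reading rather than the only one.

```python
def check_forward_shuffling(spec, seed, mapping):
    count = len(mapping)
    for i in range(count):
        # mapping[i] is the shuffled location of i for the given seed.
        assert spec.compute_shuffled_index(i, count, seed) == mapping[i]
```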
@ -59,7 +59,7 @@ The object, encoded as a YAML structure. Using the same familiar encoding as YAML values in the spec.

The conditions are the same for each type:

- Encoding: After encoding the given `value` object, the output should match `serialized`.
- Decoding: After decoding the given `serialized` bytes, it should match the `value` object.
- Decoding: After decoding the given `serialized` bytes, it should match the `value` object.
- Hash-tree-root: the root should match the root declared in the metadata.

## `invalid`
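Schematically, the three conditions for a valid case look like the function below. The four callables stand in for whatever SSZ library the client uses; they are placeholders supplied by the caller and are not part of the test format.

```python
def check_valid_case(decode_yaml, serialize, deserialize, hash_tree_root,
                     value_yaml, serialized_bytes, expected_root):
    value = decode_yaml(value_yaml)
    assert serialize(value) == serialized_bytes        # encoding
    assert deserialize(serialized_bytes) == value      # decoding
    assert hash_tree_root(value) == expected_root      # hash-tree-root
```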
@ -42,7 +42,7 @@ A test-runner can implement the following assertions:

- Serialization: After parsing the `value`, SSZ-serialize it: the output should match `serialized`
- Deserialization: SSZ-deserialize the `serialized` value, and see if it matches the parsed `value`
- If YAML decoding of SSZ objects is not supported by the implementation:
  - Serialization in 2 steps: deserialize `serialized`, then serialize the result,
  - Serialization in 2 steps: deserialize `serialized`, then serialize the result,
    and verify if the bytes match the original `serialized`.
- Hash-tree-root: After parsing the `value` (or deserializing `serialized`), Hash-tree-root it: the output should match `root`
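The two-step fallback mentioned above amounts to a byte-level round trip; again the callables are placeholders for the client's own SSZ library.

```python
def check_two_step_serialization(deserialize, serialize, hash_tree_root,
                                 serialized_bytes, expected_root):
    value = deserialize(serialized_bytes)
    assert serialize(value) == serialized_bytes      # round trip must be byte-identical
    assert hash_tree_root(value) == expected_root
```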
@ -7,7 +7,7 @@ Any issues with the generators and/or generated tests should be filed in the repo that hosts the generator outputs.

On releases, test generators are run by the release manager. Test-generation of mainnet tests can take a significant amount of time, and is better left out of a CI setup.

An automated nightly tests release system, with a config filter applied, is being considered as implementation needs mature.
An automated nightly tests release system, with a config filter applied, is being considered as implementation needs mature.

## Table of contents

@ -39,7 +39,7 @@ Prerequisites:

This removes the existing virtual environments (`/tests/generators/<generator>/venv`) and generated tests (`../consensus-spec-tests/tests`).

```bash
make clean
make clean
```

### Running all test generators
@ -196,9 +196,9 @@ Recommendations:

- You can have more than just one test provider.
- Your test provider is free to output any configuration and combination of runner/handler/fork/case name; a minimal provider sketch follows this list.
- You can split your test case generators into different Python files/packages; this is good for code organization.
- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary.
- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary.
- You may be able to write your test case provider in a way where it does not make assumptions on constants.
  If so, you can generate test cases with different configurations for the same scenario (see example).
  If so, you can generate test cases with different configurations for the same scenario (see example).
- See [`tests/core/gen_helpers/README.md`](../core/pyspec/eth2spec/gen_helpers/README.md) for command line options for generators.
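As promised above, a minimal provider sketch in the style of the generator `main.py` files touched elsewhere in this diff. The module paths and the `sanity` runner name are illustrative, and `run_state_test_generators` is used with the same call shape seen in those files; treat this as a sketch rather than a ready-made generator.

```python
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
from eth2spec.test.helpers.constants import PHASE0, ALTAIR

if __name__ == "__main__":
    # handler name -> python module providing the tests for that handler
    phase_0_mods = {key: 'eth2spec.test.phase0.sanity.test_' + key for key in [
        'blocks',
        'slots',
    ]}
    altair_mods = phase_0_mods  # reuse when a fork adds no new tests

    all_mods = {
        PHASE0: phase_0_mods,
        ALTAIR: altair_mods,
    }
    run_state_test_generators(runner_name="sanity", all_mods=all_mods)
```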
## How to add a new test generator

@ -7,5 +7,5 @@ An epoch-processing test-runner can consume these sub-transition test-suites,

Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../formats/epoch_processing/README.md).
@ -1,4 +1,4 @@
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods, check_mods
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, check_mods
from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ELECTRA


@ -8,13 +8,8 @@ if __name__ == "__main__":
        'validity',
    ]}

    altair_mods = phase_0_mods

    # we have new unconditional lines in `initialize_beacon_state_from_eth1` and we want to test it
    _new_bellatrix_mods = {key: 'eth2spec.test.bellatrix.genesis.test_' + key for key in [
        'initialization',
    ]}
    bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods)
    altair_mods = phase_0_mods  # No additional Altair specific genesis tests
    bellatrix_mods = altair_mods  # No additional Bellatrix specific genesis tests
    capella_mods = bellatrix_mods  # No additional Capella specific genesis tests
    deneb_mods = capella_mods  # No additional Deneb specific genesis tests
    electra_mods = deneb_mods  # No additional Electra specific genesis tests
@ -8,5 +8,5 @@ An operation test-runner can consume these operation test-suites,

Information on the format of the tests can be found in the [operations test formats documentation](../../formats/operations/README.md).

@ -4,5 +4,5 @@ Sanity tests cover regular state-transitions in a common block-list format,

Information on the format of the tests can be found in the [sanity test formats documentation](../../formats/sanity/README.md).
@ -31,9 +31,13 @@ if __name__ == "__main__":
    # This is a "hack" which allows other test files (e.g., test_deposit_transition.py)
    # to reuse the sanity/block test format. If a new test file is added or removed,
    # do not forget to update sanity/block/__init__.py accordingly.
    _new_electra_mods = {key: 'eth2spec.test.electra.sanity.' + key for key in [
    _new_electra_mods_1 = {key: 'eth2spec.test.electra.sanity.' + key for key in [
        'blocks',
    ]}
    _new_electra_mods_2 = {key: 'eth2spec.test.electra.sanity.test_' + key for key in [
        'slots',
    ]}
    _new_electra_mods = {**_new_electra_mods_1, **_new_electra_mods_2}
    electra_mods = combine_mods(_new_electra_mods, deneb_mods)

    all_mods = {
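For readers skimming the generator change above, the merged mapping simply pairs each Electra sanity handler with the module that provides it. The snippet below only restates what the two dict comprehensions in the diff produce; it adds no new behavior.

```python
# Spelled out by hand (values follow directly from the comprehensions above):
_new_electra_mods_1 = {'blocks': 'eth2spec.test.electra.sanity.blocks'}
_new_electra_mods_2 = {'slots': 'eth2spec.test.electra.sanity.test_slots'}
_new_electra_mods = {**_new_electra_mods_1, **_new_electra_mods_2}
assert _new_electra_mods == {
    'blocks': 'eth2spec.test.electra.sanity.blocks',
    'slots': 'eth2spec.test.electra.sanity.test_slots',
}
```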