Merge branch 'withdrawals-push' into 00-to-01

commit 0da78ebc4b

Makefile (4 lines changed)
@@ -25,9 +25,11 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER

MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) \
	$(wildcard $(SPEC_DIR)/bellatrix/*.md) \
	$(wildcard $(SPEC_DIR)/capella/*.md) \
	$(wildcard $(SPEC_DIR)/custody/*.md) \
	$(wildcard $(SPEC_DIR)/das/*.md) \
	$(wildcard $(SPEC_DIR)/sharding/*.md)
	$(wildcard $(SPEC_DIR)/sharding/*.md) \
	$(wildcard $(SPEC_DIR)/eip4844/*.md)

COV_HTML_OUT=.htmlcov
COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)

setup.py (1 line changed)

@@ -534,6 +534,7 @@ class NoopExecutionEngine(ExecutionEngine):

    def notify_forkchoice_updated(self: ExecutionEngine,
                                  head_block_hash: Hash32,
                                  safe_block_hash: Hash32,
                                  finalized_block_hash: Hash32,
                                  payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
        pass

@@ -16,6 +16,7 @@

  - [`LightClientUpdate`](#lightclientupdate)
  - [`LightClientStore`](#lightclientstore)
- [Helper functions](#helper-functions)
  - [`is_finality_update`](#is_finality_update)
  - [`get_subtree_index`](#get_subtree_index)
  - [`get_active_header`](#get_active_header)
  - [`get_safety_threshold`](#get_safety_threshold)

@@ -52,7 +53,7 @@ uses sync committees introduced in [this beacon chain extension](./beacon-chain.

| Name | Value | Unit | Duration |
| - | - | - | - |
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` | validators |
| `UPDATE_TIMEOUT` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | epochs | ~27.3 hours |
| `UPDATE_TIMEOUT` | `SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | slots | ~27.3 hours |

## Containers

@@ -95,6 +96,13 @@ class LightClientStore(object):

## Helper functions

### `is_finality_update`

```python
def is_finality_update(update: LightClientUpdate) -> bool:
    return update.finalized_header != BeaconBlockHeader()
```

### `get_subtree_index`

```python

@@ -109,7 +117,7 @@ def get_active_header(update: LightClientUpdate) -> BeaconBlockHeader:

    # The "active header" is the header that the update is trying to convince us
    # to accept. If a finalized header is present, it's the finalized header,
    # otherwise it's the attested header
    if update.finalized_header != BeaconBlockHeader():
    if is_finality_update(update):
        return update.finalized_header
    else:
        return update.attested_header

@@ -163,7 +171,7 @@ def validate_light_client_update(store: LightClientStore,

    # Verify that the `finalized_header`, if present, actually is the finalized header saved in the
    # state of the `attested header`
    if update.finalized_header == BeaconBlockHeader():
    if not is_finality_update(update):
        assert update.finality_branch == [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
    else:
        assert is_valid_merkle_branch(

@@ -252,7 +260,7 @@ def process_light_client_update(store: LightClientStore,

    # Update finalized header
    if (
        sum(sync_committee_bits) * 3 >= len(sync_committee_bits) * 2
        and update.finalized_header != BeaconBlockHeader()
        and is_finality_update(update)
    ):
        # Normal update through 2/3 threshold
        apply_light_client_update(store, update)

@@ -47,8 +47,9 @@ The Engine API may be used to implement it with an external execution engine.

#### `notify_forkchoice_updated`

This function performs two actions *atomically*:
This function performs three actions *atomically*:
* Re-organizes the execution payload chain and corresponding state to make `head_block_hash` the head.
* Updates safe block hash with the value provided by the `safe_block_hash` parameter.
* Applies finality to the execution state: it irreversibly persists the chain of all execution payloads
  and corresponding state, up to and including `finalized_block_hash`.

@@ -58,18 +59,21 @@ Additionally, if `payload_attributes` is provided, this function sets in motion

```python
def notify_forkchoice_updated(self: ExecutionEngine,
                              head_block_hash: Hash32,
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
    ...
```

*Note*: The call of the `notify_forkchoice_updated` function maps on the `POS_FORKCHOICE_UPDATED` event defined in the [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#definitions).
*Note*: The `(head_block_hash, finalized_block_hash)` values of the `notify_forkchoice_updated` function call map on the `POS_FORKCHOICE_UPDATED` event defined in [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#definitions).
As per EIP-3675, before a post-transition block is finalized, `notify_forkchoice_updated` MUST be called with `finalized_block_hash = Hash32()`.

*Note*: Client software MUST NOT call this function until the transition conditions are met on the PoW network, i.e. there exists a block for which the `is_valid_terminal_pow_block` function returns `True`.

*Note*: Client software MUST call this function to initiate the payload build process to produce the merge transition block; the `head_block_hash` parameter MUST be set to the hash of a terminal PoW block in this case.

*Note*: Until a safe head function is implemented, the `safe_block_hash` parameter MUST be stubbed with the `head_block_hash` value.
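
To tie the notes above together, a minimal sketch (not part of the spec; `send_forkchoice_update` is an illustrative wrapper name) of how a consensus client would issue the call while stubbing `safe_block_hash`:

```python
# Sketch only: until a safe head is computed, clients pass the head hash for
# both the head and the safe block, per the note above.
def send_forkchoice_update(execution_engine: ExecutionEngine,
                           head_block_hash: Hash32,
                           finalized_block_hash: Hash32) -> Optional[PayloadId]:
    return execution_engine.notify_forkchoice_updated(
        head_block_hash=head_block_hash,
        safe_block_hash=head_block_hash,  # stubbed with the head hash
        finalized_block_hash=finalized_block_hash,
        payload_attributes=None,  # no payload build requested in this sketch
    )
```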

## Helpers

### `PayloadAttributes`

@@ -110,7 +110,7 @@ The following gossip validation from prior specifications MUST NOT be applied if

### Transitioning the gossip

See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
details on how to handle transitioning gossip topics for Bellatrix.
details on how to handle transitioning gossip topics for EIP-4844.

## The Req/Resp domain

@@ -146,7 +146,14 @@ def prepare_execution_payload(state: BeaconState,
        prev_randao=get_randao_mix(state, get_current_epoch(state)),
        suggested_fee_recipient=suggested_fee_recipient,
    )
    return execution_engine.notify_forkchoice_updated(parent_hash, finalized_block_hash, payload_attributes)
    # Set safe and head block hashes to the same value
    return execution_engine.notify_forkchoice_updated(
        head_block_hash=parent_hash,
        # TODO: Use `parent_hash` as a stub for now.
        safe_block_hash=parent_hash,
        finalized_block_hash=finalized_block_hash,
        payload_attributes=payload_attributes,
    )
```

2. Set `block.body.execution_payload = get_execution_payload(payload_id, execution_engine)`, where:

@@ -58,8 +58,82 @@ We define the following Python custom types for type hinting and readability:

## Containers

### New containers

#### `Withdrawal`

```python
class Withdrawal(Container):
    index: WithdrawalIndex
    address: ExecutionAddress
    amount: Gwei
```

#### `BLSToExecutionChange`

```python
class BLSToExecutionChange(Container):
    validator_index: ValidatorIndex
    from_bls_pubkey: BLSPubkey
    to_execution_address: ExecutionAddress
```

#### `SignedBLSToExecutionChange`

```python
class SignedBLSToExecutionChange(Container):
    message: BLSToExecutionChange
    signature: BLSSignature
```

### Extended Containers

#### `ExecutionPayload`

```python
class ExecutionPayload(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress  # 'beneficiary' in the yellow paper
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32  # 'difficulty' in the yellow paper
    block_number: uint64  # 'number' in the yellow paper
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]  # [New in Capella]
```

#### `ExecutionPayloadHeader`

```python
class ExecutionPayloadHeader(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root
    withdrawals_root: Root  # [New in Capella]
```

#### `Validator`

```python

@@ -73,7 +147,7 @@ class Validator(Container):
    activation_epoch: Epoch
    exit_epoch: Epoch
    withdrawable_epoch: Epoch  # When validator can withdraw funds
    withdrawn_epoch: Epoch  # [New in Capella]
    fully_withdrawn_epoch: Epoch  # [New in Capella]
```

#### `BeaconBlockBody`

@@ -140,80 +214,6 @@ class BeaconState(Container):
    withdrawals_queue: List[Withdrawal, WITHDRAWALS_QUEUE_LIMIT]  # [New in Capella]
```

#### `ExecutionPayload`

```python
class ExecutionPayload(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress  # 'beneficiary' in the yellow paper
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32  # 'difficulty' in the yellow paper
    block_number: uint64  # 'number' in the yellow paper
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]  # [New in Capella]
```

#### `ExecutionPayloadHeader`

```python
class ExecutionPayloadHeader(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root
    withdrawals_root: Root  # [New in Capella]
```

### New containers

#### `Withdrawal`

```python
class Withdrawal(Container):
    index: WithdrawalIndex
    address: ExecutionAddress
    amount: Gwei
```

#### `BLSToExecutionChange`

```python
class BLSToExecutionChange(Container):
    validator_index: ValidatorIndex
    from_bls_pubkey: BLSPubkey
    to_execution_address: ExecutionAddress
```

#### `SignedBLSToExecutionChange`

```python
class SignedBLSToExecutionChange(Container):
    message: BLSToExecutionChange
    signature: BLSSignature
```

## Helpers

### Beacon state mutators

@@ -221,7 +221,7 @@ class SignedBLSToExecutionChange(Container):

#### `withdraw`

```python
def withdraw(state: BeaconState, index: ValidatorIndex, amount: Gwei) -> None:
def withdraw_balance(state: BeaconState, index: ValidatorIndex, amount: Gwei) -> None:
    # Decrease the validator's balance
    decrease_balance(state, index, amount)
    # Create a corresponding withdrawal receipt

@@ -243,8 +243,8 @@ def is_fully_withdrawable_validator(validator: Validator, epoch: Epoch) -> bool:
    """
    Check if ``validator`` is fully withdrawable.
    """
    is_eth1_withdrawal_prefix = validator.withdrawal_credentials[0:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
    return is_eth1_withdrawal_prefix and validator.withdrawable_epoch <= epoch < validator.withdrawn_epoch
    is_eth1_withdrawal_prefix = validator.withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX
    return is_eth1_withdrawal_prefix and validator.withdrawable_epoch <= epoch < validator.fully_withdrawn_epoch
```
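
As context for the prefix check above, a minimal illustration (plain bytes, not spec code; the address value is a placeholder) of how `0x01` withdrawal credentials are laid out:

```python
# Illustration only: 0x01 credentials are the 1-byte prefix, 11 zero bytes,
# then the 20-byte execution address.
ETH1_PREFIX = b'\x01'             # value of ETH1_ADDRESS_WITHDRAWAL_PREFIX
execution_address = b'\x42' * 20  # placeholder address
withdrawal_credentials = ETH1_PREFIX + b'\x00' * 11 + execution_address
assert len(withdrawal_credentials) == 32
assert withdrawal_credentials[:1] == ETH1_PREFIX  # the check used by is_fully_withdrawable_validator
```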

## Beacon chain state transition function

@@ -278,8 +278,8 @@ def process_full_withdrawals(state: BeaconState) -> None:
    for index, validator in enumerate(state.validators):
        if is_fully_withdrawable_validator(validator, current_epoch):
            # TODO, consider the zero-balance case
            withdraw(state, ValidatorIndex(index), state.balances[index])
            validator.withdrawn_epoch = current_epoch
            withdraw_balance(state, ValidatorIndex(index), state.balances[index])
            validator.fully_withdrawn_epoch = current_epoch
```

### Block processing

@@ -304,10 +304,10 @@ def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
    dequeued_withdrawals = state.withdrawals_queue[:num_withdrawals]

    assert len(dequeued_withdrawals) == len(payload.withdrawals)
    for dequeued_receipt, withdrawal in zip(dequeued_withdrawals, payload.withdrawals):
        assert dequeued_receipt == withdrawal
    for dequeued_withdrawal, withdrawal in zip(dequeued_withdrawals, payload.withdrawals):
        assert dequeued_withdrawal == withdrawal

    # Remove dequeued receipts from state
    # Remove dequeued withdrawals from state
    state.withdrawals_queue = state.withdrawals_queue[num_withdrawals:]
```

@@ -342,7 +342,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
        base_fee_per_gas=payload.base_fee_per_gas,
        block_hash=payload.block_hash,
        transactions_root=hash_tree_root(payload.transactions),
        withdrawals_root=hash_tree_root(payload.withdrawals),
        withdrawals_root=hash_tree_root(payload.withdrawals),  # [New in Capella]
    )
```

@@ -32,6 +32,7 @@ Otherwise, `notify_forkchoice_updated` inherits all prior functionality.

```python
def notify_forkchoice_updated(self: ExecutionEngine,
                              head_block_hash: Hash32,
                              safe_block_hash: Hash32,
                              finalized_block_hash: Hash32,
                              payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]:
    ...

@@ -49,5 +50,5 @@ class PayloadAttributes(object):
    timestamp: uint64
    prev_randao: Bytes32
    suggested_fee_recipient: ExecutionAddress
    withdrawals: Sequence[Withdrawal]
    withdrawals: Sequence[Withdrawal]  # new in Capella
```

@@ -97,7 +97,7 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
            activation_epoch=pre_validator.activation_epoch,
            exit_epoch=pre_validator.exit_epoch,
            withdrawable_epoch=pre_validator.withdrawable_epoch,
            withdrawn_epoch=FAR_FUTURE_EPOCH,
            fully_withdrawn_epoch=FAR_FUTURE_EPOCH,
        )
        post.validators.append(post_validator)

@@ -87,5 +87,12 @@ def prepare_execution_payload(state: BeaconState,
        suggested_fee_recipient=suggested_fee_recipient,
        withdrawals=get_expected_withdrawals(state),  # [New in Capella]
    )
    return execution_engine.notify_forkchoice_updated(parent_hash, finalized_block_hash, payload_attributes)
    # Set safe and head block hashes to the same value
    return execution_engine.notify_forkchoice_updated(
        head_block_hash=parent_hash,
        # TODO: Use `parent_hash` as a stub for now.
        safe_block_hash=parent_hash,
        finalized_block_hash=finalized_block_hash,
        payload_attributes=payload_attributes,
    )
```

@@ -0,0 +1,190 @@

# EIP-4844 -- The Beacon Chain

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Domain types](#domain-types)
- [Preset](#preset)
  - [Trusted setup](#trusted-setup)
- [Configuration](#configuration)
- [Containers](#containers)
  - [Extended containers](#extended-containers)
    - [`BeaconBlockBody`](#beaconblockbody)
- [Helper functions](#helper-functions)
  - [KZG core](#kzg-core)
    - [`blob_to_kzg`](#blob_to_kzg)
    - [`kzg_to_versioned_hash`](#kzg_to_versioned_hash)
  - [Misc](#misc)
    - [`tx_peek_blob_versioned_hashes`](#tx_peek_blob_versioned_hashes)
    - [`verify_kzgs_against_transactions`](#verify_kzgs_against_transactions)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Block processing](#block-processing)
    - [Blob KZGs](#blob-kzgs)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This upgrade adds blobs to the beacon chain as part of EIP-4844.

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `BLSFieldElement` | `uint256` | `x < BLS_MODULUS` |
| `Blob` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | |
| `VersionedHash` | `Bytes32` | |
| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
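
The `x < BLS_MODULUS` condition above is the well-formedness rule for blobs that the networking layer later enforces; a tiny sketch (the helper name `is_valid_blob` is illustrative, not a spec function):

```python
# Sketch: a blob is well formed when every field element is canonical, i.e. < BLS_MODULUS.
def is_valid_blob(blob: Blob) -> bool:
    return all(int(element) < BLS_MODULUS for element in blob)
```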

## Constants

| Name | Value |
| - | - |
| `BLOB_TX_TYPE` | `uint8(0x05)` |
| `FIELD_ELEMENTS_PER_BLOB` | `4096` |
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |

### Domain types

| Name | Value |
| - | - |
| `DOMAIN_BLOBS_SIDECAR` | `DomainType('0x0a000000')` |

## Preset

### Trusted setup

The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
but reusing the `mainnet` settings in public networks is a critical security requirement.

| Name | Value |
| - | - |
| `KZG_SETUP_G2` | `Vector[G2Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
| `KZG_SETUP_LAGRANGE` | `Vector[BLSCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |

## Configuration

## Containers

### Extended containers

#### `BeaconBlockBody`

Note: `BeaconBlock` and `SignedBeaconBlock` types are updated indirectly.

```python
class BeaconBlockBody(Container):
    randao_reveal: BLSSignature
    eth1_data: Eth1Data  # Eth1 data vote
    graffiti: Bytes32  # Arbitrary data
    # Operations
    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
    attestations: List[Attestation, MAX_ATTESTATIONS]
    deposits: List[Deposit, MAX_DEPOSITS]
    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
    sync_aggregate: SyncAggregate
    # Execution
    execution_payload: ExecutionPayload
    blob_kzgs: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]  # [New in EIP-4844]
```

## Helper functions

### KZG core

KZG core functions. These are also defined in EIP-4844 execution specs.

#### `blob_to_kzg`

```python
def blob_to_kzg(blob: Blob) -> KZGCommitment:
    computed_kzg = bls.Z1
    for value, point_kzg in zip(blob, KZG_SETUP_LAGRANGE):
        assert value < BLS_MODULUS
        computed_kzg = bls.add(
            computed_kzg,
            bls.multiply(point_kzg, value)
        )
    return computed_kzg
```

#### `kzg_to_versioned_hash`

```python
def kzg_to_versioned_hash(kzg: KZGCommitment) -> VersionedHash:
    return BLOB_COMMITMENT_VERSION_KZG + hash(kzg)[1:]
```

### Misc

#### `tx_peek_blob_versioned_hashes`

This function retrieves the hashes from the `SignedBlobTransaction` as defined in EIP-4844, using SSZ offsets.
Offsets are little-endian `uint32` values, as defined in the [SSZ specification](../../ssz/simple-serialize.md).

```python
def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedHash]:
    assert opaque_tx[0] == BLOB_TX_TYPE
    message_offset = 1 + uint32.decode_bytes(opaque_tx[1:5])
    # field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 = 156
    blob_versioned_hashes_offset = uint32.decode_bytes(opaque_tx[message_offset+156:message_offset+160])
    return [VersionedHash(opaque_tx[x:x+32]) for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32)]
```
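
As a side note on the offset arithmetic above, a tiny standalone sketch (plain Python, not spec code) of how a little-endian `uint32` SSZ offset is read:

```python
# Sketch: SSZ offsets are 4-byte little-endian unsigned integers; "peeking" a
# variable-length field means reading its offset and slicing the raw bytes.
def read_uint32_le(data: bytes, position: int) -> int:
    return int.from_bytes(data[position:position + 4], "little")

# Example: the bytes 0x9d 0x00 0x00 0x00 decode to 157.
assert read_uint32_le(b"\x9d\x00\x00\x00", 0) == 157
```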

#### `verify_kzgs_against_transactions`

```python
def verify_kzgs_against_transactions(transactions: Sequence[Transaction], blob_kzgs: Sequence[KZGCommitment]) -> bool:
    all_versioned_hashes = []
    for tx in transactions:
        if tx[0] == BLOB_TX_TYPE:
            all_versioned_hashes.extend(tx_peek_blob_versioned_hashes(tx))
    return all_versioned_hashes == [kzg_to_versioned_hash(kzg) for kzg in blob_kzgs]
```

## Beacon chain state transition function

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    if is_execution_enabled(state, block.body):
        process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    process_sync_aggregate(state, block.body.sync_aggregate)
    process_blob_kzgs(state, block.body)  # [New in EIP-4844]
```

#### Blob KZGs

```python
def process_blob_kzgs(state: BeaconState, body: BeaconBlockBody):
    assert verify_kzgs_against_transactions(body.execution_payload.transactions, body.blob_kzgs)
```

## Testing

*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only.

The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type
when initializing the first body-root:

```python
state.latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
```

@@ -0,0 +1,43 @@

# EIP-4844 -- Fork Logic

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to EIP-4844](#fork-to-eip-4844)
  - [Fork trigger](#fork-trigger)
  - [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction

This document describes the process of the EIP-4844 upgrade.

## Configuration

Warning: this configuration is not definitive.

| Name | Value |
| - | - |
| `EIP4844_FORK_VERSION` | `Version('0x03000000')` |
| `EIP4844_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |

## Fork to EIP-4844

### Fork trigger

TBD. This fork is defined for testing purposes; the EIP may be combined with other consensus-layer upgrades.
For now we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.

Note that for pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` since the network starts with EIP-4844 version logic.

### Upgrading the state

The `eip4844.BeaconState` format is equal to the `bellatrix.BeaconState` format, so no upgrade has to be performed.

@@ -0,0 +1,260 @@

# EIP-4844 -- Networking

This document contains the consensus-layer networking specification for EIP-4844.

The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Preset](#preset)
- [Configuration](#configuration)
- [Containers](#containers)
  - [`BlobsSidecar`](#blobssidecar)
  - [`SignedBlobsSidecar`](#signedblobssidecar)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
  - [Topics and messages](#topics-and-messages)
    - [Global topics](#global-topics)
      - [`beacon_block`](#beacon_block)
      - [`blobs_sidecar`](#blobs_sidecar)
  - [Transitioning the gossip](#transitioning-the-gossip)
- [The Req/Resp domain](#the-reqresp-domain)
  - [Messages](#messages)
    - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
    - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
    - [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1)
- [Design decision rationale](#design-decision-rationale)
  - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Preset

| Name | Value |
| - | - |
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**4)` (= 16) |

## Configuration

| Name | Value | Description |
|------------------------------------------|-------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOBS_SIDECARS`              | `2**7` (= 128)                | Maximum number of blobs sidecars in a single request                 |
| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`  | `2**13` (= 8192, ~1.2 months) | The minimum epoch range over which a node must serve blobs sidecars  |

## Containers

### `BlobsSidecar`

```python
class BlobsSidecar(Container):
    beacon_block_root: Root
    beacon_block_slot: Slot
    blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
```

### `SignedBlobsSidecar`

```python
class SignedBlobsSidecar(Container):
    message: BlobsSidecar
    signature: BLSSignature
```

## The gossip domain: gossipsub

Some gossip meshes are upgraded in the EIP-4844 fork to support upgraded types.

### Topics and messages

Topics follow the same specification as in prior upgrades.
All topics remain stable except the beacon block topic which is updated with the modified type.

The specification around the creation, validation, and dissemination of messages has not changed from the Bellatrix document unless explicitly noted here.

The derivation of the `message-id` remains stable.

The new topics along with the type of the `data` field of a gossipsub message are given in this table:

| Name | Message Type |
| - | - |
| `beacon_block` | `SignedBeaconBlock` (modified) |
| `blobs_sidecar` | `SignedBlobsSidecar` (new) |

Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.

#### Global topics

EIP-4844 changes the type of the global beacon block topic and introduces a new global topic for blobs-sidecars.

##### `beacon_block`

The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in EIP-4844.

In addition to the gossip validations for this topic from prior specifications,
the following validations MUST pass before forwarding the `signed_beacon_block` on the network.
Alias `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 points
  -- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzgs)`
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
  -- i.e. `verify_kzgs_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzgs)`

##### `blobs_sidecar`

This topic is used to propagate data blobs included in any given beacon block.

The following validations MUST pass before forwarding the `signed_blobs_sidecar` on the network;
Alias `sidecar = signed_blobs_sidecar.message`.
- _[IGNORE]_ The `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `sidecar.beacon_block_slot == current_slot`.
- _[REJECT]_ The `sidecar.blobs` are all well formatted, i.e. each `BLSFieldElement` is in the valid range (`x < BLS_MODULUS`).
- _[REJECT]_ The beacon proposer signature, `signed_blobs_sidecar.signature`, is valid -- i.e.
  ```python
  domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, sidecar.beacon_block_slot // SLOTS_PER_EPOCH)
  signing_root = compute_signing_root(sidecar, domain)
  assert bls.Verify(proposer_pubkey, signing_root, signed_blobs_sidecar.signature)
  ```
  where `proposer_pubkey` is the pubkey of the beacon block proposer of `sidecar.beacon_block_slot`
- _[IGNORE]_ The sidecar is the first sidecar with valid signature received for the `(proposer_index, sidecar.beacon_block_slot)` combination,
  where `proposer_index` is the validator index of the beacon block proposer of `sidecar.beacon_block_slot`

Note that a sidecar may be propagated before or after the corresponding beacon block.

Once both sidecar and beacon block are received, `verify_blobs_sidecar` can unlock the data-availability fork-choice dependency.
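
A rough sketch of how a client might couple the two arrivals; the local caches `pending_blocks` and `pending_sidecars` are illustrative, not spec structures:

```python
# Sketch only: once the block and its sidecar have both arrived, the
# data-availability check for that block root can be performed.
def try_unlock_data_availability(block_root: Root,
                                 pending_blocks: Dict[Root, SignedBeaconBlock],
                                 pending_sidecars: Dict[Root, BlobsSidecar]) -> None:
    if block_root in pending_blocks and block_root in pending_sidecars:
        block = pending_blocks[block_root].message
        sidecar = pending_sidecars[block_root]
        verify_blobs_sidecar(block.slot, block_root, block.body.blob_kzgs, sidecar)
```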

### Transitioning the gossip

See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
details on how to handle transitioning gossip topics for this upgrade.

## The Req/Resp domain

### Messages

#### BeaconBlocksByRange v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`

The EIP-4844 fork-digest is introduced to the `context` enum to specify EIP-4844 beacon block type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version`           | Chunk SSZ type                |
|--------------------------|-------------------------------|
| `GENESIS_FORK_VERSION`   | `phase0.SignedBeaconBlock`    |
| `ALTAIR_FORK_VERSION`    | `altair.SignedBeaconBlock`    |
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `EIP4844_FORK_VERSION`   | `eip4844.SignedBeaconBlock`   |

#### BeaconBlocksByRoot v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`

The EIP-4844 fork-digest is introduced to the `context` enum to specify EIP-4844 beacon block type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[1]: # (eth2spec: skip)

| `fork_version`           | Chunk SSZ type                |
| ------------------------ | ----------------------------- |
| `GENESIS_FORK_VERSION`   | `phase0.SignedBeaconBlock`    |
| `ALTAIR_FORK_VERSION`    | `altair.SignedBeaconBlock`    |
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
| `EIP4844_FORK_VERSION`   | `eip4844.SignedBeaconBlock`   |

#### BlobsSidecarsByRange v1

**Protocol ID:** `/eth2/beacon_chain/req/blobs_sidecars_by_range/1/`

Request Content:
```
(
  start_slot: Slot
  count: uint64
)
```

Response Content:
```
(
  List[BlobsSidecar, MAX_REQUEST_BLOBS_SIDECARS]
)
```

Requests blobs sidecars in the slot range `[start_slot, start_slot + count)`,
leading up to the current head block as selected by fork choice.

The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the beacon block proposer
may not be available beyond the initial distribution via gossip.

Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and
correct w.r.t. the expected KZG commitments through `verify_blobs_sidecar`.

`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip.

The request MUST be encoded as an SSZ-container.

The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `SignedBlobsSidecar` payload.

Clients MUST keep a record of signed blobs sidecars seen on the epoch range
`[max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS), current_epoch]`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of sidecars on this range.
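
To make the retention window concrete, a small sketch (the helper name is illustrative, not part of the spec) of the oldest epoch a node is required to serve:

```python
# Sketch: the mandatory serving range is [max(GENESIS_EPOCH, current_epoch - window), current_epoch].
def min_epoch_for_blobs_sidecars(current_epoch: Epoch) -> Epoch:
    return Epoch(max(GENESIS_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS))
```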

Peers that are unable to reply to blobs sidecar requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.

*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
to be fully compliant with `BlobsSidecarsByRange` requests. To safely perform such a
backfill of blocks to the recent state, the node MUST validate both (1) the
proposer signatures and (2) that the blocks form a valid chain up to the most
recent block referenced in the weak subjectivity state.

*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin
participating in the networking immediately, other peers MAY
disconnect and/or temporarily ban such an un-synced or semi-synced client.

Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it,
and no more than `MAX_REQUEST_BLOBS_SIDECARS` sidecars.

The following blobs sidecars, where they exist, MUST be sent in consecutive order.

Clients MAY limit the number of blobs sidecars in the response.

The response MUST contain no more than `count` blobs sidecars.

Clients MUST respond with blobs sidecars from their view of the current fork choice
-- that is, blobs sidecars as included by blocks from the single chain defined by the current head.
Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.

Clients MUST respond with blobs sidecars that are consistent from a single chain within the context of the request.

After the initial blobs sidecar, clients MAY stop in the process of responding
if their fork choice changes the view of the chain in the context of the request.

# Design decision rationale

## Why are blobs relayed as a sidecar, separate from beacon blocks?

This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`:
with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS),
thus avoiding all blobs being downloaded by all beacon nodes on the network.

Such sharding design may introduce an updated `BlobsSidecar` to identify the shard,
but does not affect the `BeaconBlock` structure.

@@ -0,0 +1,134 @@

# EIP-4844 -- Honest Validator

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Helpers](#helpers)
  - [`is_data_available`](#is_data_available)
  - [`verify_blobs_sidecar`](#verify_blobs_sidecar)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
      - [Blob commitments](#blob-commitments)
  - [Beacon Block publishing time](#beacon-block-publishing-time)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document represents the changes to be made in the code of an "honest validator" to implement EIP-4844.

## Prerequisites

This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide.
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.

All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP4844](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.

## Helpers

### `is_data_available`

The implementation of `is_data_available` is meant to change with later sharding upgrades.
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
and verify the sidecar with `verify_blobs_sidecar`.

Without the sidecar the block may be processed further optimistically,
but MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.

```python
def is_data_available(slot: Slot, beacon_block_root: Root, kzgs: Sequence[KZGCommitment]):
    sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)  # implementation dependent, raises an exception if not available
    verify_blobs_sidecar(slot, beacon_block_root, kzgs, sidecar)
```

### `verify_blobs_sidecar`

```python
def verify_blobs_sidecar(slot: Slot, beacon_block_root: Root,
                         expected_kzgs: Sequence[KZGCommitment], blobs_sidecar: BlobsSidecar):
    assert slot == blobs_sidecar.beacon_block_slot
    assert beacon_block_root == blobs_sidecar.beacon_block_root
    blobs = blobs_sidecar.blobs
    assert len(expected_kzgs) == len(blobs)
    for kzg, blob in zip(expected_kzgs, blobs):
        assert blob_to_kzg(blob) == kzg
```

## Beacon chain responsibilities

All validator responsibilities remain unchanged other than those noted below.
Namely, the blob handling and the addition of `BlobsSidecar`.

### Block proposal

#### Constructing the `BeaconBlockBody`

##### Blob commitments

After retrieving the execution payload from the execution engine as specified in Bellatrix,
the blobs are retrieved and processed:

```python
# execution_payload = execution_engine.get_payload(payload_id)
# block.body.execution_payload = execution_payload
# ...

kzgs, blobs = get_blobs(payload_id)

# Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions
assert verify_kzgs_against_transactions(execution_payload.transactions, kzgs)

# Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine)
assert len(kzgs) == len(blobs) and all(blob_to_kzg(blob) == kzg for blob, kzg in zip(blobs, kzgs))

# Update the block body
block.body.blob_kzgs = kzgs
```

The `blobs` should be held with the block in preparation of publishing.
Without the `blobs`, the published block will effectively be ignored by honest validators.

Note: This API is *unstable*. `get_blobs` and `get_payload` may be unified.
Implementers may also retrieve blobs individually per transaction.

### Beacon Block publishing time

Before publishing a prepared beacon block proposal, the corresponding blobs are packaged into a sidecar object for distribution to the network:

```python
blobs_sidecar = BlobsSidecar(
    beacon_block_root=hash_tree_root(beacon_block),
    beacon_block_slot=beacon_block.slot,
    shard=0,
    blobs=blobs,
)
```

And then signed:

```python
domain = get_domain(state, DOMAIN_BLOBS_SIDECAR, blobs_sidecar.beacon_block_slot // SLOTS_PER_EPOCH)
signing_root = compute_signing_root(blobs_sidecar, domain)
signature = bls.Sign(privkey, signing_root)
signed_blobs_sidecar = SignedBlobsSidecar(message=blobs_sidecar, signature=signature)
```

This `signed_blobs_sidecar` is then published to the global `blobs_sidecar` topic as soon as the `beacon_block` is published.

After publishing the sidecar, peers on the network may request it through sync requests, or a local user may be interested.
The validator MUST hold on to blobs for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable,
to ensure the data-availability of these blobs throughout the network.

After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the blobs and/or stop serving them.
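
A minimal sketch (the helper name is illustrative, not spec code) of the pruning condition implied by the retention requirement above:

```python
# Sketch: a sidecar may be pruned once its epoch has fallen out of the
# MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS retention window.
def may_prune_sidecar(sidecar_epoch: Epoch, current_epoch: Epoch) -> bool:
    return current_epoch > sidecar_epoch + MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS
```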

@@ -32,6 +32,7 @@

  - [`on_tick`](#on_tick)
  - [`on_block`](#on_block)
  - [`on_attestation`](#on_attestation)
  - [`on_attester_slashing`](#on_attester_slashing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

@@ -101,6 +102,7 @@ class Store(object):
    finalized_checkpoint: Checkpoint
    best_justified_checkpoint: Checkpoint
    proposer_boost_root: Root
    equivocating_indices: Set[ValidatorIndex]
    blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
    block_states: Dict[Root, BeaconState] = field(default_factory=dict)
    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)

@@ -129,6 +131,7 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -
        finalized_checkpoint=finalized_checkpoint,
        best_justified_checkpoint=justified_checkpoint,
        proposer_boost_root=proposer_boost_root,
        equivocating_indices=set(),
        blocks={anchor_root: copy(anchor_block)},
        block_states={anchor_root: copy(anchor_state)},
        checkpoint_states={justified_checkpoint: copy(anchor_state)},

@@ -179,6 +182,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei:
    attestation_score = Gwei(sum(
        state.validators[i].effective_balance for i in active_indices
        if (i in store.latest_messages
            and i not in store.equivocating_indices
            and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root)
    ))
    if store.proposer_boost_root == Root():

@@ -357,7 +361,8 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None:
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
    target = attestation.data.target
    beacon_block_root = attestation.data.beacon_block_root
    for i in attesting_indices:
    non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices]
    for i in non_equivocating_attesting_indices:
        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root)
```

@@ -459,3 +464,25 @@ def on_attestation(store: Store, attestation: Attestation, is_from_block: bool=F
    # Update latest messages for attesting indices
    update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
```

#### `on_attester_slashing`

*Note*: `on_attester_slashing` should be called while syncing and a client MUST maintain the equivocation set of `AttesterSlashing`s from at least the latest finalized checkpoint.

```python
def on_attester_slashing(store: Store, attester_slashing: AttesterSlashing) -> None:
    """
    Run ``on_attester_slashing`` immediately upon receiving a new ``AttesterSlashing``
    from either within a block or directly on the wire.
    """
    attestation_1 = attester_slashing.attestation_1
    attestation_2 = attester_slashing.attestation_2
    assert is_slashable_attestation_data(attestation_1.data, attestation_2.data)
    state = store.block_states[store.justified_checkpoint.root]
    assert is_valid_indexed_attestation(state, attestation_1)
    assert is_valid_indexed_attestation(state, attestation_2)

    indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)
    for index in indices:
        store.equivocating_indices.add(index)
```

@@ -337,6 +337,8 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
  (a client MAY queue future aggregates for processing at the appropriate slot).
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
  compute_epoch_at_slot(aggregate.data.slot)`
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
  (via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
  with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
- _[REJECT]_ The attestation has participants --

@@ -353,7 +355,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
  (via both gossip and non-gossip sources)
  (a client MAY queue aggregates for processing once block is retrieved).
- _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e.
- _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e.
  `get_ancestor(store, aggregate.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
  == store.finalized_checkpoint.root`

@@ -81,10 +81,17 @@ def is_execution_block(block: BeaconBlock) -> bool:

```python
def is_optimistic_candidate_block(opt_store: OptimisticStore, current_slot: Slot, block: BeaconBlock) -> bool:
    if is_execution_block(opt_store.blocks[block.parent_root]):
        return True

    justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
    justified_is_execution_block = is_execution_block(opt_store.blocks[justified_root])
    block_is_deep = block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot
    return justified_is_execution_block or block_is_deep
    if is_execution_block(opt_store.blocks[justified_root]):
        return True

    if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
        return True

    return False
```

Let only a node which returns `is_optimistic(opt_store, head) is True` be an *optimistic

@@ -99,14 +106,21 @@ behaviours without regard for optimistic sync.

### When to optimistically import blocks

A block MAY be optimistically imported when
`is_optimistic_candidate_block(opt_store, current_slot, block)` returns
`True`. This ensures that blocks are only optimistically imported if either:
`is_optimistic_candidate_block(opt_store, current_slot, block)` returns `True`.
This ensures that blocks are only optimistically imported if one or more of the
following are true:

1. The parent of the block has execution enabled.
1. The justified checkpoint has execution enabled.
1. The current slot (as per the system clock) is at least
   `SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` ahead of the slot of the block being
   imported.

In effect, there are restrictions on when a *merge block* can be optimistically
imported. The merge block is the first block in any chain where
`is_execution_block(block) == True`. Any descendant of a merge block may be
imported optimistically at any time.

*See [Fork Choice Poisoning](#fork-choice-poisoning) for the motivations behind
these conditions.*

@@ -1,4 +1,5 @@
from copy import deepcopy
from typing import Optional

from eth2spec.test.helpers.pow_block import (
    prepare_random_pow_chain,

@@ -146,7 +147,11 @@ def test_prepare_execution_payload(spec, state):

    # Mock execution_engine
    class TestEngine(spec.NoopExecutionEngine):
        def notify_forkchoice_updated(self, parent_hash, finalized_block_hash, payload_attributes) -> bool:
        def notify_forkchoice_updated(self,
                                      head_block_hash,
                                      safe_block_hash,
                                      finalized_block_hash,
                                      payload_attributes) -> Optional[spec.PayloadId]:
            return SAMPLE_PAYLOAD_ID

    payload_id = spec.prepare_execution_payload(

@@ -31,7 +31,7 @@ def run_process_full_withdrawals(spec, state, num_expected_withdrawals=None):

    for index in to_be_withdrawn_indices:
        validator = state.validators[index]
        assert validator.withdrawn_epoch == spec.get_current_epoch(state)
        assert validator.fully_withdrawn_epoch == spec.get_current_epoch(state)
        assert state.balances[index] == 0

    assert len(state.withdrawals_queue) == len(pre_withdrawals_queue) + num_expected_withdrawals

@@ -92,6 +92,10 @@ def get_attestation_file_name(attestation):
    return f"attestation_{encode_hex(attestation.hash_tree_root())}"


def get_attester_slashing_file_name(attester_slashing):
    return f"attester_slashing_{encode_hex(attester_slashing.hash_tree_root())}"


def on_tick_and_append_step(spec, store, time, test_steps):
    spec.on_tick(store, time)
    test_steps.append({'tick': int(time)})

@@ -142,6 +146,10 @@ def add_block(spec,
    for attestation in signed_block.message.body.attestations:
        run_on_attestation(spec, store, attestation, is_from_block=True, valid=True)

    # An on_block step implies receiving block's attester slashings
    for attester_slashing in signed_block.message.body.attester_slashings:
        run_on_attester_slashing(spec, store, attester_slashing, valid=True)

    block_root = signed_block.message.hash_tree_root()
    assert store.blocks[block_root] == signed_block.message
    assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root

@@ -168,6 +176,38 @@ def add_block(spec,
    return store.block_states[signed_block.message.hash_tree_root()]


def run_on_attester_slashing(spec, store, attester_slashing, valid=True):
    if not valid:
        try:
            spec.on_attester_slashing(store, attester_slashing)
        except AssertionError:
            return
        else:
            assert False

    spec.on_attester_slashing(store, attester_slashing)


def add_attester_slashing(spec, store, attester_slashing, test_steps, valid=True):
    slashing_file_name = get_attester_slashing_file_name(attester_slashing)
    yield get_attester_slashing_file_name(attester_slashing), attester_slashing

    if not valid:
        try:
            run_on_attester_slashing(spec, store, attester_slashing)
        except AssertionError:
            test_steps.append({
                'attester_slashing': slashing_file_name,
                'valid': False,
            })
            return
        else:
            assert False

    run_on_attester_slashing(spec, store, attester_slashing)
    test_steps.append({'attester_slashing': slashing_file_name})


def get_formatted_head_output(spec, store):
    head = spec.get_head(store)
    slot = store.blocks[head].slot

@@ -20,7 +20,7 @@ def build_mock_validator(spec, i: int, balance: int):
    )

    if spec.fork not in FORKS_BEFORE_CAPELLA:
        validator.withdrawn_epoch = spec.FAR_FUTURE_EPOCH
        validator.fully_withdrawn_epoch = spec.FAR_FUTURE_EPOCH

    return validator

@@ -8,16 +8,21 @@ from eth2spec.test.context import (
    with_presets,
)
from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.block import (
    apply_empty_block,
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.fork_choice import (
    tick_and_run_on_attestation,
    tick_and_add_block,
    add_attester_slashing,
    add_block,
    get_anchor_root,
    get_genesis_forkchoice_store_and_block,
    get_formatted_head_output,
    on_tick_and_append_step,
    add_block,
    run_on_attestation,
    tick_and_run_on_attestation,
    tick_and_add_block,
)
from eth2spec.test.helpers.state import (
    next_slots,

@@ -338,3 +343,84 @@ def test_proposer_boost_correct_head(spec, state):
    })

    yield 'steps', test_steps


@with_all_phases
@spec_state_test
def test_discard_equivocations(spec, state):
    test_steps = []
    genesis_state = state.copy()

    # Initialization
    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block
    anchor_root = get_anchor_root(spec, state)
    assert spec.get_head(store) == anchor_root
    test_steps.append({
        'checks': {
            'head': get_formatted_head_output(spec, store),
        }
    })

    # Build block that serves as head before discarding equivocations
    state_1 = genesis_state.copy()
    next_slots(spec, state_1, 3)
    block_1 = build_empty_block_for_next_slot(spec, state_1)
    signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1)

    # Build equivocating attestations to feed to store
    state_eqv = state_1.copy()
    block_eqv = apply_empty_block(spec, state_eqv, state_eqv.slot + 1)
    attestation_eqv = get_valid_attestation(spec, state_eqv, slot=block_eqv.slot, signed=True)

    next_slots(spec, state_1, 1)
    attestation = get_valid_attestation(spec, state_1, slot=block_eqv.slot, signed=True)
    assert spec.is_slashable_attestation_data(attestation.data, attestation_eqv.data)

    indexed_attestation = spec.get_indexed_attestation(state_1, attestation)
    indexed_attestation_eqv = spec.get_indexed_attestation(state_eqv, attestation_eqv)
    attester_slashing = spec.AttesterSlashing(attestation_1=indexed_attestation, attestation_2=indexed_attestation_eqv)

    # Build block that serves as head after discarding equivocations
    state_2 = genesis_state.copy()
    next_slots(spec, state_2, 2)
    block_2 = build_empty_block_for_next_slot(spec, state_2)
    signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2)
    while spec.hash_tree_root(block_1) >= spec.hash_tree_root(block_2):
        block_2.body.graffiti = spec.Bytes32(hex(rng.getrandbits(8 * 32))[2:].zfill(64))
        signed_block_2 = state_transition_and_sign_block(spec, state_2.copy(), block_2)
    assert spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2)

    # Tick to (block_eqv.slot + 2) slot time
    time = store.genesis_time + (block_eqv.slot + 2) * spec.config.SECONDS_PER_SLOT
    on_tick_and_append_step(spec, store, time, test_steps)

    # Process block_2
    yield from add_block(spec, store, signed_block_2, test_steps)
    assert store.proposer_boost_root == spec.Root()
    assert spec.get_head(store) == spec.hash_tree_root(block_2)

    # Process block_1
    # The head should remain block_2
    yield from add_block(spec, store, signed_block_1, test_steps)
    assert store.proposer_boost_root == spec.Root()
    assert spec.get_head(store) == spec.hash_tree_root(block_2)

    # Process attestation
    # The head should change to block_1
    run_on_attestation(spec, store, attestation)
    assert spec.get_head(store) == spec.hash_tree_root(block_1)

    # Process attester_slashing
    # The head should revert to block_2
    yield from add_attester_slashing(spec, store, attester_slashing, test_steps)
    assert spec.get_head(store) == spec.hash_tree_root(block_2)

    test_steps.append({
        'checks': {
            'head': get_formatted_head_output(spec, store),
        }
    })

    yield 'steps', test_steps

@@ -76,11 +76,27 @@ Adds `PowBlock` data which is required for executing `on_block(store, block)`.
{
    pow_block: string -- the name of the `pow_block_<32-byte-root>.ssz_snappy` file.
      To be used in `get_pow_block` lookup
}
}
```
The file is located in the same folder (see below).
PowBlocks should be used as return values for `get_pow_block(hash: Hash32) -> PowBlock` function if hashes match.

#### `on_attester_slashing` execution step

The parameter that is required for executing `on_attester_slashing(store, attester_slashing)`.

```yaml
{
    attester_slashing: string -- the name of the `attester_slashing_<32-byte-root>.ssz_snappy` file.
      To execute `on_attester_slashing(store, attester_slashing)` with the given attester slashing.
    valid: bool -- optional, default to `true`.
      If it's `false`, this execution step is expected to be invalid.
}
```
The file is located in the same folder (see below).

After this step, the `store` object may have been updated.

#### Checks step

The checks to verify the current status of `store`.