Merge branch 'dev' into att-slot-range

This commit is contained in:
djrtwo 2023-06-14 09:35:30 -06:00
commit 559b89c447
37 changed files with 979 additions and 261 deletions

View File

@ -190,7 +190,7 @@ jobs:
- checkout
- run:
name: Check table of contents
command: sudo npm install -g doctoc@2 && make check_toc
command: sudo npm install -g doctoc@2.2.0 && make check_toc
codespell:
docker:
- image: circleci/python:3.9

View File

@ -50,7 +50,7 @@ jobs:
with:
ref: ${{ github.event.inputs.commitRef || env.DEFAULT_BRANCH }}
- name: Check table of contents
run: sudo npm install -g doctoc@2 && make check_toc
run: sudo npm install -g doctoc@2.2.0 && make check_toc
codespell:
runs-on: self-hosted

1
.gitignore vendored
View File

@ -23,6 +23,7 @@ tests/core/pyspec/eth2spec/capella/
tests/core/pyspec/eth2spec/deneb/
tests/core/pyspec/eth2spec/eip6110/
tests/core/pyspec/eth2spec/eip7045/
tests/core/pyspec/eth2spec/whisk/
# coverage reports
.htmlcov

View File

@ -34,7 +34,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
$(wildcard $(SSZ_DIR)/*.md)
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110 eip7045
ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb eip6110 eip7045 whisk
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S)
@ -146,8 +146,8 @@ codespell:
lint: pyspec
. venv/bin/activate; cd $(PY_SPEC_DIR); \
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
&& pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
&& mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)
&& python -m pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \
&& python -m mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE)
lint_generators: pyspec
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \

View File

@ -101,16 +101,16 @@ DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
# Networking
# ---------------------------------------------------------------
# `2**20` (= 1048576, 1 MiB)
GOSSIP_MAX_SIZE: 1048576
# `10 * 2**20` (= 10485760, 10 MiB)
GOSSIP_MAX_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 33024
# `2**20` (=1048576, 1 MiB)
MAX_CHUNK_SIZE: 1048576
# `10 * 2**20` (=10485760, 10 MiB)
MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
@ -127,3 +127,13 @@ ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
ATTESTATION_SUBNET_PREFIX_BITS: 6
# Deneb
# `2**7` (=128)
MAX_REQUEST_BLOCKS_DENEB: 128
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6
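
The derived values above can be sanity-checked in a few lines; a minimal sketch, assuming the mainnet phase0 values `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8` and `CHURN_LIMIT_QUOTIENT = 2**16`:

```python
# Sanity-check the derived networking values in this config.
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8  # 256 epochs (mainnet phase0 config)
CHURN_LIMIT_QUOTIENT = 2**16                # 65536 (mainnet phase0 config)
assert MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 == 33024

MAX_REQUEST_BLOCKS_DENEB = 2**7  # 128
MAX_BLOBS_PER_BLOCK = 6          # Deneb preset
assert MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK == 768  # MAX_REQUEST_BLOB_SIDECARS
```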

View File

@ -104,16 +104,16 @@ DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
# Networking
# ---------------------------------------------------------------
# `2**20` (= 1048576, 1 MiB)
GOSSIP_MAX_SIZE: 1048576
# `10 * 2**20` (= 10485760, 10 MiB)
GOSSIP_MAX_SIZE: 10485760
# `2**10` (= 1024)
MAX_REQUEST_BLOCKS: 1024
# `2**8` (= 256)
EPOCHS_PER_SUBNET_SUBSCRIPTION: 256
# [customized] `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 272)
MIN_EPOCHS_FOR_BLOCK_REQUESTS: 272
# `2**20` (=1048576, 1 MiB)
MAX_CHUNK_SIZE: 1048576
# `10 * 2**20` (=10485760, 10 MiB)
MAX_CHUNK_SIZE: 10485760
# 5s
TTFB_TIMEOUT: 5
# 10s
@ -130,3 +130,13 @@ ATTESTATION_SUBNET_COUNT: 64
ATTESTATION_SUBNET_EXTRA_BITS: 0
# ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS
ATTESTATION_SUBNET_PREFIX_BITS: 6
# Deneb
# `2**7` (=128)
MAX_REQUEST_BLOCKS_DENEB: 128
# MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK
MAX_REQUEST_BLOB_SIDECARS: 768
# `2**12` (= 4096 epochs, ~18 days)
MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096
# `6`
BLOB_SIDECAR_SUBNET_COUNT: 6

View File

@ -6,5 +6,5 @@
FIELD_ELEMENTS_PER_BLOB: 4096
# `uint64(2**12)` (= 4096)
MAX_BLOB_COMMITMENTS_PER_BLOCK: 4096
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6

View File

@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
MAX_VOLUNTARY_EXITS: 16

View File

@ -6,5 +6,5 @@
FIELD_ELEMENTS_PER_BLOB: 4
# [customized]
MAX_BLOB_COMMITMENTS_PER_BLOCK: 16
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4
# `uint64(6)`
MAX_BLOBS_PER_BLOCK: 6

View File

@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128
# 2**4 (= 16)
MAX_DEPOSITS: 16
# 2**4 (= 16)
MAX_VOLUNTARY_EXITS: 16
MAX_VOLUNTARY_EXITS: 16

159
setup.py
View File

@ -49,7 +49,28 @@ CAPELLA = 'capella'
DENEB = 'deneb'
EIP6110 = 'eip6110'
EIP7045 = 'eip7045'
WHISK = 'whisk'
PREVIOUS_FORK_OF = {
PHASE0: None,
ALTAIR: PHASE0,
BELLATRIX: ALTAIR,
CAPELLA: BELLATRIX,
DENEB: CAPELLA,
EIP6110: DENEB,
EIP7045: DENEB,
WHISK: CAPELLA,
}
ALL_FORKS = list(PREVIOUS_FORK_OF.keys())
IGNORE_SPEC_FILES = [
"specs/phase0/deposit-contract.md"
]
EXTRA_SPEC_FILES = {
BELLATRIX: "sync/optimistic.md"
}
# The helper functions that are used when defining constants
CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
@ -96,6 +117,30 @@ class SpecObject(NamedTuple):
dataclasses: Dict[str, str]
def is_post_fork(a, b) -> bool:
"""
Returns true if fork a is after b, or if a == b
"""
if a == b:
return True
prev_fork = PREVIOUS_FORK_OF[a]
if prev_fork == b:
return True
elif prev_fork is None:
return False
else:
return is_post_fork(prev_fork, b)
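# Illustrative examples, given the PREVIOUS_FORK_OF mapping above:
#   is_post_fork(DENEB, PHASE0)  -> True   (deneb descends from phase0)
#   is_post_fork(WHISK, CAPELLA) -> True   (whisk builds on capella)
#   is_post_fork(WHISK, DENEB)   -> False  (whisk branches off at capella)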
def get_fork_directory(fork):
dir1 = f'specs/{fork}'
if os.path.exists(dir1):
return dir1
dir2 = f'specs/_features/{fork}'
if os.path.exists(dir2):
return dir2
raise FileNotFoundError(f"No directory found for fork: {fork}")
def _get_name_from_heading(heading: Heading) -> Optional[str]:
last_child = heading.children[-1]
if isinstance(last_child, CodeSpan):
@ -745,11 +790,35 @@ class EIP7045SpecBuilder(DenebSpecBuilder):
from eth2spec.deneb import {preset_name} as deneb
'''
all_builders = (
Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
EIP6110SpecBuilder, EIP7045SpecBuilder,
)
spec_builders = {builder.fork: builder for builder in all_builders}
#
# WhiskSpecBuilder
#
class WhiskSpecBuilder(CapellaSpecBuilder):
fork: str = WHISK
@classmethod
def imports(cls, preset_name: str):
return super().imports(preset_name) + f'''
from eth2spec.capella import {preset_name} as capella
'''
@classmethod
def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
# Necessary for custom types `WhiskShuffleProof` and `WhiskTrackerProof`
constants = {
'WHISK_MAX_SHUFFLE_PROOF_SIZE': spec_object.constant_vars['WHISK_MAX_SHUFFLE_PROOF_SIZE'].value,
'WHISK_MAX_OPENING_PROOF_SIZE': spec_object.constant_vars['WHISK_MAX_OPENING_PROOF_SIZE'].value,
}
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
spec_builders = {
builder.fork: builder
for builder in (
Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
EIP6110SpecBuilder, EIP7045SpecBuilder, WhiskSpecBuilder,
)
}
def is_byte_vector(value: str) -> bool:
@ -1058,70 +1127,19 @@ class PySpecCommand(Command):
if len(self.md_doc_paths) == 0:
print("no paths were specified, using default markdown file paths for pyspec"
" build (spec fork: %s)" % self.spec_fork)
if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110, EIP7045):
self.md_doc_paths = """
specs/phase0/beacon-chain.md
specs/phase0/fork-choice.md
specs/phase0/validator.md
specs/phase0/weak-subjectivity.md
specs/phase0/p2p-interface.md
"""
if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110, EIP7045):
self.md_doc_paths += """
specs/altair/light-client/full-node.md
specs/altair/light-client/light-client.md
specs/altair/light-client/p2p-interface.md
specs/altair/light-client/sync-protocol.md
specs/altair/beacon-chain.md
specs/altair/bls.md
specs/altair/fork.md
specs/altair/validator.md
specs/altair/p2p-interface.md
"""
if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110, EIP7045):
self.md_doc_paths += """
specs/bellatrix/beacon-chain.md
specs/bellatrix/fork.md
specs/bellatrix/fork-choice.md
specs/bellatrix/validator.md
specs/bellatrix/p2p-interface.md
sync/optimistic.md
"""
if self.spec_fork in (CAPELLA, DENEB, EIP6110, EIP7045):
self.md_doc_paths += """
specs/capella/light-client/fork.md
specs/capella/light-client/full-node.md
specs/capella/light-client/p2p-interface.md
specs/capella/light-client/sync-protocol.md
specs/capella/beacon-chain.md
specs/capella/fork.md
specs/capella/fork-choice.md
specs/capella/validator.md
specs/capella/p2p-interface.md
"""
if self.spec_fork in (DENEB, EIP6110, EIP7045):
self.md_doc_paths += """
specs/deneb/light-client/fork.md
specs/deneb/light-client/full-node.md
specs/deneb/light-client/p2p-interface.md
specs/deneb/light-client/sync-protocol.md
specs/deneb/beacon-chain.md
specs/deneb/fork.md
specs/deneb/fork-choice.md
specs/deneb/polynomial-commitments.md
specs/deneb/p2p-interface.md
specs/deneb/validator.md
"""
if self.spec_fork == EIP6110:
self.md_doc_paths += """
specs/_features/eip6110/beacon-chain.md
specs/_features/eip6110/fork.md
"""
if self.spec_fork == EIP7045:
self.md_doc_paths += """
specs/_features/eip7045/beacon-chain.md
specs/_features/eip7045/fork.md
"""
self.md_doc_paths = ""
for fork in ALL_FORKS:
if is_post_fork(self.spec_fork, fork):
# Append all files in fork directory recursively
for root, dirs, files in os.walk(get_fork_directory(fork)):
for filename in files:
filepath = os.path.join(root, filename)
if filepath.endswith('.md') and filepath not in IGNORE_SPEC_FILES:
self.md_doc_paths += filepath + "\n"
# Append extra files if any
if fork in EXTRA_SPEC_FILES:
self.md_doc_paths += EXTRA_SPEC_FILES[fork] + "\n"
if len(self.md_doc_paths) == 0:
raise Exception('no markdown files specified, and spec fork "%s" is unknown' % self.spec_fork)
@ -1275,8 +1293,9 @@ setup(
"remerkleable==0.1.27",
"trie==2.0.2",
RUAMEL_YAML_VERSION,
"lru-dict==1.1.8",
"lru-dict==1.2.0",
MARKO_VERSION,
"py_arkworks_bls12381==0.3.4",
"curdleproofs @ git+https://github.com/nalinbhardwaj/curdleproofs.pie@805d06785b6ff35fde7148762277dd1ae678beeb#egg=curdleproofs&subdirectory=curdleproofs",
]
)

View File

@ -91,7 +91,8 @@ class ExecutionPayload(Container):
block_hash: Hash32
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
excess_data_gas: uint256
data_gas_used: uint64
excess_data_gas: uint64
deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP6110]
```
@ -116,7 +117,8 @@ class ExecutionPayloadHeader(Container):
block_hash: Hash32
transactions_root: Root
withdrawals_root: Root
excess_data_gas: uint256
data_gas_used: uint64
excess_data_gas: uint64
deposit_receipts_root: Root # [New in EIP6110]
```
@ -268,6 +270,7 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
block_hash=payload.block_hash,
transactions_root=hash_tree_root(payload.transactions),
withdrawals_root=hash_tree_root(payload.withdrawals),
data_gas_used=payload.data_gas_used,
excess_data_gas=payload.excess_data_gas,
deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP6110]
)

View File

@ -88,7 +88,8 @@ def upgrade_to_eip6110(pre: deneb.BeaconState) -> BeaconState:
block_hash=pre.latest_execution_payload_header.block_hash,
transactions_root=pre.latest_execution_payload_header.transactions_root,
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
excess_data_gas=uint256(0),
data_gas_used=uint64(0),
excess_data_gas=uint64(0),
deposit_receipts_root=Root(), # [New in EIP-6110]
)
post = BeaconState(

View File

@ -0,0 +1,471 @@
# Whisk -- The Beacon Chain
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Constants](#constants)
- [Cryptography](#cryptography)
- [BLS](#bls)
- [Curdleproofs and opening proofs](#curdleproofs-and-opening-proofs)
- [Epoch processing](#epoch-processing)
- [`WhiskTracker`](#whisktracker)
- [`Validator`](#validator)
- [`BeaconState`](#beaconstate)
- [Block processing](#block-processing)
- [Block header](#block-header)
- [Whisk](#whisk)
- [`BeaconBlockBody`](#beaconblockbody)
- [Deposits](#deposits)
- [`get_beacon_proposer_index`](#get_beacon_proposer_index)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document details the beacon chain additions and changes to support Whisk, a single secret leader election (SSLE) protocol.
*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development.
## Constants
| Name | Value | Description |
| ---------------------------------- | -------------------------- | ----------------------------------------------------------- |
| `WHISK_CANDIDATE_TRACKERS_COUNT` | `uint64(2**14)` (= 16,384) | number of candidate trackers |
| `WHISK_PROPOSER_TRACKERS_COUNT` | `uint64(2**13)` (= 8,192) | number of proposer trackers |
| `WHISK_EPOCHS_PER_SHUFFLING_PHASE` | `Epoch(2**8)` (= 256) | epochs per shuffling phase |
| `WHISK_VALIDATORS_PER_SHUFFLE` | `uint64(2**7)` (= 128) | number of validators shuffled per shuffle step |
| `WHISK_PROPOSER_SELECTION_GAP` | `Epoch(2)` | gap between proposer selection and the block proposal phase |
| `WHISK_MAX_SHUFFLE_PROOF_SIZE` | `uint64(2**15)` | max size of a shuffle proof |
| `WHISK_MAX_OPENING_PROOF_SIZE` | `uint64(2**10)` | max size of an opening proof |
| Name | Value |
| ---------------------------------- | -------------------------- |
| `DOMAIN_WHISK_CANDIDATE_SELECTION` | `DomainType('0x07000000')` |
| `DOMAIN_WHISK_SHUFFLE` | `DomainType('0x07100000')` |
| `DOMAIN_WHISK_PROPOSER_SELECTION` | `DomainType('0x07200000')` |
## Cryptography
### BLS
| Name | SSZ equivalent | Description |
| ------------------- | ---------------------------------------- | ----------------------------- |
| `BLSFieldElement` | `uint256` | BLS12-381 scalar |
| `BLSG1Point` | `Bytes48` | compressed BLS12-381 G1 point |
| `WhiskShuffleProof` | `ByteList[WHISK_MAX_SHUFFLE_PROOF_SIZE]` | Serialized shuffle proof |
| `WhiskTrackerProof` | `ByteList[WHISK_MAX_OPENING_PROOF_SIZE]` | Serialized tracker proof |
*Note*: A subgroup check MUST be performed when deserializing a `BLSG1Point` for use in any of the functions below.
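Such a check can be sketched with the helpers used in this document, assuming the underlying library exposes the G1 identity (the `bls.Z1()` accessor below is a hypothetical name):

```python
def is_in_g1_subgroup(point: BLSG1Point) -> bool:
    # P is in the order-r subgroup iff r * P is the identity, with r = BLS_MODULUS.
    return bls.multiply(bls.bytes48_to_G1(point), BLS_MODULUS) == bls.Z1()  # bls.Z1() assumed
```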
```python
def BLSG1ScalarMultiply(scalar: BLSFieldElement, point: BLSG1Point) -> BLSG1Point:
return bls.G1_to_bytes48(bls.multiply(bls.bytes48_to_G1(point), scalar))
```
```python
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
"""
Convert bytes to a BLS field scalar. The output is not uniform over the BLS field.
TODO: Deneb introduces this helper too. Delete it once Whisk is rebased to post-Deneb.
"""
field_element = int.from_bytes(b, ENDIANNESS)
assert field_element < BLS_MODULUS
return BLSFieldElement(field_element)
```
| Name | Value |
| ------------------ | ------------------------------------------------------------------------------- |
| `BLS_G1_GENERATOR` | `bls.G1_to_bytes48(bls.G1)` |
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` |
### Curdleproofs and opening proofs
Note that Curdleproofs (Whisk shuffle proofs), the tracker opening proofs, and all related data structures and verifier code (along with tests) are specified in the [curdleproofs.pie](https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only) repository.
```python
def IsValidWhiskShuffleProof(pre_shuffle_trackers: Sequence[WhiskTracker],
post_shuffle_trackers: Sequence[WhiskTracker],
M: BLSG1Point,
shuffle_proof: WhiskShuffleProof) -> bool:
"""
Verify `post_shuffle_trackers` is a permutation of `pre_shuffle_trackers`.
Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only.
"""
# pylint: disable=unused-argument
return True
```
```python
def IsValidWhiskOpeningProof(tracker: WhiskTracker,
k_commitment: BLSG1Point,
tracker_proof: WhiskTrackerProof) -> bool:
"""
Verify knowledge of `k` such that `tracker.k_r_G == k * tracker.r_G` and `k_commitment == k * BLS_G1_GENERATOR`.
Defined in https://github.com/nalinbhardwaj/curdleproofs.pie/tree/verifier-only.
"""
# pylint: disable=unused-argument
return True
```
## Epoch processing
### `WhiskTracker`
```python
class WhiskTracker(Container):
r_G: BLSG1Point # r * G
k_r_G: BLSG1Point # k * r * G
```
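For orientation, a tracker for secrets `k` and `r` can be assembled from the helpers above; a sketch (the helper name `compute_whisk_tracker` is illustrative and not defined in this document):

```python
def compute_whisk_tracker(k: BLSFieldElement, r: BLSFieldElement) -> WhiskTracker:
    # Build the pair (r * G, k * r * G) from the scalar secrets k and r.
    r_G = BLSG1ScalarMultiply(r, BLS_G1_GENERATOR)
    k_r_G = BLSG1ScalarMultiply(k, r_G)
    return WhiskTracker(r_G=r_G, k_r_G=k_r_G)
```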
### `Validator`
```python
class Validator(Container):
pubkey: BLSPubkey
withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals
effective_balance: Gwei # Balance at stake
slashed: boolean
# Status epochs
activation_eligibility_epoch: Epoch # When criteria for activation were met
activation_epoch: Epoch
exit_epoch: Epoch
withdrawable_epoch: Epoch # When validator can withdraw funds
whisk_tracker: WhiskTracker # Whisk tracker (r * G, k * r * G) [New in Whisk]
whisk_k_commitment: BLSG1Point # Whisk k commitment k * BLS_G1_GENERATOR [New in Whisk]
```
### `BeaconState`
```python
class BeaconState(Container):
# Versioning
genesis_time: uint64
genesis_validators_root: Root
slot: Slot
fork: Fork
# History
latest_block_header: BeaconBlockHeader
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] # Frozen in Capella, replaced by historical_summaries
# Eth1
eth1_data: Eth1Data
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
eth1_deposit_index: uint64
# Registry
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] # [Modified in Whisk]
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
# Randomness
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
# Slashings
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
# Participation
previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
# Finality
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
previous_justified_checkpoint: Checkpoint
current_justified_checkpoint: Checkpoint
finalized_checkpoint: Checkpoint
# Inactivity
inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]
# Sync
current_sync_committee: SyncCommittee
next_sync_committee: SyncCommittee
# Execution
latest_execution_payload_header: ExecutionPayloadHeader
# Withdrawals
next_withdrawal_index: WithdrawalIndex
next_withdrawal_validator_index: ValidatorIndex
# Deep history valid from Capella onwards
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
whisk_candidate_trackers: Vector[WhiskTracker, WHISK_CANDIDATE_TRACKERS_COUNT] # [New in Whisk]
whisk_proposer_trackers: Vector[WhiskTracker, WHISK_PROPOSER_TRACKERS_COUNT] # [New in Whisk]
```
```python
def select_whisk_trackers(state: BeaconState, epoch: Epoch) -> None:
# Select proposer trackers from candidate trackers
proposer_seed = get_seed(state, epoch - WHISK_PROPOSER_SELECTION_GAP, DOMAIN_WHISK_PROPOSER_SELECTION)
for i in range(WHISK_PROPOSER_TRACKERS_COUNT):
index = compute_shuffled_index(uint64(i), uint64(len(state.whisk_candidate_trackers)), proposer_seed)
state.whisk_proposer_trackers[i] = state.whisk_candidate_trackers[index]
# Select candidate trackers from active validator trackers
active_validator_indices = get_active_validator_indices(state, epoch)
for i in range(WHISK_CANDIDATE_TRACKERS_COUNT):
seed = hash(get_seed(state, epoch, DOMAIN_WHISK_CANDIDATE_SELECTION) + uint_to_bytes(i))
candidate_index = compute_proposer_index(state, active_validator_indices, seed) # sample by effective balance
state.whisk_candidate_trackers[i] = state.validators[candidate_index].whisk_tracker
```
```python
def process_whisk_updates(state: BeaconState) -> None:
next_epoch = Epoch(get_current_epoch(state) + 1)
if next_epoch % WHISK_EPOCHS_PER_SHUFFLING_PHASE == 0: # select trackers at the start of shuffling phases
select_whisk_trackers(state, next_epoch)
```
```python
def process_epoch(state: BeaconState) -> None:
process_justification_and_finalization(state)
process_inactivity_updates(state)
process_rewards_and_penalties(state)
process_registry_updates(state)
process_slashings(state)
process_eth1_data_reset(state)
process_effective_balance_updates(state)
process_slashings_reset(state)
process_randao_mixes_reset(state)
process_historical_summaries_update(state)
process_participation_flag_updates(state)
process_sync_committee_updates(state)
process_whisk_updates(state) # [New in Whisk]
```
## Block processing
### Block header
```python
def process_whisk_opening_proof(state: BeaconState, block: BeaconBlock) -> None:
tracker = state.whisk_proposer_trackers[state.slot % WHISK_PROPOSER_TRACKERS_COUNT]
k_commitment = state.validators[block.proposer_index].whisk_k_commitment
assert IsValidWhiskOpeningProof(tracker, k_commitment, block.body.whisk_opening_proof)
```
The `assert block.proposer_index == get_beacon_proposer_index(state)` check is removed in Whisk.
```python
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
# Verify that the slots match
assert block.slot == state.slot
# Verify that the block is newer than latest block header
assert block.slot > state.latest_block_header.slot
# # Verify that proposer index is the correct index
# assert block.proposer_index == get_beacon_proposer_index(state)
# Verify that the parent matches
assert block.parent_root == hash_tree_root(state.latest_block_header)
# Cache current block as the new latest block
state.latest_block_header = BeaconBlockHeader(
slot=block.slot,
proposer_index=block.proposer_index,
parent_root=block.parent_root,
state_root=Bytes32(), # Overwritten in the next process_slot call
body_root=hash_tree_root(block.body),
)
# Verify proposer is not slashed
proposer = state.validators[block.proposer_index]
assert not proposer.slashed
process_whisk_opening_proof(state, block) # [New in Whisk]
```
### Whisk
#### `BeaconBlockBody`
```python
class BeaconBlockBody(capella.BeaconBlockBody):
randao_reveal: BLSSignature
eth1_data: Eth1Data # Eth1 data vote
graffiti: Bytes32 # Arbitrary data
# Operations
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
attestations: List[Attestation, MAX_ATTESTATIONS]
deposits: List[Deposit, MAX_DEPOSITS]
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
sync_aggregate: SyncAggregate
# Execution
execution_payload: ExecutionPayload
# Capella operations
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
# Whisk
whisk_opening_proof: WhiskTrackerProof # [New in Whisk]
whisk_post_shuffle_trackers: Vector[WhiskTracker, WHISK_VALIDATORS_PER_SHUFFLE] # [New in Whisk]
whisk_shuffle_proof: WhiskShuffleProof # [New in Whisk]
whisk_shuffle_proof_M_commitment: BLSG1Point # [New in Whisk]
whisk_registration_proof: WhiskTrackerProof # [New in Whisk]
whisk_tracker: WhiskTracker # [New in Whisk]
whisk_k_commitment: BLSG1Point # [New in Whisk]
```
```python
def get_shuffle_indices(randao_reveal: BLSSignature) -> Sequence[uint64]:
"""
Given a `randao_reveal`, return the list of indices that got shuffled from the entire candidate set.
"""
indices = []
for i in range(WHISK_VALIDATORS_PER_SHUFFLE):
# XXX ensure we are not suffering from modulo bias
shuffle_index = uint256(hash(randao_reveal + uint_to_bytes(i))) % WHISK_CANDIDATE_TRACKERS_COUNT
indices.append(shuffle_index)
return indices
```
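One standard way to address the modulo-bias `XXX` note above is rejection sampling; a sketch only, not part of this spec (`sample_unbiased_index` is a hypothetical helper):

```python
def sample_unbiased_index(seed: Bytes32, bound: uint64) -> uint64:
    # Retry until the 256-bit draw falls below the largest multiple of `bound`,
    # so the final modulo reduction is uniform over [0, bound).
    max_valid = (2**256 // int(bound)) * int(bound)
    counter = 0
    while True:
        candidate = uint256(hash(seed + uint_to_bytes(uint64(counter))))
        if candidate < max_valid:
            return uint64(candidate % int(bound))
        counter += 1
```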
```python
def process_shuffled_trackers(state: BeaconState, body: BeaconBlockBody) -> None:
# Check the shuffle proof
shuffle_indices = get_shuffle_indices(body.randao_reveal)
pre_shuffle_trackers = [state.whisk_candidate_trackers[i] for i in shuffle_indices]
post_shuffle_trackers = body.whisk_post_shuffle_trackers
shuffle_epoch = get_current_epoch(state) % WHISK_EPOCHS_PER_SHUFFLING_PHASE
if shuffle_epoch + WHISK_PROPOSER_SELECTION_GAP + 1 >= WHISK_EPOCHS_PER_SHUFFLING_PHASE:
# Require unchanged trackers during cooldown
assert pre_shuffle_trackers == post_shuffle_trackers
else:
# Require shuffled trackers during shuffle
assert IsValidWhiskShuffleProof(
pre_shuffle_trackers,
post_shuffle_trackers,
body.whisk_shuffle_proof_M_commitment,
body.whisk_shuffle_proof,
)
# Shuffle candidate trackers
for i, shuffle_index in enumerate(shuffle_indices):
state.whisk_candidate_trackers[shuffle_index] = post_shuffle_trackers[i]
```
```python
def is_k_commitment_unique(state: BeaconState, k_commitment: BLSG1Point) -> bool:
return all([validator.whisk_k_commitment != k_commitment for validator in state.validators])
```
```python
def process_whisk(state: BeaconState, body: BeaconBlockBody) -> None:
process_shuffled_trackers(state, body)
# Overwrite all validator Whisk fields (first Whisk proposal) or just the permutation commitment (next proposals)
proposer = state.validators[get_beacon_proposer_index(state)]
if proposer.whisk_tracker.r_G == BLS_G1_GENERATOR: # first Whisk proposal
assert body.whisk_tracker.r_G != BLS_G1_GENERATOR
assert is_k_commitment_unique(state, body.whisk_k_commitment)
assert IsValidWhiskOpeningProof(
body.whisk_tracker,
body.whisk_k_commitment,
body.whisk_registration_proof,
)
proposer.whisk_tracker = body.whisk_tracker
proposer.whisk_k_commitment = body.whisk_k_commitment
else: # next Whisk proposals
assert body.whisk_registration_proof == WhiskTrackerProof()
assert body.whisk_tracker == WhiskTracker()
assert body.whisk_k_commitment == BLSG1Point()
assert body.whisk_shuffle_proof_M_commitment == BLSG1Point()
```
```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
process_block_header(state, block)
if is_execution_enabled(state, block.body):
process_withdrawals(state, block.body.execution_payload)
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
process_randao(state, block.body)
process_eth1_data(state, block.body)
process_operations(state, block.body)
process_sync_aggregate(state, block.body.sync_aggregate)
process_whisk(state, block.body) # [New in Whisk]
```
### Deposits
```python
def get_unique_whisk_k(state: BeaconState, validator_index: ValidatorIndex) -> BLSFieldElement:
counter = 0
while True:
# hash `validator_index || counter`
k = BLSFieldElement(bytes_to_bls_field(hash(uint_to_bytes(validator_index) + uint_to_bytes(uint64(counter)))))
if is_k_commitment_unique(state, BLSG1ScalarMultiply(k, BLS_G1_GENERATOR)):
return k # unique by trial and error
counter += 1
```
```python
def get_initial_commitments(k: BLSFieldElement) -> Tuple[BLSG1Point, WhiskTracker]:
return (
BLSG1ScalarMultiply(k, BLS_G1_GENERATOR),
WhiskTracker(r_G=BLS_G1_GENERATOR, k_r_G=BLSG1ScalarMultiply(k, BLS_G1_GENERATOR))
)
```
```python
def get_validator_from_deposit_whisk(
state: BeaconState,
pubkey: BLSPubkey,
withdrawal_credentials: Bytes32,
amount: uint64
) -> Validator:
effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
k = get_unique_whisk_k(state, ValidatorIndex(len(state.validators)))
whisk_k_commitment, whisk_tracker = get_initial_commitments(k)
validator = Validator(
pubkey=pubkey,
withdrawal_credentials=withdrawal_credentials,
activation_eligibility_epoch=FAR_FUTURE_EPOCH,
activation_epoch=FAR_FUTURE_EPOCH,
exit_epoch=FAR_FUTURE_EPOCH,
withdrawable_epoch=FAR_FUTURE_EPOCH,
effective_balance=effective_balance,
# Whisk fields
whisk_tracker=whisk_tracker,
whisk_k_commitment=whisk_k_commitment,
)
return validator
```
```python
def apply_deposit(state: BeaconState,
pubkey: BLSPubkey,
withdrawal_credentials: Bytes32,
amount: uint64,
signature: BLSSignature) -> None:
validator_pubkeys = [validator.pubkey for validator in state.validators]
if pubkey not in validator_pubkeys:
# Verify the deposit signature (proof of possession) which is not checked by the deposit contract
deposit_message = DepositMessage(
pubkey=pubkey,
withdrawal_credentials=withdrawal_credentials,
amount=amount,
)
domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks
signing_root = compute_signing_root(deposit_message, domain)
# Initialize validator if the deposit signature is valid
if bls.Verify(pubkey, signing_root, signature):
index = get_index_for_new_validator(state)
validator = get_validator_from_deposit_whisk(state, pubkey, withdrawal_credentials, amount)
set_or_append_list(state.validators, index, validator)
set_or_append_list(state.balances, index, amount)
# [New in Altair]
set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000))
set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000))
set_or_append_list(state.inactivity_scores, index, uint64(0))
else:
# Increase balance by deposit amount
index = ValidatorIndex(validator_pubkeys.index(pubkey))
increase_balance(state, index, amount)
```
### `get_beacon_proposer_index`
```python
def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
"""
Return the beacon proposer index at the current slot.
"""
assert state.latest_block_header.slot == state.slot # sanity check `process_block_header` has been called
return state.latest_block_header.proposer_index
```

View File

@ -0,0 +1,138 @@
# Whisk -- Fork Logic
**Notice**: This document is a work-in-progress for researchers and implementers.
## Table of contents
<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
- [Introduction](#introduction)
- [Configuration](#configuration)
- [Fork to Whisk](#fork-to-whisk)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
## Introduction
This document describes the process of the Whisk upgrade.
```
"""
WHISK_FORK_EPOCH
    |                     cooldown
    |                       |    |
    v                       v    v
  --+~~~~~~~~~~~~~~~~~~~~~~~-----+--
    shuffling                    ^
                                 |
                                 |
                 proposer selection
                candidate selection
"""
```
## Configuration
Warning: this configuration is not definitive.
| Name | Value |
| -------------------- | ----------------------- |
| `WHISK_FORK_VERSION` | `Version('0x05000000')` |
| `WHISK_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
## Fork to Whisk
If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == WHISK_FORK_EPOCH`, an irregular state change is made to upgrade to Whisk. `WHISK_FORK_EPOCH` must be a multiple of `WHISK_EPOCHS_PER_SHUFFLING_PHASE`.
The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `WHISK_FORK_EPOCH * SLOTS_PER_EPOCH`.
This ensures that we drop right into the beginning of the shuffling phase without `process_whisk_updates()` triggering for this Whisk run. Hence we handle all the setup ourselves in `upgrade_to_whisk()` below.
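The trigger can be sketched as follows (illustrative only; actual fork scheduling lives in client state-transition code, and `maybe_upgrade_to_whisk` is a hypothetical wrapper):

```python
def maybe_upgrade_to_whisk(state: BeaconState) -> BeaconState:
    # Irregular state change at the Whisk fork boundary, per the rule above.
    if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == WHISK_FORK_EPOCH:
        return upgrade_to_whisk(state)
    return state
```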
```python
def whisk_candidate_selection(state: BeaconState, epoch: Epoch) -> None:
# TODO
# pylint: disable=unused-argument
pass
```
```python
def whisk_proposer_selection(state: BeaconState, epoch: Epoch) -> None:
# TODO
# pylint: disable=unused-argument
pass
```
```python
def upgrade_to_whisk(pre: capella.BeaconState) -> BeaconState:
epoch = capella.get_current_epoch(pre)
post = BeaconState(
# Versioning
genesis_time=pre.genesis_time,
genesis_validators_root=pre.genesis_validators_root,
slot=pre.slot,
fork=Fork(
previous_version=pre.fork.current_version,
current_version=WHISK_FORK_VERSION,
epoch=epoch,
),
# History
latest_block_header=pre.latest_block_header,
block_roots=pre.block_roots,
state_roots=pre.state_roots,
historical_roots=pre.historical_roots,
# Eth1
eth1_data=pre.eth1_data,
eth1_data_votes=pre.eth1_data_votes,
eth1_deposit_index=pre.eth1_deposit_index,
# Registry
validators=[],
balances=pre.balances,
# Randomness
randao_mixes=pre.randao_mixes,
# Slashings
slashings=pre.slashings,
# Participation
previous_epoch_participation=pre.previous_epoch_participation,
current_epoch_participation=pre.current_epoch_participation,
# Finality
justification_bits=pre.justification_bits,
previous_justified_checkpoint=pre.previous_justified_checkpoint,
current_justified_checkpoint=pre.current_justified_checkpoint,
finalized_checkpoint=pre.finalized_checkpoint,
# Inactivity
inactivity_scores=pre.inactivity_scores,
)
# Initialize all validators with predictable commitments
for val_index, pre_validator in enumerate(pre.validators):
whisk_k_commitment, whisk_tracker = get_initial_commitments(get_unique_whisk_k(post, ValidatorIndex(val_index)))
post_validator = Validator(
pubkey=pre_validator.pubkey,
withdrawal_credentials=pre_validator.withdrawal_credentials,
effective_balance=pre_validator.effective_balance,
slashed=pre_validator.slashed,
activation_eligibility_epoch=pre_validator.activation_eligibility_epoch,
activation_epoch=pre_validator.activation_epoch,
exit_epoch=pre_validator.exit_epoch,
withdrawable_epoch=pre_validator.withdrawable_epoch,
whisk_k_commitment=whisk_k_commitment,
whisk_tracker=whisk_tracker,
)
post.validators.append(post_validator)
# Do a candidate selection followed by a proposer selection so that we have proposers for the upcoming day
# Use an old epoch when selecting candidates so that we don't get the same seed as in the next candidate selection
whisk_candidate_selection(post, epoch - WHISK_PROPOSER_SELECTION_GAP - 1)
whisk_proposer_selection(post, epoch)
# Do a final round of candidate selection.
# We need it so that we have something to shuffle over the upcoming shuffling phase.
whisk_candidate_selection(post, epoch)
return post
```

View File

@ -14,7 +14,6 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas
- [Warning](#warning)
- [Modifications in Bellatrix](#modifications-in-bellatrix)
- [Configuration](#configuration)
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
- [Topics and messages](#topics-and-messages)
- [Global topics](#global-topics)
@ -41,15 +40,6 @@ Refer to the note in the [validator guide](./validator.md) for further details.
## Modifications in Bellatrix
### Configuration
This section outlines the modified constants that are used in this spec.
| Name | Value | Description |
|---|---|---|
| `GOSSIP_MAX_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed gossip messages starting at Bellatrix upgrade. |
| `MAX_CHUNK_SIZE_BELLATRIX` | `10 * 2**20` (= 10,485,760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses starting at Bellatrix upgrade. |
### The gossip domain: gossipsub
Some gossip meshes are upgraded in Bellatrix to support upgraded types.
@ -61,11 +51,6 @@ All topics remain stable except the beacon block topic which is updated with the
The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 and Altair documents unless explicitly noted here.
Starting at Bellatrix upgrade, each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24)
has a maximum size of `GOSSIP_MAX_SIZE_BELLATRIX`.
Clients MUST reject (fail validation) messages that are over this size limit.
Likewise, clients MUST NOT emit or propagate messages larger than this limit.
The derivation of the `message-id` remains stable.
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
@ -130,10 +115,6 @@ down-scoring or disconnection.
Request and Response remain unchanged unless explicitly noted here.
Starting at Bellatrix upgrade,
a global maximum uncompressed byte size of `MAX_CHUNK_SIZE_BELLATRIX` MUST be applied to all method response chunks
regardless of type specific bounds that *MUST* also be respected.
Bellatrix fork-digest is introduced to the `context` enum to specify Bellatrix block type.
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
@ -171,17 +152,12 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
With the addition of `ExecutionPayload` to `BeaconBlock`s, there is a dynamic
field -- `transactions` -- which can validly exceed the `GOSSIP_MAX_SIZE` limit (1 MiB) put in
place at Phase 0. At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
place at Phase 0, so `GOSSIP_MAX_SIZE` has been increased to 10 MiB on the network.
At the `GAS_LIMIT` (~30M) currently seen on mainnet in 2021, a single transaction
filled entirely with data at a cost of 16 gas per byte can create a valid
`ExecutionPayload` of ~2 MiB. Thus we need a size limit to at least account for
current mainnet conditions.
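The ~2 MiB figure follows from simple gas arithmetic; a rough check (constants are the approximate 2021 mainnet gas limit and the EIP-2028 non-zero calldata cost):

```python
GAS_LIMIT = 30_000_000      # approximate mainnet block gas limit
CALLDATA_GAS_PER_BYTE = 16  # non-zero calldata byte cost (EIP-2028)
max_data_bytes = GAS_LIMIT // CALLDATA_GAS_PER_BYTE
print(max_data_bytes / 2**20)  # ~1.8 MiB of raw calldata; ~2 MiB with payload overhead
```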
Geth currently has a [max gossip message size](https://github.com/ethereum/go-ethereum/blob/3ce9f6d96f38712f5d6756e97b59ccc20cc403b3/eth/protocols/eth/protocol.go#L49) of 10 MiB.
To support backward compatibility with this previously defined network limit,
we adopt `GOSSIP_MAX_SIZE_BELLATRIX` of 10 MiB for maximum gossip sizes at the
point of Bellatrix and beyond. Note that clients SHOULD still reject objects
that exceed their maximum theoretical bounds, which in most cases is less than `GOSSIP_MAX_SIZE_BELLATRIX`.
Note that due to the additional size induced by the `BeaconBlock` contents (e.g.
proposer signature, operations lists, etc.) this does reduce the
theoretical max valid `ExecutionPayload` (and `transactions` list) size as

View File

@ -33,7 +33,8 @@
- [Modified `verify_and_notify_new_payload`](#modified-verify_and_notify_new_payload)
- [Block processing](#block-processing)
- [Execution payload](#execution-payload)
- [`process_execution_payload`](#process_execution_payload)
- [Modified `process_execution_payload`](#modified-process_execution_payload)
- [Modified `process_voluntary_exit`](#modified-process_voluntary_exit)
- [Testing](#testing)
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
@ -41,16 +42,16 @@
## Introduction
This upgrade adds blobs to the beacon chain as part of Deneb. This is an extension of the Capella upgrade.
The blob transactions are packed into the execution payload by the EL/builder with their corresponding blobs being independently transmitted and are limited by `MAX_DATA_GAS_PER_BLOCK // DATA_GAS_PER_BLOB`. However the CL limit is independently defined by `MAX_BLOBS_PER_BLOCK`.
Deneb is a consensus-layer upgrade containing a number of features, including:
* [EIP-4844](https://eips.ethereum.org/EIPS/eip-4844): Shard Blob Transactions scale data-availability of Ethereum in a simple, forwards-compatible manner
* [EIP-7044](https://github.com/ethereum/EIPs/pull/7044): Perpetually Valid Signed Voluntary Exits
## Custom types
| Name | SSZ equivalent | Description |
| - | - | - |
| `VersionedHash` | `Bytes32` | |
| `BlobIndex` | `uint64` | |
| `VersionedHash` | `Bytes32` | *[New in Deneb:EIP4844]* |
| `BlobIndex` | `uint64` | *[New in Deneb:EIP4844]* |
## Constants
@ -73,8 +74,11 @@ The blob transactions are packed into the execution payload by the EL/builder wi
| Name | Value | Description |
| - | - | - |
| `MAX_BLOB_COMMITMENTS_PER_BLOCK` | `uint64(2**12)` (= 4096) | hardfork independent fixed theoretical limit same as `LIMIT_BLOBS_PER_TX` (see EIP 4844) |
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**2)` (= 4) | Maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` |
| `MAX_BLOB_COMMITMENTS_PER_BLOCK` | `uint64(2**12)` (= 4096) | *[New in Deneb:EIP4844]* hardfork independent fixed theoretical limit same as `LIMIT_BLOBS_PER_TX` (see EIP 4844) |
| `MAX_BLOBS_PER_BLOCK` | `uint64(6)` | *[New in Deneb:EIP4844]* maximum number of blobs in a single block limited by `MAX_BLOB_COMMITMENTS_PER_BLOCK` |
*Note*: The blob transactions are packed into the execution payload by the EL/builder with their corresponding blobs being independently transmitted
and are limited by `MAX_DATA_GAS_PER_BLOCK // DATA_GAS_PER_BLOB`. However, the CL limit is independently defined by `MAX_BLOBS_PER_BLOCK`.
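Under those parameters the EL-side bound works out to the same value; a quick check, assuming the EIP-4844 values `MAX_DATA_GAS_PER_BLOCK = 786432` and `DATA_GAS_PER_BLOB = 131072`:

```python
MAX_DATA_GAS_PER_BLOCK = 786432  # assumed from EIP-4844
DATA_GAS_PER_BLOB = 131072       # assumed from EIP-4844
assert MAX_DATA_GAS_PER_BLOCK // DATA_GAS_PER_BLOB == 6  # equals MAX_BLOBS_PER_BLOCK
```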
## Configuration
@ -100,9 +104,9 @@ class BeaconBlockBody(Container):
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
sync_aggregate: SyncAggregate
# Execution
execution_payload: ExecutionPayload # [Modified in Deneb]
execution_payload: ExecutionPayload # [Modified in Deneb:EIP4844]
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] # [New in Deneb]
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK] # [New in Deneb:EIP4844]
```
#### `ExecutionPayload`
@ -126,7 +130,8 @@ class ExecutionPayload(Container):
block_hash: Hash32 # Hash of execution block
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
excess_data_gas: uint256 # [New in Deneb]
data_gas_used: uint64 # [New in Deneb:EIP4844]
excess_data_gas: uint64 # [New in Deneb:EIP4844]
```
#### `ExecutionPayloadHeader`
@ -150,7 +155,8 @@ class ExecutionPayloadHeader(Container):
block_hash: Hash32 # Hash of execution block
transactions_root: Root
withdrawals_root: Root
excess_data_gas: uint256 # [New in Deneb]
data_gas_used: uint64 # [New in Deneb:EIP4844]
excess_data_gas: uint64 # [New in Deneb:EIP4844]
```
## Helper functions
@ -203,7 +209,7 @@ def verify_and_notify_new_payload(self: ExecutionEngine,
if not self.is_valid_block_hash(new_payload_request.execution_payload):
return False
# [New in Deneb]
# [New in Deneb:EIP4844]
if not self.is_valid_versioned_hashes(new_payload_request):
return False
@ -217,7 +223,7 @@ def verify_and_notify_new_payload(self: ExecutionEngine,
#### Execution payload
##### `process_execution_payload`
##### Modified `process_execution_payload`
```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
@ -230,11 +236,11 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
# Verify timestamp
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
# [New in Deneb] Verify commitments are under limit
# [New in Deneb:EIP4844] Verify commitments are under limit
assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK
# Verify the execution payload is valid
# [Modified in Deneb] Pass `versioned_hashes` to Execution Engine
# [Modified in Deneb:EIP4844] Pass `versioned_hashes` to Execution Engine
versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
assert execution_engine.verify_and_notify_new_payload(
NewPayloadRequest(execution_payload=payload, versioned_hashes=versioned_hashes)
@ -257,10 +263,36 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
block_hash=payload.block_hash,
transactions_root=hash_tree_root(payload.transactions),
withdrawals_root=hash_tree_root(payload.withdrawals),
excess_data_gas=payload.excess_data_gas, # [New in Deneb]
data_gas_used=payload.data_gas_used, # [New in Deneb:EIP4844]
excess_data_gas=payload.excess_data_gas, # [New in Deneb:EIP4844]
)
```
#### Modified `process_voluntary_exit`
*Note*: The function `process_voluntary_exit` is modified to use a fixed fork version -- `CAPELLA_FORK_VERSION` -- for EIP-7044.
```python
def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVoluntaryExit) -> None:
voluntary_exit = signed_voluntary_exit.message
validator = state.validators[voluntary_exit.validator_index]
# Verify the validator is active
assert is_active_validator(validator, get_current_epoch(state))
# Verify exit has not been initiated
assert validator.exit_epoch == FAR_FUTURE_EPOCH
# Exits must specify an epoch when they become valid; they are not valid before then
assert get_current_epoch(state) >= voluntary_exit.epoch
# Verify the validator has been active long enough
assert get_current_epoch(state) >= validator.activation_epoch + SHARD_COMMITTEE_PERIOD
# Verify signature
# [Modified in Deneb:EIP7044]
domain = compute_domain(DOMAIN_VOLUNTARY_EXIT, CAPELLA_FORK_VERSION, state.genesis_validators_root)
signing_root = compute_signing_root(voluntary_exit, domain)
assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)
# Initiate exit
initiate_validator_exit(state, voluntary_exit.validator_index)
```
## Testing
*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only.

View File

@ -8,7 +8,6 @@
- [Introduction](#introduction)
- [Containers](#containers)
- [Helpers](#helpers)
- [`validate_blobs`](#validate_blobs)
- [`is_data_available`](#is_data_available)
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
- [`on_block`](#on_block)
@ -24,21 +23,12 @@ This is the modification of the fork choice accompanying the Deneb upgrade.
## Helpers
#### `validate_blobs`
```python
def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment],
blobs: Sequence[Blob],
proofs: Sequence[KZGProof]) -> None:
assert len(expected_kzg_commitments) == len(blobs) == len(proofs)
assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs)
```
#### `is_data_available`
*[New in Deneb:EIP4844]*
The implementation of `is_data_available` will become more sophisticated during later scaling upgrades.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`.
Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `verify_blob_kzg_proof_batch`.
The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned.
@ -46,7 +36,8 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa
def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
# `retrieve_blobs_and_proofs` is implementation and context dependent
# It returns all the blobs for the given block root, and raises an exception if not available
# Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
# Note: the p2p network does not guarantee sidecar retrieval outside of
# `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root)
# For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST").
@ -54,8 +45,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ
if isinstance(blobs, str) or isinstance(proofs, str):
return True
validate_blobs(blob_kzg_commitments, blobs, proofs)
return True
return verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, proofs)
```
## Updated fork-choice handlers
@ -88,7 +78,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
)
assert store.finalized_checkpoint.root == finalized_checkpoint_block
# [New in Deneb]
# [New in Deneb:EIP4844]
# Check if blob data is available
# If not, this block MAY be queued and subsequently considered when blob data becomes available
assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments)

View File

@ -83,7 +83,8 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
block_hash=pre.latest_execution_payload_header.block_hash,
transactions_root=pre.latest_execution_payload_header.transactions_root,
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
excess_data_gas=uint256(0), # [New in Deneb]
data_gas_used=uint64(0), # [New in Deneb:EIP4844]
excess_data_gas=uint64(0), # [New in Deneb:EIP4844]
)
post = BeaconState(
# Versioning
@ -125,7 +126,7 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState:
current_sync_committee=pre.current_sync_committee,
next_sync_committee=pre.next_sync_committee,
# Execution-layer
latest_execution_payload_header=latest_execution_payload_header, # [Modified in Deneb]
latest_execution_payload_header=latest_execution_payload_header, # [Modified in Deneb:EIP4844]
# Withdrawals
next_withdrawal_index=pre.next_withdrawal_index,
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,

View File

@ -41,7 +41,8 @@ def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHea
block_hash=pre.execution.block_hash,
transactions_root=pre.execution.transactions_root,
withdrawals_root=pre.execution.withdrawals_root,
excess_data_gas=uint256(0), # [New in Deneb]
data_gas_used=uint64(0), # [New in Deneb:EIP4844]
excess_data_gas=uint64(0), # [New in Deneb:EIP4844]
),
execution_branch=pre.execution_branch,
)

View File

@ -47,8 +47,9 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader:
withdrawals_root=hash_tree_root(payload.withdrawals),
)
# [New in Deneb]
# [New in Deneb:EIP4844]
if epoch >= DENEB_FORK_EPOCH:
execution_header.data_gas_used = payload.data_gas_used
execution_header.excess_data_gas = payload.excess_data_gas
execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX)

View File

@ -66,9 +66,9 @@ def get_lc_execution_root(header: LightClientHeader) -> Root:
def is_valid_light_client_header(header: LightClientHeader) -> bool:
epoch = compute_epoch_at_slot(header.beacon.slot)
# [New in Deneb]
# [New in Deneb:EIP4844]
if epoch < DENEB_FORK_EPOCH:
if header.execution.excess_data_gas != uint256(0):
if header.execution.data_gas_used != uint64(0) or header.execution.excess_data_gas != uint64(0):
return False
if epoch < CAPELLA_FORK_EPOCH:

View File

@ -40,16 +40,21 @@ The specification of these changes continues in the same format as the network s
### Configuration
*[New in Deneb:EIP4844]*
| Name | Value | Description |
|------------------------------------------|-----------------------------------|---------------------------------------------------------------------|
| `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request |
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request |
| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars |
| `BLOB_SIDECAR_SUBNET_COUNT` | `6` | The number of blob sidecar subnets used in the gossipsub protocol. |
### Containers
#### `BlobSidecar`
*[New in Deneb:EIP4844]*
```python
class BlobSidecar(Container):
block_root: Root
@ -64,6 +69,8 @@ class BlobSidecar(Container):
#### `SignedBlobSidecar`
*[New in Deneb:EIP4844]*
```python
class SignedBlobSidecar(Container):
message: BlobSidecar
@ -72,6 +79,8 @@ class SignedBlobSidecar(Container):
#### `BlobIdentifier`
*[New in Deneb:EIP4844]*
```python
class BlobIdentifier(Container):
block_root: Root
@ -107,7 +116,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar
| Name | Message Type |
| - | - |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` (new) |
| `blob_sidecar_{subnet_id}` | `SignedBlobSidecar` [New in Deneb:EIP4844] |
##### Global topics
@ -124,6 +133,8 @@ New validation:
###### `blob_sidecar_{subnet_id}`
*[New in Deneb:EIP4844]*
This topic is used to propagate signed blob sidecars, where each blob index maps to some `subnet_id`.
The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`:
@ -191,7 +202,7 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time.
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/`
New in deneb.
*[New in Deneb:EIP4844]*
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
@ -240,7 +251,7 @@ Clients MAY limit the number of blocks and sidecars in the response.
**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/`
New in deneb.
*[New in Deneb:EIP4844]*
The `<context-bytes>` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`:
@ -269,7 +280,7 @@ Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, lea
The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`.
Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `verify_blob_kzg_proof_batch`.
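A reader-side check along those lines might look like this (a sketch, assuming the `BlobSidecar` carries `blob`, `kzg_commitment`, and `kzg_proof` fields):

```python
def verify_sidecar_chunk(sidecar: BlobSidecar, expected_commitment: KZGCommitment) -> bool:
    # Verify one response chunk against the commitment expected for its blob index.
    if sidecar.kzg_commitment != expected_commitment:
        return False
    return verify_blob_kzg_proof_batch([sidecar.blob], [sidecar.kzg_commitment], [sidecar.kzg_proof])
```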
`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window.
@ -278,18 +289,18 @@ The request MUST be encoded as an SSZ-container.
The response MUST consist of zero or more `response_chunk`.
Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload.
Clients MUST keep a record of signed blobs sidecars seen on the epoch range
`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`
Let `blob_serve_range` be `[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]`.
Clients MUST keep a record of signed blob sidecars seen on the epoch range `blob_serve_range`
where `current_epoch` is defined by the current wall-clock time,
and clients MUST support serving requests of blobs on this range.
Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
Peers that are unable to reply to blob sidecar requests within the
range `blob_serve_range` SHOULD respond with error code `3: ResourceUnavailable`.
Such peers that are unable to successfully reply to this range of requests MAY get descored
or disconnected at any time.
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint
MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS`
MUST backfill the local blobs database to at least the range `blob_serve_range`
to be fully compliant with `BlobSidecarsByRange` requests.
*Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin

View File

@ -49,7 +49,7 @@
## Introduction
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
This document specifies basic polynomial operations and KZG polynomial commitment operations that are essential for the implementation of the EIP-4844 feature in the Deneb specification. The implementations are not optimized for performance, but for readability. All practical implementations should optimize the polynomial operations.
Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.

View File

@ -10,8 +10,6 @@
- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Constants](#constants)
- [Misc](#misc)
- [Helpers](#helpers)
- [`BlobsBundle`](#blobsbundle)
- [Modified `GetPayloadResponse`](#modified-getpayloadresponse)
@ -40,18 +38,12 @@ All behaviors and definitions defined in this document, and documents it extends
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout.
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
## Constants
### Misc
| Name | Value | Description |
| - | - | :-: |
| `BLOB_SIDECAR_SUBNET_COUNT` | `4` | The number of blob sidecar subnets used in the gossipsub protocol. |
## Helpers
### `BlobsBundle`
*[New in Deneb:EIP4844]*
```python
@dataclass
class BlobsBundle(object):
@ -67,7 +59,7 @@ class BlobsBundle(object):
class GetPayloadResponse(object):
execution_payload: ExecutionPayload
block_value: uint256
blobs_bundle: BlobsBundle
blobs_bundle: BlobsBundle # [New in Deneb:EIP4844]
```
## Protocol
@ -98,6 +90,8 @@ All validator responsibilities remain unchanged other than those noted below.
##### Blob KZG commitments
*[New in Deneb:EIP4844]*
1. After retrieving the execution payload from the execution engine as specified in Capella,
use the `payload_id` to retrieve `blobs`, `blob_kzg_commitments`, and `blob_kzg_proofs`
via `get_payload(payload_id).blobs_bundle`.
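A minimal sketch of this retrieval step (the `BlobsBundle` field names `blobs`, `commitments`, and `proofs` are assumed here, as the dataclass body is elided above):

```python
# A hedged sketch, assuming an `execution_engine` object exposing the
# `get_payload` call from this document.
def retrieve_blobs_bundle(execution_engine, payload_id):
    bundle = execution_engine.get_payload(payload_id).blobs_bundle  # [New in Deneb:EIP4844]
    return bundle.blobs, bundle.commitments, bundle.proofs
```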
@ -105,6 +99,8 @@ via `get_payload(payload_id).blobs_bundle`.
#### Constructing the `SignedBlobSidecar`s
*[New in Deneb:EIP4844]*
To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal.
##### Sidecar

View File

@ -189,11 +189,11 @@ This section outlines configurations that are used in this spec.
| Name | Value | Description |
|---|---|---|
| `GOSSIP_MAX_SIZE` | `2**20` (= 1048576, 1 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `GOSSIP_MAX_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed gossip messages. |
| `MAX_REQUEST_BLOCKS` | `2**10` (= 1024) | Maximum number of blocks in a single request |
| `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | Number of epochs on a subnet subscription (~27 hours) |
| `MIN_EPOCHS_FOR_BLOCK_REQUESTS` | `MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2` (= 33024, ~5 months) | The minimum epoch range over which a node must serve blocks |
| `MAX_CHUNK_SIZE` | `2**20` (=1048576, 1 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `MAX_CHUNK_SIZE` | `10 * 2**20` (= 10485760, 10 MiB) | The maximum allowed size of uncompressed req/resp chunked responses. |
| `TTFB_TIMEOUT` | `5` | The maximum duration in **seconds** to wait for first byte of request response (time-to-first-byte). |
| `RESP_TIMEOUT` | `10` | The maximum duration in **seconds** for complete response transfer. |
| `ATTESTATION_PROPAGATION_SLOT_RANGE` | `32` | The maximum number of slots during which an attestation can be propagated. |
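A quick worked check of the derived values above, assuming the mainnet constants `MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8` and `CHURN_LIMIT_QUOTIENT = 2**16`:

```python
# Sanity-check the derived table values (mainnet constants assumed).
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 2**8   # 256
CHURN_LIMIT_QUOTIENT = 2**16                 # 65536
assert MIN_VALIDATOR_WITHDRAWABILITY_DELAY + CHURN_LIMIT_QUOTIENT // 2 == 33024
assert 10 * 2**20 == 10485760  # GOSSIP_MAX_SIZE and MAX_CHUNK_SIZE
```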

View File

@ -1 +1 @@
1.4.0-alpha.0
1.4.0-alpha.3

View File

@ -2,6 +2,11 @@ from eth2spec.test.context import (
spec_state_test,
always_bls,
with_bellatrix_and_later,
with_phases,
)
from eth2spec.test.helpers.constants import (
BELLATRIX,
CAPELLA,
)
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import (
@ -12,8 +17,10 @@ from eth2spec.test.helpers.voluntary_exits import (
sign_voluntary_exit,
)
BELLATRIX_AND_CAPELLA = [BELLATRIX, CAPELLA]
def _run_voluntary_exit_processing_test(
def run_voluntary_exit_processing_test(
spec,
state,
fork_version,
@ -51,7 +58,7 @@ def _run_voluntary_exit_processing_test(
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(spec, state):
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
@ -60,11 +67,11 @@ def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(s
)
@with_bellatrix_and_later
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
@ -72,13 +79,13 @@ def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec,
)
@with_bellatrix_and_later
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
@ -86,13 +93,13 @@ def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, st
)
@with_bellatrix_and_later
@with_phases(BELLATRIX_AND_CAPELLA)
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
assert state.fork.previous_version != state.fork.current_version
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
@ -107,7 +114,7 @@ def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_ep
def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,
@ -122,7 +129,7 @@ def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(s
def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(spec, state):
assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)
yield from _run_voluntary_exit_processing_test(
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=spec.config.GENESIS_FORK_VERSION,

View File

@ -0,0 +1,89 @@
from eth2spec.test.context import (
always_bls,
spec_state_test,
with_phases,
with_deneb_and_later,
)
from eth2spec.test.helpers.constants import (
DENEB,
)
from eth2spec.test.bellatrix.block_processing.test_process_voluntary_exit import (
run_voluntary_exit_processing_test,
)
@with_deneb_and_later
@spec_state_test
@always_bls
def test_invalid_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
"""
Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`
"""
assert state.fork.current_version != spec.config.CAPELLA_FORK_VERSION
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.current_version,
is_before_fork_epoch=False,
valid=False,
)
@with_deneb_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
"""
Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`
Note: This test is valid for ``spec.fork == DENEB`` and invalid for subsequent forks
"""
assert state.fork.previous_version != state.fork.current_version
if spec.fork == DENEB:
assert state.fork.previous_version == spec.config.CAPELLA_FORK_VERSION
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=False,
)
else:
assert state.fork.previous_version != spec.config.CAPELLA_FORK_VERSION
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=False,
valid=False,
)
@with_deneb_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
"""
Since Deneb, the VoluntaryExit domain is fixed to `CAPELLA_FORK_VERSION`
Note: This test is valid for ``spec.fork == DENEB`` and invalid for subsequent forks
"""
assert state.fork.previous_version != state.fork.current_version
if spec.fork == DENEB:
assert state.fork.previous_version == spec.config.CAPELLA_FORK_VERSION
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=True,
)
else:
assert state.fork.previous_version != spec.config.CAPELLA_FORK_VERSION
yield from run_voluntary_exit_processing_test(
spec,
state,
fork_version=state.fork.previous_version,
is_before_fork_epoch=True,
valid=False,
)
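A compact sketch of the domain-selection rule these tests exercise, mirroring the helper change later in this commit:

```python
# Sketch, assuming spec-style helpers: since Deneb the VoluntaryExit domain
# is pinned to CAPELLA_FORK_VERSION, so exits stay valid across future forks.
def voluntary_exit_domain(spec, state):
    return spec.compute_domain(
        spec.DOMAIN_VOLUNTARY_EXIT,
        spec.config.CAPELLA_FORK_VERSION,
        state.genesis_validators_root,
    )
```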

View File

@ -16,13 +16,14 @@ from eth2spec.test.helpers.sharding import (
)
def run_block_with_blobs(spec, state, blob_count, excess_data_gas=1, valid=True):
def run_block_with_blobs(spec, state, blob_count, data_gas_used=1, excess_data_gas=1, valid=True):
yield 'pre', state
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.data_gas_used = data_gas_used
block.body.execution_payload.excess_data_gas = excess_data_gas
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
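A hypothetical usage of the updated helper (test name invented for illustration; decorators as imported elsewhere in this commit):

```python
@with_deneb_and_later
@spec_state_test
def test_one_blob_nonzero_data_gas(spec, state):  # hypothetical test name
    yield from run_block_with_blobs(spec, state, blob_count=1, data_gas_used=1)
```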

View File

@ -1,54 +0,0 @@
from eth2spec.test.helpers.state import (
state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
build_empty_block_for_next_slot
)
from eth2spec.test.context import (
spec_state_test,
with_deneb_and_later,
)
from eth2spec.test.helpers.execution_payload import (
compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
get_sample_opaque_tx,
)
def _run_validate_blobs(spec, state, blob_count):
block = build_empty_block_for_next_slot(spec, state)
opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count)
block.body.blob_kzg_commitments = blob_kzg_commitments
block.body.execution_payload.transactions = [opaque_tx]
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
state_transition_and_sign_block(spec, state, block)
blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs)
blobs = [sidecar.blob for sidecar in blob_sidecars]
kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars]
spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_zero_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=0)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_one_blob(spec, state):
_run_validate_blobs(spec, state, blob_count=1)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_two_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=2)
@with_deneb_and_later
@spec_state_test
def test_validate_blobs_max_blobs(spec, state):
_run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)

View File

@ -10,3 +10,13 @@ from eth2spec.test.context import (
@single_phase
def test_length(spec):
assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
@with_deneb_and_later
@spec_test
@single_phase
def test_networking(spec):
assert spec.MAX_BLOBS_PER_BLOCK < spec.MAX_BLOB_COMMITMENTS_PER_BLOCK
assert spec.config.MAX_REQUEST_BLOB_SIDECARS == spec.config.MAX_REQUEST_BLOCKS_DENEB * spec.MAX_BLOBS_PER_BLOCK
# These start at the same value, but `BLOB_SIDECAR_SUBNET_COUNT` could increase independently later.
assert spec.config.BLOB_SIDECAR_SUBNET_COUNT == spec.MAX_BLOBS_PER_BLOCK
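Worked with the mainnet configuration (`MAX_REQUEST_BLOCKS_DENEB = 128`, `MAX_BLOBS_PER_BLOCK = 6`), the relation checks out:

```python
# Mainnet values assumed; see the configuration files in this commit.
assert 128 * 6 == 768  # MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK == MAX_REQUEST_BLOB_SIDECARS
```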

View File

@ -31,6 +31,7 @@ def get_execution_payload_header(spec, execution_payload):
if is_post_capella(spec):
payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals)
if is_post_deneb(spec):
payload_header.data_gas_used = execution_payload.data_gas_used
payload_header.excess_data_gas = execution_payload.excess_data_gas
if is_post_eip6110(spec):
payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts)
@ -98,6 +99,7 @@ def compute_el_header_block_hash(spec,
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
if is_post_deneb(spec):
# data_gas_used
execution_payload_header_rlp.append((big_endian_int, payload_header.data_gas_used))
# excess_data_gas
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
if is_post_eip6110(spec):
# deposit_receipts_root
@ -201,6 +203,7 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
if is_post_capella(spec):
payload.withdrawals = spec.get_expected_withdrawals(state)
if is_post_deneb(spec):
payload.data_gas_used = 0
payload.excess_data_gas = 0
if is_post_eip6110(spec):
# just to be clear

View File

@ -1,29 +1,31 @@
from random import Random
from eth2spec.utils import bls
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.forks import is_post_deneb
from eth2spec.test.helpers.keys import privkeys
def prepare_signed_exits(spec, state, indices, fork_version=None):
if fork_version is None:
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
else:
domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT, fork_version, state.genesis_validators_root)
def create_signed_exit(index):
exit = spec.VoluntaryExit(
voluntary_exit = spec.VoluntaryExit(
epoch=spec.get_current_epoch(state),
validator_index=index,
)
signing_root = spec.compute_signing_root(exit, domain)
return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))
return sign_voluntary_exit(spec, state, voluntary_exit, privkeys[index], fork_version=fork_version)
return [create_signed_exit(index) for index in indices]
def sign_voluntary_exit(spec, state, voluntary_exit, privkey, fork_version=None):
if fork_version is None:
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
if is_post_deneb(spec):
domain = spec.compute_domain(
spec.DOMAIN_VOLUNTARY_EXIT,
spec.config.CAPELLA_FORK_VERSION,
state.genesis_validators_root,
)
else:
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
else:
domain = spec.compute_domain(spec.DOMAIN_VOLUNTARY_EXIT, fork_version, state.genesis_validators_root)
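The tail of this helper is elided by the diff hunk; a sketch of how the computed domain would then be used, following the removed inline pattern in `prepare_signed_exits` above:

```python
# Sketch only, not the actual diff tail: sign with the selected domain.
def _finish_signing(spec, voluntary_exit, domain, privkey):
    signing_root = spec.compute_signing_root(voluntary_exit, domain)
    return spec.SignedVoluntaryExit(message=voluntary_exit, signature=bls.Sign(privkey, signing_root))
```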

View File

@ -1,4 +1,4 @@
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110
from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB
from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators
@ -15,14 +15,12 @@ if __name__ == "__main__":
]}
capella_mods = combine_mods(_new_capella_mods, bellatrix_mods)
deneb_mods = capella_mods
eip6110_mods = deneb_mods
all_mods = {
ALTAIR: altair_mods,
BELLATRIX: bellatrix_mods,
CAPELLA: capella_mods,
DENEB: deneb_mods,
EIP6110: eip6110_mods,
}
run_state_test_generators(runner_name="light_client", all_mods=all_mods)

View File

@ -39,6 +39,7 @@ if __name__ == "__main__":
_new_deneb_mods = {key: 'eth2spec.test.deneb.block_processing.test_process_' + key for key in [
'execution_payload',
'voluntary_exit',
]}
deneb_mods = combine_mods(_new_deneb_mods, capella_mods)