commit 389b2ddfb9

@@ -0,0 +1,78 @@
name: Run test vector generation

defaults:
  run:
    shell: zsh {0}

on:
  workflow_dispatch:
    inputs:
      ref:
        description: The branch, tag or SHA to checkout and build from
        default: dev
        type: string
        required: true

jobs:
  generate-tests:
    runs-on: [self-hosted-ghr-custom, size-chungus-x64, profile-consensusSpecs]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          repository: 'ethereum/consensus-specs'
          path: 'consensus-specs'
          ref: ${{ inputs.ref }}
      - name: Checkout consensus-spec-tests repository
        uses: actions/checkout@v4
        with:
          repository: 'ethereum/consensus-spec-tests'
          path: 'consensus-spec-tests'
          fetch-depth: 1
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'
          cache: ''
      - name: Clean up Spec Repository
        run: |
          cd consensus-specs
          make clean
      - name: Install dependencies and generate pyspec
        run: |
          cd consensus-specs
          make install_test
          make -B pyspec
      - name: Generate tests
        run: |
          cd consensus-specs
          make -j 16 generate_tests 2>&1 | tee ../consensustestgen.log
          cp -r presets/ ../consensus-spec-tests/presets
          cp -r configs/ ../consensus-spec-tests/configs
          find . -type d -empty -delete
      - name: Archive configurations
        run: |
          cd consensus-spec-tests
          tar -czvf general.tar.gz tests/general
          tar -czvf minimal.tar.gz tests/minimal
          tar -czvf mainnet.tar.gz tests/mainnet
      - name: Upload general.tar.gz
        uses: actions/upload-artifact@v4
        with:
          name: General Test Configuration
          path: consensus-spec-tests/general.tar.gz
      - name: Upload minimal.tar.gz
        uses: actions/upload-artifact@v4
        with:
          name: Minimal Test Configuration
          path: consensus-spec-tests/minimal.tar.gz
      - name: Upload mainnet.tar.gz
        uses: actions/upload-artifact@v4
        with:
          name: Mainnet Test Configuration
          path: consensus-spec-tests/mainnet.tar.gz
      - name: Upload consensustestgen
        uses: actions/upload-artifact@v4
        with:
          name: consensustestgen.log
          path: consensustestgen.log
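Editorial note (not part of this commit): the workflow above only runs on manual dispatch. A minimal sketch of triggering it through the GitHub REST API follows; the workflow file name `generate_vectors.yml` and the token handling are assumptions, while the repository and the `ref` input come from the file itself.

```python
import os
import requests

# Hypothetical dispatch of the "Run test vector generation" workflow via the GitHub API.
resp = requests.post(
    "https://api.github.com/repos/ethereum/consensus-specs/actions/workflows/generate_vectors.yml/dispatches",
    headers={
        "Accept": "application/vnd.github+json",
        "Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}",
    },
    json={
        "ref": "dev",              # branch containing the workflow definition
        "inputs": {"ref": "dev"},  # the workflow's own `ref` input: what to check out and build
    },
)
resp.raise_for_status()  # GitHub returns 204 No Content on success
```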
@@ -24,6 +24,7 @@ tests/core/pyspec/eth2spec/deneb/
tests/core/pyspec/eth2spec/electra/
tests/core/pyspec/eth2spec/whisk/
tests/core/pyspec/eth2spec/eip7594/
+tests/core/pyspec/eth2spec/eip6800/

# coverage reports
.htmlcov
Makefile

@@ -35,7 +35,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \
	$(wildcard $(SPEC_DIR)/_features/*/*/*.md) \
	$(wildcard $(SSZ_DIR)/*.md)

-ALL_EXECUTABLE_SPEC_NAMES = phase0 altair bellatrix capella deneb electra whisk
+ALL_EXECUTABLE_SPEC_NAMES = phase0 altair bellatrix capella deneb electra whisk eip6800
# The parameters for commands. Use `foreach` to avoid listing specs again.
COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), --cov=eth2spec.$S.$(TEST_PRESET_TYPE))
PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPEC_NAMES), ./eth2spec/$S)
@@ -159,6 +159,9 @@ NUMBER_OF_COLUMNS: 128
MAX_CELLS_IN_EXTENDED_MATRIX: 768
DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32
MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384
+SAMPLES_PER_SLOT: 8
+CUSTODY_REQUIREMENT: 1
+TARGET_NUMBER_OF_PEERS: 70

# [New in Electra:EIP7251]
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 128000000000 # 2**7 * 10**9 (= 128,000,000,000)
@@ -158,6 +158,9 @@ NUMBER_OF_COLUMNS: 128
MAX_CELLS_IN_EXTENDED_MATRIX: 768
DATA_COLUMN_SIDECAR_SUBNET_COUNT: 32
MAX_REQUEST_DATA_COLUMN_SIDECARS: 16384
+SAMPLES_PER_SLOT: 8
+CUSTODY_REQUIREMENT: 1
+TARGET_NUMBER_OF_PEERS: 70

# [New in Electra:EIP7251]
MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA: 64000000000 # 2**6 * 10**9 (= 64,000,000,000)
@@ -54,10 +54,10 @@ You can refer to the previous fork's `fork.md` file.
### 5. Make it executable
- Update Pyspec [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) with the new feature name.
- Update helpers for [`setup.py`](https://github.com/ethereum/consensus-specs/blob/dev/setup.py) for building the spec:
-  - Update [`pysetup/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/constants.py) with the new feature name as Pyspec `constants.py` defined.
+  - Update [`pysetup/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/constants.py) with the new feature name as Pyspec `constants.py` defined.
  - Update [`pysetup/spec_builders/__init__.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/spec_builders/__init__.py). Implement a new `<FEATURE_NAME>SpecBuilder` in `pysetup/spec_builders/<FEATURE_NAME>.py` with the new feature name. e.g., `EIP9999SpecBuilder` (sketched below). Append it to the `spec_builders` list.
  - Update [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py): add the path of the new markdown files in `get_md_doc_paths` function if needed.
-  - Update `PREVIOUS_FORK_OF` setting in both [`test/helpers/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/constants.py) and [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py).
+  - Update `PREVIOUS_FORK_OF` setting in both [`test/helpers/constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) and [`pysetup/md_doc_paths.py`](https://github.com/ethereum/consensus-specs/blob/dev/pysetup/md_doc_paths.py).
    - NOTE: since these two modules (the pyspec itself and the spec builder tool) must be separate, the fork sequence setting has to be defined again.
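Editorial sketch (not part of this commit) of the `<FEATURE_NAME>SpecBuilder` step above, for a hypothetical EIP9999 feature. The shape mirrors the real `EIP6800SpecBuilder` added later in this diff; the `EIP9999` constant and the choice of Electra as the previous fork are assumptions.

```python
# Hypothetical pysetup/spec_builders/eip9999.py -- illustrative only.
from .base import BaseSpecBuilder
from ..constants import EIP9999  # assumes EIP9999 was added to pysetup/constants.py


class EIP9999SpecBuilder(BaseSpecBuilder):
    fork: str = EIP9999

    @classmethod
    def imports(cls, preset_name: str):
        # Import the previous fork's compiled spec so the new fork extends it.
        return f'''
from eth2spec.electra import {preset_name} as electra
'''
```

It would then be imported in `pysetup/spec_builders/__init__.py` and appended to the tuple that builds the `spec_builders` dict, exactly as this commit does for `EIP6800SpecBuilder`.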

## B: Make it executable for pytest and test generator
@@ -0,0 +1,12 @@
# Mainnet preset - EIP6800

# Misc
# ---------------------------------------------------------------
# `uint64(2**16)` (= 65,536)
MAX_STEMS: 65536
# `uint64(33)`
MAX_COMMITMENTS_PER_STEM: 33
# `uint64(2**8)` (= 256)
VERKLE_WIDTH: 256
# `uint64(2**3)` (= 8)
IPA_PROOF_DEPTH: 8
@@ -30,12 +30,12 @@ MAX_ATTESTER_SLASHINGS_ELECTRA: 1
# `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA: 8
# `uint64(2**0)` (= 1)
-MAX_CONSOLIDATIONS: 1
+MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1

# Execution
# ---------------------------------------------------------------
-# 2**13 (= 8192) receipts
-MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192
+# 2**13 (= 8192) deposit requests
+MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192
# 2**4 (= 16) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16
@@ -0,0 +1,12 @@
# Minimal preset - EIP6800

# Execution
# ---------------------------------------------------------------
# `uint64(2**16)` (= 65,536)
MAX_STEMS: 65536
# `uint64(33)`
MAX_COMMITMENTS_PER_STEM: 33
# `uint64(2**8)` (= 256)
VERKLE_WIDTH: 256
# `uint64(2**3)` (= 8)
IPA_PROOF_DEPTH: 8
@@ -30,12 +30,12 @@ MAX_ATTESTER_SLASHINGS_ELECTRA: 1
# `uint64(2**3)` (= 8)
MAX_ATTESTATIONS_ELECTRA: 8
# `uint64(2**0)` (= 1)
-MAX_CONSOLIDATIONS: 1
+MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1

# Execution
# ---------------------------------------------------------------
# [customized]
-MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 4
+MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 4
# [customized] 2**1 (= 2) withdrawal requests
MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2
@@ -6,10 +6,10 @@ CAPELLA = 'capella'
DENEB = 'deneb'
ELECTRA = 'electra'
EIP7594 = 'eip7594'
+EIP6800 = 'eip6800'
WHISK = 'whisk'


# The helper functions that are used when defining constants
CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
def ceillog2(x: int) -> uint64:
@@ -178,7 +178,7 @@ def combine_dicts(old_dict: Dict[str, T], new_dict: Dict[str, T]) -> Dict[str, T

ignored_dependencies = [
    'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
-    'Bytes1', 'Bytes4', 'Bytes8', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
+    'Bytes1', 'Bytes4', 'Bytes8', 'Bytes20', 'Bytes31', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
    'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
    'bytes', 'byte', 'ByteList', 'ByteVector',
    'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
@@ -9,6 +9,7 @@ from .constants import (
    ELECTRA,
    WHISK,
    EIP7594,
+    EIP6800,
)


@@ -21,6 +22,7 @@ PREVIOUS_FORK_OF = {
    ELECTRA: DENEB,
    WHISK: CAPELLA,
    EIP7594: DENEB,
+    EIP6800: DENEB,
}

ALL_FORKS = list(PREVIOUS_FORK_OF.keys())
@@ -6,12 +6,13 @@ from .deneb import DenebSpecBuilder
from .electra import ElectraSpecBuilder
from .whisk import WhiskSpecBuilder
from .eip7594 import EIP7594SpecBuilder
+from .eip6800 import EIP6800SpecBuilder


spec_builders = {
    builder.fork: builder
    for builder in (
        Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder,
-        ElectraSpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder,
+        ElectraSpecBuilder, WhiskSpecBuilder, EIP7594SpecBuilder, EIP6800SpecBuilder,
    )
}
@@ -0,0 +1,21 @@
from typing import Dict

from .base import BaseSpecBuilder
from ..constants import EIP6800


class EIP6800SpecBuilder(BaseSpecBuilder):
    fork: str = EIP6800

    @classmethod
    def imports(cls, preset_name: str):
        return f'''
from eth2spec.deneb import {preset_name} as deneb
from eth2spec.utils.ssz.ssz_typing import Bytes31
'''

    @classmethod
    def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
        return {
            'MAX_STEMS': spec_object.preset_vars['MAX_STEMS'].value,
        }
@@ -1,4 +1,4 @@
-#! /bin/sh
+#! /bin/bash

# Run 'consensus-specs' tests from a docker container instance.
# *Be sure to launch Docker before running this script.*
setup.py

@@ -219,7 +219,13 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], pr
    elif source.startswith("class"):
        class_name, parent_class = _get_class_info_from_source(source)
        # check consistency with spec
-        assert class_name == current_name
+        try:
+            assert class_name == current_name
+        except Exception:
+            print('class_name', class_name)
+            print('current_name', current_name)
+            raise

        if parent_class:
            assert parent_class == "Container"
        # NOTE: trim whitespace from spec
@@ -0,0 +1,221 @@
# EIP6800 -- The Beacon Chain

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Preset](#preset)
  - [Execution](#execution)
- [Containers](#containers)
  - [Extended containers](#extended-containers)
    - [`ExecutionPayload`](#executionpayload)
    - [`ExecutionPayloadHeader`](#executionpayloadheader)
  - [New containers](#new-containers)
    - [`SuffixStateDiff`](#suffixstatediff)
    - [`StemStateDiff`](#stemstatediff)
    - [`IPAProof`](#ipaproof)
    - [`VerkleProof`](#verkleproof)
    - [`ExecutionWitness`](#executionwitness)
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
  - [Block processing](#block-processing)
    - [Execution payload](#execution-payload)
      - [`process_execution_payload`](#process_execution_payload)
- [Testing](#testing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction
This upgrade adds a verkle-tree execution witness to the `ExecutionPayload`, as part of the eip6800 (verkle tree) upgrade.

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `BanderwagonGroupElement` | `Bytes32` | |
| `BanderwagonFieldElement` | `Bytes32` | |
| `Stem` | `Bytes31` | |

## Preset

### Execution

| Name | Value |
| - | - |
| `MAX_STEMS` | `uint64(2**16)` (= 65,536) |
| `MAX_COMMITMENTS_PER_STEM` | `uint64(33)` |
| `VERKLE_WIDTH` | `uint64(2**8)` (= 256) |
| `IPA_PROOF_DEPTH` | `uint64(2**3)` (= 8) |

## Containers

### Extended containers

#### `ExecutionPayload`

```python
class ExecutionPayload(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress  # 'beneficiary' in the yellow paper
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32  # 'difficulty' in the yellow paper
    block_number: uint64  # 'number' in the yellow paper
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
    withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
    blob_gas_used: uint64
    excess_blob_gas: uint64
    execution_witness: ExecutionWitness  # [New in EIP6800]
```

#### `ExecutionPayloadHeader`

```python
class ExecutionPayloadHeader(Container):
    # Execution block header fields
    parent_hash: Hash32
    fee_recipient: ExecutionAddress
    state_root: Bytes32
    receipts_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    prev_randao: Bytes32
    block_number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
    base_fee_per_gas: uint256
    # Extra payload fields
    block_hash: Hash32  # Hash of execution block
    transactions_root: Root
    withdrawals_root: Root
    blob_gas_used: uint64
    excess_blob_gas: uint64
    execution_witness_root: Root  # [New in EIP6800]
```

### New containers

#### `SuffixStateDiff`

```python
class SuffixStateDiff(Container):
    suffix: Bytes1
    # Null means not currently present
    current_value: Optional[Bytes32]
    # Null means value not updated
    new_value: Optional[Bytes32]
```

*Note*: on the Kaustinen testnet, `new_value` is omitted from the container.

#### `StemStateDiff`

```python
class StemStateDiff(Container):
    stem: Stem
    # Valid only if list is sorted by suffixes
    suffix_diffs: List[SuffixStateDiff, VERKLE_WIDTH]
```

#### `IPAProof`

```python
class IPAProof(Container):
    cl: Vector[BanderwagonGroupElement, IPA_PROOF_DEPTH]
    cr: Vector[BanderwagonGroupElement, IPA_PROOF_DEPTH]
    final_evaluation: BanderwagonFieldElement
```

#### `VerkleProof`

```python
class VerkleProof(Container):
    other_stems: List[Bytes31, MAX_STEMS]
    depth_extension_present: ByteList[MAX_STEMS]
    commitments_by_path: List[BanderwagonGroupElement, MAX_STEMS * MAX_COMMITMENTS_PER_STEM]
    d: BanderwagonGroupElement
    ipa_proof: IPAProof
```

#### `ExecutionWitness`

```python
class ExecutionWitness(Container):
    state_diff: List[StemStateDiff, MAX_STEMS]
    verkle_proof: VerkleProof
```

## Beacon chain state transition function

### Block processing

#### Execution payload

##### `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, body: BeaconBlockBody, execution_engine: ExecutionEngine) -> None:
    payload = body.execution_payload

    # Verify consistency of the parent hash with respect to the previous execution payload header
    assert payload.parent_hash == state.latest_execution_payload_header.block_hash
    # Verify prev_randao
    assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state))
    # Verify timestamp
    assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)

    # Verify commitments are under limit
    assert len(body.blob_kzg_commitments) <= MAX_BLOBS_PER_BLOCK

    # Verify the execution payload is valid
    # Pass `versioned_hashes` to Execution Engine
    # Pass `parent_beacon_block_root` to Execution Engine
    versioned_hashes = [kzg_commitment_to_versioned_hash(commitment) for commitment in body.blob_kzg_commitments]
    assert execution_engine.verify_and_notify_new_payload(
        NewPayloadRequest(
            execution_payload=payload,
            versioned_hashes=versioned_hashes,
            parent_beacon_block_root=state.latest_block_header.parent_root,
        )
    )

    # Cache execution payload header
    state.latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=payload.parent_hash,
        fee_recipient=payload.fee_recipient,
        state_root=payload.state_root,
        receipts_root=payload.receipts_root,
        logs_bloom=payload.logs_bloom,
        prev_randao=payload.prev_randao,
        block_number=payload.block_number,
        gas_limit=payload.gas_limit,
        gas_used=payload.gas_used,
        timestamp=payload.timestamp,
        extra_data=payload.extra_data,
        base_fee_per_gas=payload.base_fee_per_gas,
        block_hash=payload.block_hash,
        transactions_root=hash_tree_root(payload.transactions),
        withdrawals_root=hash_tree_root(payload.withdrawals),
        blob_gas_used=payload.blob_gas_used,
        excess_blob_gas=payload.excess_blob_gas,
        execution_witness_root=hash_tree_root(payload.execution_witness),  # [New in EIP6800]
    )
```

## Testing

TBD
@@ -0,0 +1,145 @@
# EIP-6800 -- Fork Logic

## Table of contents

<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Configuration](#configuration)
- [Helper functions](#helper-functions)
  - [Misc](#misc)
    - [Modified `compute_fork_version`](#modified-compute_fork_version)
- [Fork to eip6800](#fork-to-eip6800)
  - [Fork trigger](#fork-trigger)
  - [Upgrading the state](#upgrading-the-state)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->

## Introduction

This document describes the process of the eip6800 upgrade.

## Configuration

Warning: this configuration is not definitive.

| Name | Value |
| - | - |
| `EIP6800_FORK_VERSION` | `Version('0x05000000')` |
| `EIP6800_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |

## Helper functions

### Misc

#### Modified `compute_fork_version`

```python
def compute_fork_version(epoch: Epoch) -> Version:
    """
    Return the fork version at the given ``epoch``.
    """
    if epoch >= EIP6800_FORK_EPOCH:
        return EIP6800_FORK_VERSION
    if epoch >= DENEB_FORK_EPOCH:
        return DENEB_FORK_VERSION
    if epoch >= CAPELLA_FORK_EPOCH:
        return CAPELLA_FORK_VERSION
    if epoch >= BELLATRIX_FORK_EPOCH:
        return BELLATRIX_FORK_VERSION
    if epoch >= ALTAIR_FORK_EPOCH:
        return ALTAIR_FORK_VERSION
    return GENESIS_FORK_VERSION
```

## Fork to eip6800

### Fork trigger

The fork is triggered at epoch `EIP6800_FORK_EPOCH`.

Note that for the pure eip6800 networks, we don't apply `upgrade_to_eip6800` since it starts with the eip6800 version logic.

### Upgrading the state

If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6800_FORK_EPOCH`,
an irregular state change is made to upgrade to eip6800.

The upgrade occurs after the completion of the inner loop of `process_slots` that sets `state.slot` equal to `EIP6800_FORK_EPOCH * SLOTS_PER_EPOCH`.
Care must be taken when transitioning through the fork boundary as implementations will need a modified [state transition function](../phase0/beacon-chain.md#beacon-chain-state-transition-function) that deviates from the Phase 0 document.
In particular, the outer `state_transition` function defined in the Phase 0 document will not expose the precise fork slot to execute the upgrade in the presence of skipped slots at the fork boundary. Instead, the logic must be within `process_slots`.
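For illustration only (an editorial sketch, not part of the spec): one way an implementation's `process_slots` might apply the upgrade at the exact fork boundary described above, assuming a Deneb pre-state and the `upgrade_to_eip6800` helper defined next.

```python
def process_slots_through_eip6800(state: deneb.BeaconState, slot: Slot) -> BeaconState:
    # Sketch only: mirrors the phase0 `process_slots` loop, applying the fork at the
    # boundary even when slots around the boundary are skipped.
    while state.slot < slot:
        process_slot(state)
        # Process epoch on the start slot of the next epoch
        if (state.slot + 1) % SLOTS_PER_EPOCH == 0:
            process_epoch(state)
        state.slot = Slot(state.slot + 1)
        if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == EIP6800_FORK_EPOCH:
            state = upgrade_to_eip6800(state)
    return state
```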
```python
def upgrade_to_eip6800(pre: deneb.BeaconState) -> BeaconState:
    epoch = deneb.get_current_epoch(pre)
    latest_execution_payload_header = ExecutionPayloadHeader(
        parent_hash=pre.latest_execution_payload_header.parent_hash,
        fee_recipient=pre.latest_execution_payload_header.fee_recipient,
        state_root=pre.latest_execution_payload_header.state_root,
        receipts_root=pre.latest_execution_payload_header.receipts_root,
        logs_bloom=pre.latest_execution_payload_header.logs_bloom,
        prev_randao=pre.latest_execution_payload_header.prev_randao,
        block_number=pre.latest_execution_payload_header.block_number,
        gas_limit=pre.latest_execution_payload_header.gas_limit,
        gas_used=pre.latest_execution_payload_header.gas_used,
        timestamp=pre.latest_execution_payload_header.timestamp,
        extra_data=pre.latest_execution_payload_header.extra_data,
        base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
        excess_blob_gas=uint64(0),
        block_hash=pre.latest_execution_payload_header.block_hash,
        transactions_root=pre.latest_execution_payload_header.transactions_root,
        withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
        execution_witness_root=hash_tree_root(ExecutionWitness([], [])),  # New in eip6800
    )
    post = BeaconState(
        # Versioning
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=EIP6800_FORK_VERSION,  # [Modified in eip6800]
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation
        previous_epoch_participation=pre.previous_epoch_participation,
        current_epoch_participation=pre.current_epoch_participation,
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
        # Inactivity
        inactivity_scores=pre.inactivity_scores,
        # Sync
        current_sync_committee=pre.current_sync_committee,
        next_sync_committee=pre.next_sync_committee,
        # Execution-layer
        latest_execution_payload_header=latest_execution_payload_header,
        # Withdrawals
        next_withdrawal_index=pre.next_withdrawal_index,
        next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
        # Deep history valid from Capella onwards
        historical_summaries=pre.historical_summaries,
    )

    return post
```
@@ -17,6 +17,7 @@
  - [Custody setting](#custody-setting)
- [Containers](#containers)
  - [`DataColumnSidecar`](#datacolumnsidecar)
+  - [`MatrixEntry`](#matrixentry)
- [Helper functions](#helper-functions)
  - [`get_custody_columns`](#get_custody_columns)
  - [`compute_extended_matrix`](#compute_extended_matrix)

@@ -53,12 +54,10 @@ The following values are (non-configurable) constants used throughout the specif

## Custom types

-We define the following Python custom types for type hinting and readability:
-
| Name | SSZ equivalent | Description |
| - | - | - |
-| `DataColumn` | `List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]` | The data of each column in EIP-7594 |
-| `ExtendedMatrix` | `List[Cell, MAX_CELLS_IN_EXTENDED_MATRIX]` | The full data of one-dimensional erasure coding extended blobs (in row major format). |
+| `RowIndex` | `uint64` | Row identifier in the matrix of cells |
+| `ColumnIndex` | `uint64` | Column identifier in the matrix of cells |

## Configuration
@@ -79,7 +78,7 @@ We define the following Python custom types for type hinting and readability:

| Name | Value | Description |
| - | - | - |
-| `SAMPLES_PER_SLOT` | `8` | Number of `DataColumn` random samples a node queries per slot |
+| `SAMPLES_PER_SLOT` | `8` | Number of `DataColumnSidecar` random samples a node queries per slot |
| `CUSTODY_REQUIREMENT` | `1` | Minimum number of subnets an honest node custodies and serves samples from |
| `TARGET_NUMBER_OF_PEERS` | `70` | Suggested minimum peer count |

@@ -90,13 +89,23 @@ We define the following Python custom types for type hinting and readability:

```python
class DataColumnSidecar(Container):
    index: ColumnIndex  # Index of column in extended matrix
-    column: DataColumn
+    column: List[Cell, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    kzg_proofs: List[KZGProof, MAX_BLOB_COMMITMENTS_PER_BLOCK]
    signed_block_header: SignedBeaconBlockHeader
    kzg_commitments_inclusion_proof: Vector[Bytes32, KZG_COMMITMENTS_INCLUSION_PROOF_DEPTH]
```

+#### `MatrixEntry`
+
+```python
+class MatrixEntry(Container):
+    cell: Cell
+    kzg_proof: KZGProof
+    column_index: ColumnIndex
+    row_index: RowIndex
+```

### Helper functions

#### `get_custody_columns`
@@ -132,7 +141,7 @@ def get_custody_columns(node_id: NodeID, custody_subnet_count: uint64) -> Sequen

#### `compute_extended_matrix`

```python
-def compute_extended_matrix(blobs: Sequence[Blob]) -> ExtendedMatrix:
+def compute_extended_matrix(blobs: Sequence[Blob]) -> List[MatrixEntry, MAX_CELLS_IN_EXTENDED_MATRIX]:
    """
    Return the full ``ExtendedMatrix``.

@@ -140,29 +149,44 @@ def compute_extended_matrix(blobs: Sequence[Blob]) -> ExtendedMatrix:
    The data structure for storing cells is implementation-dependent.
    """
    extended_matrix = []
-    for blob in blobs:
-        extended_matrix.extend(compute_cells(blob))
-    return ExtendedMatrix(extended_matrix)
+    for blob_index, blob in enumerate(blobs):
+        cells, proofs = compute_cells_and_kzg_proofs(blob)
+        for cell_index, (cell, proof) in enumerate(zip(cells, proofs)):
+            extended_matrix.append(MatrixEntry(
+                cell=cell,
+                kzg_proof=proof,
+                row_index=blob_index,
+                column_index=cell_index,
+            ))
+    return extended_matrix
```

#### `recover_matrix`

```python
-def recover_matrix(cells_dict: Dict[Tuple[BlobIndex, CellID], Cell], blob_count: uint64) -> ExtendedMatrix:
+def recover_matrix(partial_matrix: Sequence[MatrixEntry],
+                   blob_count: uint64) -> List[MatrixEntry, MAX_CELLS_IN_EXTENDED_MATRIX]:
    """
-    Return the recovered ``ExtendedMatrix``.
+    Return the recovered extended matrix.

-    This helper demonstrates how to apply ``recover_all_cells``.
+    This helper demonstrates how to apply ``recover_cells_and_kzg_proofs``.
    The data structure for storing cells is implementation-dependent.
    """
-    extended_matrix: List[Cell] = []
+    extended_matrix = []
    for blob_index in range(blob_count):
-        cell_ids = [cell_id for b_index, cell_id in cells_dict.keys() if b_index == blob_index]
-        cells = [cells_dict[(BlobIndex(blob_index), cell_id)] for cell_id in cell_ids]
-
-        all_cells_for_row = recover_all_cells(cell_ids, cells)
-        extended_matrix.extend(all_cells_for_row)
-    return ExtendedMatrix(extended_matrix)
+        cell_indices = [e.column_index for e in partial_matrix if e.row_index == blob_index]
+        cells = [e.cell for e in partial_matrix if e.row_index == blob_index]
+        proofs = [e.kzg_proof for e in partial_matrix if e.row_index == blob_index]
+
+        recovered_cells, recovered_proofs = recover_cells_and_kzg_proofs(cell_indices, cells, proofs)
+        for cell_index, (cell, proof) in enumerate(zip(recovered_cells, recovered_proofs)):
+            extended_matrix.append(MatrixEntry(
+                cell=cell,
+                kzg_proof=proof,
+                row_index=blob_index,
+                column_index=cell_index,
+            ))
+    return extended_matrix
```

#### `get_data_column_sidecars`
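Editorial note (not part of the spec): `compute_extended_matrix` and `recover_matrix` above return the matrix as a flat, row-major list, so an entry can be looked up from its row and column as in this sketch. Using `CELLS_PER_EXT_BLOB` as the row width is an assumption consistent with how `column_index` is assigned above.

```python
def get_matrix_entry(extended_matrix: Sequence[MatrixEntry],
                     row_index: RowIndex,
                     column_index: ColumnIndex) -> MatrixEntry:
    # Row-major layout: each blob contributes CELLS_PER_EXT_BLOB consecutive entries.
    entry = extended_matrix[row_index * CELLS_PER_EXT_BLOB + column_index]
    assert entry.row_index == row_index and entry.column_index == column_index
    return entry
```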
@@ -182,15 +206,15 @@ def get_data_column_sidecars(signed_block: SignedBeaconBlock,
    proofs = [cells_and_proofs[i][1] for i in range(blob_count)]
    sidecars = []
    for column_index in range(NUMBER_OF_COLUMNS):
-        column = DataColumn([cells[row_index][column_index]
-                             for row_index in range(blob_count)])
-        kzg_proof_of_column = [proofs[row_index][column_index]
-                               for row_index in range(blob_count)]
+        column_cells = [cells[row_index][column_index]
+                        for row_index in range(blob_count)]
+        column_proofs = [proofs[row_index][column_index]
+                         for row_index in range(blob_count)]
        sidecars.append(DataColumnSidecar(
            index=column_index,
-            column=column,
+            column=column_cells,
            kzg_commitments=block.body.blob_kzg_commitments,
-            kzg_proofs=kzg_proof_of_column,
+            kzg_proofs=column_proofs,
            signed_block_header=signed_block_header,
            kzg_commitments_inclusion_proof=kzg_commitments_inclusion_proof,
        ))

@@ -283,7 +307,7 @@ Such trailing techniques and their analysis will be valuable for any DAS constru

### Row (blob) custody

-In the one-dimension construction, a node samples the peers by requesting the whole `DataColumn`. In reconstruction, a node can reconstruct all the blobs by 50% of the columns. Note that nodes can still download the row via `blob_sidecar_{subnet_id}` subnets.
+In the one-dimension construction, a node samples the peers by requesting the whole `DataColumnSidecar`. In reconstruction, a node can reconstruct all the blobs by 50% of the columns. Note that nodes can still download the row via `blob_sidecar_{subnet_id}` subnets.

The potential benefits of having row custody could include:
@@ -28,7 +28,7 @@
  - [DataColumnSidecarsByRange v1](#datacolumnsidecarsbyrange-v1)
- [The discovery domain: discv5](#the-discovery-domain-discv5)
  - [ENR structure](#enr-structure)
-    - [`custody_subnet_count`](#custody_subnet_count)
+    - [Custody subnet count](#custody-subnet-count)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

@@ -218,7 +218,7 @@ Request Content:
(
  start_slot: Slot
  count: uint64
-  columns: List[ColumnIndex]
+  columns: List[ColumnIndex, NUMBER_OF_COLUMNS]
)
```

@@ -284,10 +284,10 @@ After the initial data column sidecar, clients MAY stop in the process of respon
#### ENR structure

-##### `custody_subnet_count`
+##### Custody subnet count

-A new field is added to the ENR under the key `custody_subnet_count` to facilitate custody data column discovery.
+A new field is added to the ENR under the key `csc` to facilitate custody data column discovery.

| Key | Value |
-|:-----------------------|:-------------|
-| `custody_subnet_count` | SSZ `uint64` |
+|:------|:-----------------------------------------|
+| `csc` | Custody subnet count, big endian integer |
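Editorial sketch (not part of the spec): encoding and decoding the `csc` ENR value as a big-endian integer, per the table above. The minimal-length byte encoding is an assumption; the spec only states "big endian integer".

```python
def encode_csc(custody_subnet_count: int) -> bytes:
    # Minimal-length big-endian encoding (assumption); at least one byte.
    length = max(1, (custody_subnet_count.bit_length() + 7) // 8)
    return custody_subnet_count.to_bytes(length, "big")


def decode_csc(value: bytes) -> int:
    return int.from_bytes(value, "big")
```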
@@ -1,4 +1,4 @@
-# EIP-7594 -- Polynomial Commitments
+# EIP-7594 -- Polynomial Commitments Sampling

## Table of contents

@@ -7,6 +7,7 @@
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
+- [Public Methods](#public-methods)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Preset](#preset)

@@ -20,13 +21,13 @@
  - [FFTs](#ffts)
    - [`_fft_field`](#_fft_field)
    - [`fft_field`](#fft_field)
+    - [`coset_fft_field`](#coset_fft_field)
  - [Polynomials in coefficient form](#polynomials-in-coefficient-form)
    - [`polynomial_eval_to_coeff`](#polynomial_eval_to_coeff)
    - [`add_polynomialcoeff`](#add_polynomialcoeff)
    - [`neg_polynomialcoeff`](#neg_polynomialcoeff)
    - [`multiply_polynomialcoeff`](#multiply_polynomialcoeff)
    - [`divide_polynomialcoeff`](#divide_polynomialcoeff)
-    - [`shift_polynomialcoeff`](#shift_polynomialcoeff)
    - [`interpolate_polynomialcoeff`](#interpolate_polynomialcoeff)
    - [`vanishing_polynomialcoeff`](#vanishing_polynomialcoeff)
    - [`evaluate_polynomialcoeff`](#evaluate_polynomialcoeff)

@@ -38,27 +39,34 @@
  - [Cells](#cells-1)
    - [Cell computation](#cell-computation)
      - [`compute_cells_and_kzg_proofs`](#compute_cells_and_kzg_proofs)
-      - [`compute_cells`](#compute_cells)
    - [Cell verification](#cell-verification)
      - [`verify_cell_kzg_proof`](#verify_cell_kzg_proof)
      - [`verify_cell_kzg_proof_batch`](#verify_cell_kzg_proof_batch)
  - [Reconstruction](#reconstruction)
    - [`construct_vanishing_polynomial`](#construct_vanishing_polynomial)
-    - [`recover_shifted_data`](#recover_shifted_data)
-    - [`recover_original_data`](#recover_original_data)
-    - [`recover_all_cells`](#recover_all_cells)
+    - [`recover_data`](#recover_data)
+    - [`recover_cells_and_kzg_proofs`](#recover_cells_and_kzg_proofs)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

-This document extends [polynomial-commitments.md](polynomial-commitments.md) with the functions required for data availability sampling (DAS). It is not part of the core Deneb spec but an extension that can be optionally implemented to allow nodes to reduce their load using DAS.
+This document extends [polynomial-commitments.md](../../deneb/polynomial-commitments.md) with the functions required for data availability sampling (DAS). It is not part of the core Deneb spec but an extension that can be optionally implemented to allow nodes to reduce their load using DAS.

+## Public Methods
+
For any KZG library extended to support DAS, functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.

Public functions MUST accept raw bytes as input and perform the required cryptographic normalization before invoking any internal functions.

+The following is a list of the public methods:
+
+- [`compute_cells_and_kzg_proofs`](#compute_cells_and_kzg_proofs)
+- [`verify_cell_kzg_proof`](#verify_cell_kzg_proof)
+- [`verify_cell_kzg_proof_batch`](#verify_cell_kzg_proof_batch)
+- [`recover_cells_and_kzg_proofs`](#recover_cells_and_kzg_proofs)

## Custom types

| Name | SSZ equivalent | Description |

@@ -67,9 +75,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog
| `Coset` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_CELL]` | The evaluation domain of a cell |
| `CosetEvals` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_CELL]` | The internal representation of a cell (the evaluations over its Coset) |
| `Cell` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_CELL]` | The unit of blob data that can come with its own KZG proof |
-| `CellID` | `uint64` | Cell identifier |
-| `RowIndex` | `uint64` | Row identifier |
-| `ColumnIndex` | `uint64` | Column identifier |
+| `CellIndex` | `uint64` | Validation: `x < CELLS_PER_EXT_BLOB` |

## Constants
@@ -180,6 +186,41 @@ def fft_field(vals: Sequence[BLSFieldElement],
    return _fft_field(vals, roots_of_unity)
```

+#### `coset_fft_field`
+
+```python
+def coset_fft_field(vals: Sequence[BLSFieldElement],
+                    roots_of_unity: Sequence[BLSFieldElement],
+                    inv: bool=False) -> Sequence[BLSFieldElement]:
+    """
+    Computes an FFT/IFFT over a coset of the roots of unity.
+    This is useful for when one wants to divide by a polynomial which
+    vanishes on one or more elements in the domain.
+    """
+    vals = vals.copy()
+
+    def shift_vals(vals: Sequence[BLSFieldElement], factor: BLSFieldElement) -> Sequence[BLSFieldElement]:
+        """
+        Multiply each entry in `vals` by succeeding powers of `factor`
+        i.e., [vals[0] * factor^0, vals[1] * factor^1, ..., vals[n] * factor^n]
+        """
+        shift = 1
+        for i in range(len(vals)):
+            vals[i] = BLSFieldElement((int(vals[i]) * shift) % BLS_MODULUS)
+            shift = (shift * int(factor)) % BLS_MODULUS
+        return vals
+
+    # This is the coset generator; it is used to compute a FFT/IFFT over a coset of
+    # the roots of unity.
+    shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY)
+    if inv:
+        vals = fft_field(vals, roots_of_unity, inv)
+        shift_inv = bls_modular_inverse(shift_factor)
+        return shift_vals(vals, shift_inv)
+    else:
+        vals = shift_vals(vals, shift_factor)
+        return fft_field(vals, roots_of_unity, inv)
+```

### Polynomials in coefficient form
@@ -257,23 +298,6 @@ def divide_polynomialcoeff(a: PolynomialCoeff, b: PolynomialCoeff) -> Polynomial
    return [x % BLS_MODULUS for x in o]
```

-#### `shift_polynomialcoeff`
-
-```python
-def shift_polynomialcoeff(polynomial_coeff: PolynomialCoeff, factor: BLSFieldElement) -> PolynomialCoeff:
-    """
-    Shift the evaluation of a polynomial in coefficient form by factor.
-    This results in a new polynomial g(x) = f(factor * x)
-    """
-    factor_power = 1
-    inv_factor = pow(int(factor), BLS_MODULUS - 2, BLS_MODULUS)
-    o = []
-    for p in polynomial_coeff:
-        o.append(int(p) * factor_power % BLS_MODULUS)
-        factor_power = factor_power * inv_factor % BLS_MODULUS
-    return o
-```
-
#### `interpolate_polynomialcoeff`

```python

@@ -404,15 +428,15 @@ def verify_kzg_proof_multi_impl(commitment: KZGCommitment,
#### `coset_for_cell`

```python
-def coset_for_cell(cell_id: CellID) -> Coset:
+def coset_for_cell(cell_index: CellIndex) -> Coset:
    """
-    Get the coset for a given ``cell_id``
+    Get the coset for a given ``cell_index``.
    """
-    assert cell_id < CELLS_PER_EXT_BLOB
+    assert cell_index < CELLS_PER_EXT_BLOB
    roots_of_unity_brp = bit_reversal_permutation(
        compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
    )
-    return Coset(roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_id:FIELD_ELEMENTS_PER_CELL * (cell_id + 1)])
+    return Coset(roots_of_unity_brp[FIELD_ELEMENTS_PER_CELL * cell_index:FIELD_ELEMENTS_PER_CELL * (cell_index + 1)])
```

## Cells
@@ -441,7 +465,7 @@ def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[
    proofs = []

    for i in range(CELLS_PER_EXT_BLOB):
-        coset = coset_for_cell(CellID(i))
+        coset = coset_for_cell(CellIndex(i))
        proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
        cells.append(coset_evals_to_cell(ys))
        proofs.append(proof)

@@ -449,38 +473,13 @@ def compute_cells_and_kzg_proofs(blob: Blob) -> Tuple[
    return cells, proofs
```

-#### `compute_cells`
-
-```python
-def compute_cells(blob: Blob) -> Vector[Cell, CELLS_PER_EXT_BLOB]:
-    """
-    Compute the cell data for an extended blob (without computing the proofs).
-
-    Public method.
-    """
-    assert len(blob) == BYTES_PER_BLOB
-
-    polynomial = blob_to_polynomial(blob)
-    polynomial_coeff = polynomial_eval_to_coeff(polynomial)
-
-    extended_data = fft_field(polynomial_coeff + [0] * FIELD_ELEMENTS_PER_BLOB,
-                              compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB))
-    extended_data_rbo = bit_reversal_permutation(extended_data)
-    cells = []
-    for cell_id in range(CELLS_PER_EXT_BLOB):
-        start = cell_id * FIELD_ELEMENTS_PER_CELL
-        end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
-        cells.append(coset_evals_to_cell(CosetEvals(extended_data_rbo[start:end])))
-    return cells
-```
-
### Cell verification

#### `verify_cell_kzg_proof`

```python
def verify_cell_kzg_proof(commitment_bytes: Bytes48,
-                          cell_id: CellID,
+                          cell_index: CellIndex,
                          cell: Cell,
                          proof_bytes: Bytes48) -> bool:
    """

@@ -489,11 +488,11 @@ def verify_cell_kzg_proof(commitment_bytes: Bytes48,
    Public method.
    """
    assert len(commitment_bytes) == BYTES_PER_COMMITMENT
-    assert cell_id < CELLS_PER_EXT_BLOB
+    assert cell_index < CELLS_PER_EXT_BLOB
    assert len(cell) == BYTES_PER_CELL
    assert len(proof_bytes) == BYTES_PER_PROOF

-    coset = coset_for_cell(cell_id)
+    coset = coset_for_cell(cell_index)

    return verify_kzg_proof_multi_impl(
        bytes_to_kzg_commitment(commitment_bytes),

@@ -511,7 +510,7 @@ def verify_cell_kzg_proof_batch(row_commitments_bytes: Sequence[Bytes48],
                                cells: Sequence[Cell],
                                proofs_bytes: Sequence[Bytes48]) -> bool:
    """
-    Verify a set of cells, given their corresponding proofs and their coordinates (row_id, column_id) in the blob
+    Verify a set of cells, given their corresponding proofs and their coordinates (row_index, column_index) in the blob
    matrix. The list of all commitments is also provided in row_commitments_bytes.

    This function implements the naive algorithm of checking every cell

@@ -536,7 +535,7 @@ def verify_cell_kzg_proof_batch(row_commitments_bytes: Sequence[Bytes48],
    for proof_bytes in proofs_bytes:
        assert len(proof_bytes) == BYTES_PER_PROOF

-    # Get commitments via row IDs
+    # Get commitments via row indices
    commitments_bytes = [row_commitments_bytes[row_index] for row_index in row_indices]

    # Get objects from bytes
|
@ -555,20 +554,24 @@ def verify_cell_kzg_proof_batch(row_commitments_bytes: Sequence[Bytes48],
|
||||||
### `construct_vanishing_polynomial`
|
### `construct_vanishing_polynomial`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def construct_vanishing_polynomial(missing_cell_ids: Sequence[CellID]) -> Tuple[
|
def construct_vanishing_polynomial(missing_cell_indices: Sequence[CellIndex]) -> Sequence[BLSFieldElement]:
|
||||||
Sequence[BLSFieldElement],
|
|
||||||
Sequence[BLSFieldElement]]:
|
|
||||||
"""
|
"""
|
||||||
Given the cells that are missing from the data, compute the polynomial that vanishes at every point that
|
Given the cell indices that are missing from the data, compute the polynomial that vanishes at every point that
|
||||||
corresponds to a missing field element.
|
corresponds to a missing field element.
|
||||||
|
|
||||||
|
This method assumes that not all of the cells can be missing. If they all were missing, the vanishing polynomial
|
||||||
|
could be computed as Z(x) = x^n - 1, where `n` is FIELD_ELEMENTS_PER_EXT_BLOB.
|
||||||
|
|
||||||
|
We never encounter this case however because this method is used solely for recovery and recovery only
|
||||||
|
works if at least half of the cells are available.
|
||||||
"""
|
"""
|
||||||
# Get the small domain
|
# Get the small domain
|
||||||
roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_EXT_BLOB)
|
roots_of_unity_reduced = compute_roots_of_unity(CELLS_PER_EXT_BLOB)
|
||||||
|
|
||||||
# Compute polynomial that vanishes at all the missing cells (over the small domain)
|
# Compute polynomial that vanishes at all the missing cells (over the small domain)
|
||||||
short_zero_poly = vanishing_polynomialcoeff([
|
short_zero_poly = vanishing_polynomialcoeff([
|
||||||
roots_of_unity_reduced[reverse_bits(missing_cell_id, CELLS_PER_EXT_BLOB)]
|
roots_of_unity_reduced[reverse_bits(missing_cell_index, CELLS_PER_EXT_BLOB)]
|
||||||
for missing_cell_id in missing_cell_ids
|
for missing_cell_index in missing_cell_indices
|
||||||
])
|
])
|
||||||
|
|
||||||
# Extend vanishing polynomial to full domain using the closed form of the vanishing polynomial over a coset
|
# Extend vanishing polynomial to full domain using the closed form of the vanishing polynomial over a coset
|
||||||
|
@ -576,98 +579,80 @@ def construct_vanishing_polynomial(missing_cell_ids: Sequence[CellID]) -> Tuple[
|
||||||
for i, coeff in enumerate(short_zero_poly):
|
for i, coeff in enumerate(short_zero_poly):
|
||||||
zero_poly_coeff[i * FIELD_ELEMENTS_PER_CELL] = coeff
|
zero_poly_coeff[i * FIELD_ELEMENTS_PER_CELL] = coeff
|
||||||
|
|
||||||
# Compute evaluations of the extended vanishing polynomial
|
return zero_poly_coeff
|
||||||
zero_poly_eval = fft_field(zero_poly_coeff,
|
|
||||||
compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB))
|
|
||||||
zero_poly_eval_brp = bit_reversal_permutation(zero_poly_eval)
|
|
||||||
|
|
||||||
# Sanity check
|
|
||||||
for cell_id in range(CELLS_PER_EXT_BLOB):
|
|
||||||
start = cell_id * FIELD_ELEMENTS_PER_CELL
|
|
||||||
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
|
|
||||||
if cell_id in missing_cell_ids:
|
|
||||||
assert all(a == 0 for a in zero_poly_eval_brp[start:end])
|
|
||||||
else: # cell_id in cell_ids
|
|
||||||
assert all(a != 0 for a in zero_poly_eval_brp[start:end])
|
|
||||||
|
|
||||||
return zero_poly_coeff, zero_poly_eval
|
|
||||||
```
|
```
|
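To see why the vanishing polynomial is useful, here is a minimal self-contained sketch (toy prime field and naive coefficient arithmetic, not spec code): the polynomial built from a set of roots is zero at exactly those roots and nowhere else, which is the property recovery relies on when it multiplies the partially known evaluations by Z(x). In the spec the roots are the bit-reversed roots of unity of the missing cells, and the short polynomial over the cell-count domain is then spread to the full domain by the coefficient placement shown above.

```python
# Toy sketch, not spec code: a small prime field and naive polynomial helpers.
MODULUS = 97

def poly_mul(a, b):
    """Multiply two coefficient-form polynomials (lowest degree first)."""
    out = [0] * (len(a) + len(b) - 1)
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            out[i + j] = (out[i + j] + x * y) % MODULUS
    return out

def vanishing_poly(roots):
    """Return the coefficients of Z(x) = product over the given roots of (x - root)."""
    poly = [1]
    for root in roots:
        poly = poly_mul(poly, [(-root) % MODULUS, 1])
    return poly

def poly_eval(coeffs, x):
    return sum(c * pow(x, i, MODULUS) for i, c in enumerate(coeffs)) % MODULUS

missing = [3, 5, 11]
z = vanishing_poly(missing)
assert all(poly_eval(z, r) == 0 for r in missing)                        # zero at the missing points
assert all(poly_eval(z, x) != 0 for x in range(20) if x not in missing)  # nonzero everywhere else
```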
||||||
|
|
||||||
### `recover_shifted_data`
|
### `recover_data`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def recover_shifted_data(cell_ids: Sequence[CellID],
|
def recover_data(cell_indices: Sequence[CellIndex],
|
||||||
cells: Sequence[Cell],
|
cells: Sequence[Cell],
|
||||||
zero_poly_eval: Sequence[BLSFieldElement],
|
) -> Sequence[BLSFieldElement]:
|
||||||
zero_poly_coeff: Sequence[BLSFieldElement],
|
|
||||||
roots_of_unity_extended: Sequence[BLSFieldElement]) -> Tuple[
|
|
||||||
Sequence[BLSFieldElement],
|
|
||||||
Sequence[BLSFieldElement],
|
|
||||||
BLSFieldElement]:
|
|
||||||
"""
|
"""
|
||||||
Given Z(x), return polynomial Q_1(x)=(E*Z)(k*x) and Q_2(x)=Z(k*x) and k^{-1}.
|
Recover the missing evaluations for the extended blob, given at least half of the evaluations.
|
||||||
"""
|
"""
|
||||||
shift_factor = BLSFieldElement(PRIMITIVE_ROOT_OF_UNITY)
|
|
||||||
shift_inv = div(BLSFieldElement(1), shift_factor)
|
|
||||||
|
|
||||||
|
# Get the extended domain. This will be referred to as the FFT domain.
|
||||||
|
roots_of_unity_extended = compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
|
||||||
|
|
||||||
|
# Flatten the cells into evaluations.
|
||||||
|
# If a cell is missing, then its evaluation is zero.
|
||||||
extended_evaluation_rbo = [0] * FIELD_ELEMENTS_PER_EXT_BLOB
|
extended_evaluation_rbo = [0] * FIELD_ELEMENTS_PER_EXT_BLOB
|
||||||
for cell_id, cell in zip(cell_ids, cells):
|
for cell_index, cell in zip(cell_indices, cells):
|
||||||
start = cell_id * FIELD_ELEMENTS_PER_CELL
|
start = cell_index * FIELD_ELEMENTS_PER_CELL
|
||||||
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
|
end = (cell_index + 1) * FIELD_ELEMENTS_PER_CELL
|
||||||
extended_evaluation_rbo[start:end] = cell
|
extended_evaluation_rbo[start:end] = cell
|
||||||
extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo)
|
extended_evaluation = bit_reversal_permutation(extended_evaluation_rbo)
|
||||||
|
|
||||||
# Compute (E*Z)(x)
|
# Compute Z(x) in monomial form
|
||||||
|
# Z(x) is the polynomial which vanishes on all of the evaluations which are missing
|
||||||
|
missing_cell_indices = [CellIndex(cell_index) for cell_index in range(CELLS_PER_EXT_BLOB)
|
||||||
|
if cell_index not in cell_indices]
|
||||||
|
zero_poly_coeff = construct_vanishing_polynomial(missing_cell_indices)
|
||||||
|
|
||||||
|
# Convert Z(x) to evaluation form over the FFT domain
|
||||||
|
zero_poly_eval = fft_field(zero_poly_coeff, roots_of_unity_extended)
|
||||||
|
|
||||||
|
# Compute (E*Z)(x) = E(x) * Z(x) in evaluation form over the FFT domain
|
||||||
extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
|
extended_evaluation_times_zero = [BLSFieldElement(int(a) * int(b) % BLS_MODULUS)
|
||||||
for a, b in zip(zero_poly_eval, extended_evaluation)]
|
for a, b in zip(zero_poly_eval, extended_evaluation)]
|
||||||
|
|
||||||
extended_evaluations_fft = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
|
# Convert (E*Z)(x) to monomial form
|
||||||
|
extended_evaluation_times_zero_coeffs = fft_field(extended_evaluation_times_zero, roots_of_unity_extended, inv=True)
|
||||||
|
|
||||||
# Compute (E*Z)(k*x)
|
# Convert (E*Z)(x) to evaluation form over a coset of the FFT domain
|
||||||
shifted_extended_evaluation = shift_polynomialcoeff(extended_evaluations_fft, shift_factor)
|
extended_evaluations_over_coset = coset_fft_field(extended_evaluation_times_zero_coeffs, roots_of_unity_extended)
|
||||||
# Compute Z(k*x)
|
|
||||||
shifted_zero_poly = shift_polynomialcoeff(zero_poly_coeff, shift_factor)
|
|
||||||
|
|
||||||
eval_shifted_extended_evaluation = fft_field(shifted_extended_evaluation, roots_of_unity_extended)
|
# Convert Z(x) to evaluation form over a coset of the FFT domain
|
||||||
eval_shifted_zero_poly = fft_field(shifted_zero_poly, roots_of_unity_extended)
|
zero_poly_over_coset = coset_fft_field(zero_poly_coeff, roots_of_unity_extended)
|
||||||
|
|
||||||
return eval_shifted_extended_evaluation, eval_shifted_zero_poly, shift_inv
|
# Compute Q_3(x) = (E*Z)(x) / Z(x) in evaluation form over a coset of the FFT domain
|
||||||
```
|
reconstructed_poly_over_coset = [
|
||||||
|
|
||||||
### `recover_original_data`
|
|
||||||
|
|
||||||
```python
|
|
||||||
def recover_original_data(eval_shifted_extended_evaluation: Sequence[BLSFieldElement],
|
|
||||||
eval_shifted_zero_poly: Sequence[BLSFieldElement],
|
|
||||||
shift_inv: BLSFieldElement,
|
|
||||||
roots_of_unity_extended: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
|
|
||||||
"""
|
|
||||||
Given Q_1, Q_2 and k^{-1}, compute P(x).
|
|
||||||
"""
|
|
||||||
# Compute Q_3 = Q_1(x)/Q_2(x) = P(k*x)
|
|
||||||
eval_shifted_reconstructed_poly = [
|
|
||||||
div(a, b)
|
div(a, b)
|
||||||
for a, b in zip(eval_shifted_extended_evaluation, eval_shifted_zero_poly)
|
for a, b in zip(extended_evaluations_over_coset, zero_poly_over_coset)
|
||||||
]
|
]
|
||||||
|
|
||||||
shifted_reconstructed_poly = fft_field(eval_shifted_reconstructed_poly, roots_of_unity_extended, inv=True)
|
# Convert Q_3(x) to monomial form
|
||||||
|
reconstructed_poly_coeff = coset_fft_field(reconstructed_poly_over_coset, roots_of_unity_extended, inv=True)
|
||||||
|
|
||||||
# Unshift P(k*x) by k^{-1} to get P(x)
|
# Convert Q_3(x) to evaluation form over the FFT domain and bit reverse the result
|
||||||
reconstructed_poly = shift_polynomialcoeff(shifted_reconstructed_poly, shift_inv)
|
reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly_coeff, roots_of_unity_extended))
|
||||||
|
|
||||||
reconstructed_data = bit_reversal_permutation(fft_field(reconstructed_poly, roots_of_unity_extended))
|
|
||||||
|
|
||||||
return reconstructed_data
|
return reconstructed_data
|
||||||
```
|
```
|
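The identity behind the recovery is that (E*Z)(x) = (P*Z)(x) at every point of the domain: where data is available E agrees with P, and where it is missing Z is zero. Because the domain is larger than the degree of P*Z, interpolating E*Z over the full domain yields P*Z exactly, and dividing out Z recovers P. The following self-contained sketch (toy prime field, naive Lagrange interpolation and long division in place of the spec's coset FFTs, not spec code) walks through that argument end to end; the spec performs the same division by moving both polynomials to a coset, where Z is guaranteed to be nonzero, dividing pointwise and transforming back.

```python
# Toy sketch, not spec code: naive polynomial helpers over a small prime field.
MODULUS = 97

def poly_mul(a, b):
    out = [0] * (len(a) + len(b) - 1)
    for i, x in enumerate(a):
        for j, y in enumerate(b):
            out[i + j] = (out[i + j] + x * y) % MODULUS
    return out

def poly_eval(coeffs, x):
    return sum(c * pow(x, i, MODULUS) for i, c in enumerate(coeffs)) % MODULUS

def vanishing_poly(roots):
    poly = [1]
    for root in roots:
        poly = poly_mul(poly, [(-root) % MODULUS, 1])
    return poly

def interpolate(xs, ys):
    """Naive Lagrange interpolation; returns coefficients, lowest degree first."""
    coeffs = [0] * len(xs)
    for i, (xi, yi) in enumerate(zip(xs, ys)):
        basis, denom = [1], 1
        for j, xj in enumerate(xs):
            if j != i:
                basis = poly_mul(basis, [(-xj) % MODULUS, 1])
                denom = denom * (xi - xj) % MODULUS
        scale = yi * pow(denom, MODULUS - 2, MODULUS) % MODULUS
        for k, c in enumerate(basis):
            coeffs[k] = (coeffs[k] + c * scale) % MODULUS
    return coeffs

def poly_div_exact(num, den):
    """Divide num by den, assuming the division leaves no remainder."""
    num, quot = list(num), [0] * (len(num) - len(den) + 1)
    inv_lead = pow(den[-1], MODULUS - 2, MODULUS)
    for i in range(len(quot) - 1, -1, -1):
        q = num[i + len(den) - 1] * inv_lead % MODULUS
        quot[i] = q
        for j, d in enumerate(den):
            num[i + j] = (num[i + j] - q * d) % MODULUS
    assert all(c == 0 for c in num)   # remainder must be zero
    return quot

# Data polynomial P of degree < 4, evaluated over an 8-point domain (2x redundancy).
P = [7, 13, 2, 5]
domain = list(range(8))
evals = [poly_eval(P, x) for x in domain]

# Erase half of the evaluations; E is zero at the missing points.
missing = [1, 4, 6, 7]
E = [0 if x in missing else evals[x] for x in domain]

# (E*Z)(x) == (P*Z)(x) on the whole domain and deg(P*Z) = 3 + 4 < 8, so
# interpolating E*Z gives P*Z exactly; dividing out Z recovers P.
Z = vanishing_poly(missing)
EZ = [E[x] * poly_eval(Z, x) % MODULUS for x in domain]
assert poly_div_exact(interpolate(domain, EZ), Z) == P
```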
||||||
|
|
||||||
### `recover_all_cells`
|
### `recover_cells_and_kzg_proofs`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def recover_all_cells(cell_ids: Sequence[CellID], cells: Sequence[Cell]) -> Sequence[Cell]:
|
def recover_cells_and_kzg_proofs(cell_indices: Sequence[CellIndex],
|
||||||
|
cells: Sequence[Cell],
|
||||||
|
proofs_bytes: Sequence[Bytes48]) -> Tuple[
|
||||||
|
Vector[Cell, CELLS_PER_EXT_BLOB],
|
||||||
|
Vector[KZGProof, CELLS_PER_EXT_BLOB]]:
|
||||||
"""
|
"""
|
||||||
Recover all of the cells in the extended blob from FIELD_ELEMENTS_PER_EXT_BLOB evaluations,
|
Given at least 50% of cells/proofs for a blob, recover all the cells/proofs.
|
||||||
half of which can be missing.
|
This algorithm uses FFTs to recover cells faster than a Lagrange interpolation
|
||||||
This algorithm uses FFTs to recover cells faster than using Lagrange implementation, as can be seen here:
|
implementation, as can be seen here:
|
||||||
https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039
|
https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039
|
||||||
|
|
||||||
A faster version thanks to Qi Zhou can be found here:
|
A faster version thanks to Qi Zhou can be found here:
|
||||||
|
@ -675,47 +660,46 @@ def recover_all_cells(cell_ids: Sequence[CellID], cells: Sequence[Cell]) -> Sequ
|
||||||
|
|
||||||
Public method.
|
Public method.
|
||||||
"""
|
"""
|
||||||
assert len(cell_ids) == len(cells)
|
assert len(cell_indices) == len(cells) == len(proofs_bytes)
|
||||||
# Check we have enough cells to be able to perform the reconstruction
|
# Check we have enough cells to be able to perform the reconstruction
|
||||||
assert CELLS_PER_EXT_BLOB / 2 <= len(cell_ids) <= CELLS_PER_EXT_BLOB
|
assert CELLS_PER_EXT_BLOB / 2 <= len(cell_indices) <= CELLS_PER_EXT_BLOB
|
||||||
# Check for duplicates
|
# Check for duplicates
|
||||||
assert len(cell_ids) == len(set(cell_ids))
|
assert len(cell_indices) == len(set(cell_indices))
|
||||||
|
# Check that the cell indices are within bounds
|
||||||
|
for cell_index in cell_indices:
|
||||||
|
assert cell_index < CELLS_PER_EXT_BLOB
|
||||||
# Check that each cell is the correct length
|
# Check that each cell is the correct length
|
||||||
for cell in cells:
|
for cell in cells:
|
||||||
assert len(cell) == BYTES_PER_CELL
|
assert len(cell) == BYTES_PER_CELL
|
||||||
|
# Check that each proof is the correct length
|
||||||
# Get the extended domain
|
for proof_bytes in proofs_bytes:
|
||||||
roots_of_unity_extended = compute_roots_of_unity(FIELD_ELEMENTS_PER_EXT_BLOB)
|
assert len(proof_bytes) == BYTES_PER_PROOF
|
||||||
|
|
||||||
# Convert cells to coset evals
|
# Convert cells to coset evals
|
||||||
cosets_evals = [cell_to_coset_evals(cell) for cell in cells]
|
cosets_evals = [cell_to_coset_evals(cell) for cell in cells]
|
||||||
|
|
||||||
missing_cell_ids = [CellID(cell_id) for cell_id in range(CELLS_PER_EXT_BLOB) if cell_id not in cell_ids]
|
reconstructed_data = recover_data(cell_indices, cosets_evals)
|
||||||
zero_poly_coeff, zero_poly_eval = construct_vanishing_polynomial(missing_cell_ids)
|
|
||||||
|
|
||||||
eval_shifted_extended_evaluation, eval_shifted_zero_poly, shift_inv = recover_shifted_data(
|
for cell_index, coset_evals in zip(cell_indices, cosets_evals):
|
||||||
cell_ids,
|
start = cell_index * FIELD_ELEMENTS_PER_CELL
|
||||||
cosets_evals,
|
end = (cell_index + 1) * FIELD_ELEMENTS_PER_CELL
|
||||||
zero_poly_eval,
|
|
||||||
zero_poly_coeff,
|
|
||||||
roots_of_unity_extended,
|
|
||||||
)
|
|
||||||
|
|
||||||
reconstructed_data = recover_original_data(
|
|
||||||
eval_shifted_extended_evaluation,
|
|
||||||
eval_shifted_zero_poly,
|
|
||||||
shift_inv,
|
|
||||||
roots_of_unity_extended,
|
|
||||||
)
|
|
||||||
|
|
||||||
for cell_id, coset_evals in zip(cell_ids, cosets_evals):
|
|
||||||
start = cell_id * FIELD_ELEMENTS_PER_CELL
|
|
||||||
end = (cell_id + 1) * FIELD_ELEMENTS_PER_CELL
|
|
||||||
assert reconstructed_data[start:end] == coset_evals
|
assert reconstructed_data[start:end] == coset_evals
|
||||||
|
|
||||||
reconstructed_data_as_cells = [
|
recovered_cells = [
|
||||||
coset_evals_to_cell(reconstructed_data[i * FIELD_ELEMENTS_PER_CELL:(i + 1) * FIELD_ELEMENTS_PER_CELL])
|
coset_evals_to_cell(reconstructed_data[i * FIELD_ELEMENTS_PER_CELL:(i + 1) * FIELD_ELEMENTS_PER_CELL])
|
||||||
for i in range(CELLS_PER_EXT_BLOB)]
|
for i in range(CELLS_PER_EXT_BLOB)]
|
||||||
|
|
||||||
return reconstructed_data_as_cells
|
polynomial_eval = reconstructed_data[:FIELD_ELEMENTS_PER_BLOB]
|
||||||
|
polynomial_coeff = polynomial_eval_to_coeff(polynomial_eval)
|
||||||
|
recovered_proofs = [None] * CELLS_PER_EXT_BLOB
|
||||||
|
for i, cell_index in enumerate(cell_indices):
|
||||||
|
recovered_proofs[cell_index] = bytes_to_kzg_proof(proofs_bytes[i])
|
||||||
|
for i in range(CELLS_PER_EXT_BLOB):
|
||||||
|
if recovered_proofs[i] is None:
|
||||||
|
coset = coset_for_cell(CellIndex(i))
|
||||||
|
proof, ys = compute_kzg_proof_multi_impl(polynomial_coeff, coset)
|
||||||
|
assert coset_evals_to_cell(ys) == recovered_cells[i]
|
||||||
|
recovered_proofs[i] = proof
|
||||||
|
|
||||||
|
return recovered_cells, recovered_proofs
|
||||||
```
|
```
|
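A quick end-to-end exercise of the public methods above might look as follows. This is only a usage sketch: the import path is an assumption about how the generated pyspec is packaged, and the all-zero blob is just a convenient valid input.

```python
# Usage sketch, not spec code. The module path below is an assumption about how
# the generated pyspec is laid out; adjust it to your build.
from eth2spec.eip7594 import mainnet as spec  # assumed import path

blob = spec.Blob(b'\x00' * spec.BYTES_PER_BLOB)   # any valid blob will do
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)

# Keep only the first half of the cells/proofs and recover the rest.
kept = list(range(spec.CELLS_PER_EXT_BLOB // 2))
recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(
    cell_indices=kept,
    cells=[cells[i] for i in kept],
    proofs_bytes=[proofs[i] for i in kept],
)
assert list(recovered_cells) == list(cells)
assert list(recovered_proofs) == list(proofs)
```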
||||||
|
|
|
@ -24,12 +24,11 @@
|
||||||
- [Validator cycle](#validator-cycle)
|
- [Validator cycle](#validator-cycle)
|
||||||
- [Containers](#containers)
|
- [Containers](#containers)
|
||||||
- [New containers](#new-containers)
|
- [New containers](#new-containers)
|
||||||
- [`DepositReceipt`](#depositreceipt)
|
- [`DepositRequest`](#depositrequest)
|
||||||
- [`PendingBalanceDeposit`](#pendingbalancedeposit)
|
- [`PendingBalanceDeposit`](#pendingbalancedeposit)
|
||||||
- [`PendingPartialWithdrawal`](#pendingpartialwithdrawal)
|
- [`PendingPartialWithdrawal`](#pendingpartialwithdrawal)
|
||||||
- [`ExecutionLayerWithdrawalRequest`](#executionlayerwithdrawalrequest)
|
- [`WithdrawalRequest`](#withdrawalrequest)
|
||||||
- [`Consolidation`](#consolidation)
|
- [`ConsolidationRequest`](#consolidationrequest)
|
||||||
- [`SignedConsolidation`](#signedconsolidation)
|
|
||||||
- [`PendingConsolidation`](#pendingconsolidation)
|
- [`PendingConsolidation`](#pendingconsolidation)
|
||||||
- [Modified Containers](#modified-containers)
|
- [Modified Containers](#modified-containers)
|
||||||
- [`AttesterSlashing`](#attesterslashing)
|
- [`AttesterSlashing`](#attesterslashing)
|
||||||
|
@ -42,6 +41,7 @@
|
||||||
- [`BeaconState`](#beaconstate)
|
- [`BeaconState`](#beaconstate)
|
||||||
- [Helper functions](#helper-functions)
|
- [Helper functions](#helper-functions)
|
||||||
- [Predicates](#predicates)
|
- [Predicates](#predicates)
|
||||||
|
- [Updated `compute_proposer_index`](#updated-compute_proposer_index)
|
||||||
- [Updated `is_eligible_for_activation_queue`](#updated-is_eligible_for_activation_queue)
|
- [Updated `is_eligible_for_activation_queue`](#updated-is_eligible_for_activation_queue)
|
||||||
- [New `is_compounding_withdrawal_credential`](#new-is_compounding_withdrawal_credential)
|
- [New `is_compounding_withdrawal_credential`](#new-is_compounding_withdrawal_credential)
|
||||||
- [New `has_compounding_withdrawal_credential`](#new-has_compounding_withdrawal_credential)
|
- [New `has_compounding_withdrawal_credential`](#new-has_compounding_withdrawal_credential)
|
||||||
|
@ -58,6 +58,7 @@
|
||||||
- [New `get_active_balance`](#new-get_active_balance)
|
- [New `get_active_balance`](#new-get_active_balance)
|
||||||
- [New `get_pending_balance_to_withdraw`](#new-get_pending_balance_to_withdraw)
|
- [New `get_pending_balance_to_withdraw`](#new-get_pending_balance_to_withdraw)
|
||||||
- [Modified `get_attesting_indices`](#modified-get_attesting_indices)
|
- [Modified `get_attesting_indices`](#modified-get_attesting_indices)
|
||||||
|
- [Modified `get_next_sync_committee_indices`](#modified-get_next_sync_committee_indices)
|
||||||
- [Beacon state mutators](#beacon-state-mutators)
|
- [Beacon state mutators](#beacon-state-mutators)
|
||||||
- [Updated `initiate_validator_exit`](#updated--initiate_validator_exit)
|
- [Updated `initiate_validator_exit`](#updated--initiate_validator_exit)
|
||||||
- [New `switch_to_compounding_validator`](#new-switch_to_compounding_validator)
|
- [New `switch_to_compounding_validator`](#new-switch_to_compounding_validator)
|
||||||
|
@ -91,11 +92,11 @@
|
||||||
- [Voluntary exits](#voluntary-exits)
|
- [Voluntary exits](#voluntary-exits)
|
||||||
- [Updated `process_voluntary_exit`](#updated-process_voluntary_exit)
|
- [Updated `process_voluntary_exit`](#updated-process_voluntary_exit)
|
||||||
- [Execution layer withdrawal requests](#execution-layer-withdrawal-requests)
|
- [Execution layer withdrawal requests](#execution-layer-withdrawal-requests)
|
||||||
- [New `process_execution_layer_withdrawal_request`](#new-process_execution_layer_withdrawal_request)
|
- [New `process_withdrawal_request`](#new-process_withdrawal_request)
|
||||||
- [Deposit receipts](#deposit-receipts)
|
- [Deposit requests](#deposit-requests)
|
||||||
- [New `process_deposit_receipt`](#new-process_deposit_receipt)
|
- [New `process_deposit_request`](#new-process_deposit_request)
|
||||||
- [Consolidations](#consolidations)
|
- [Execution layer consolidation requests](#execution-layer-consolidation-requests)
|
||||||
- [New `process_consolidation`](#new-process_consolidation)
|
- [New `process_consolidation_request`](#new-process_consolidation_request)
|
||||||
- [Testing](#testing)
|
- [Testing](#testing)
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
@ -119,15 +120,13 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
| Name | Value | Description |
|
| Name | Value | Description |
|
||||||
| - | - | - |
|
| - | - | - |
|
||||||
| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | *[New in Electra:EIP6110]* |
|
| `UNSET_DEPOSIT_REQUESTS_START_INDEX` | `uint64(2**64 - 1)` | *[New in Electra:EIP6110]* |
|
||||||
| `FULL_EXIT_REQUEST_AMOUNT` | `uint64(0)` | *[New in Electra:EIP7002]* |
|
| `FULL_EXIT_REQUEST_AMOUNT` | `uint64(0)` | *[New in Electra:EIP7002]* |
|
||||||
|
|
||||||
### Withdrawal prefixes
|
### Withdrawal prefixes
|
||||||
|
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
| `BLS_WITHDRAWAL_PREFIX` | `Bytes1('0x00')` |
|
|
||||||
| `ETH1_ADDRESS_WITHDRAWAL_PREFIX` | `Bytes1('0x01')` |
|
|
||||||
| `COMPOUNDING_WITHDRAWAL_PREFIX` | `Bytes1('0x02')` |
|
| `COMPOUNDING_WITHDRAWAL_PREFIX` | `Bytes1('0x02')` |
|
||||||
|
|
||||||
### Domains
|
### Domains
|
||||||
|
@ -164,16 +163,16 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
| Name | Value |
|
| Name | Value |
|
||||||
| - | - |
|
| - | - |
|
||||||
| `MAX_CONSOLIDATIONS` | `uint64(1)` |
|
| `MAX_ATTESTER_SLASHINGS_ELECTRA` | `2**0` (= 1) | *[New in Electra:EIP7549]* |
|
||||||
|
| `MAX_ATTESTATIONS_ELECTRA` | `2**3` (= 8) | *[New in Electra:EIP7549]* |
|
||||||
|
|
||||||
### Execution
|
### Execution
|
||||||
|
|
||||||
| Name | Value | Description |
|
| Name | Value | Description |
|
||||||
| - | - | - |
|
| - | - | - |
|
||||||
| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | *[New in Electra:EIP6110]* Maximum number of deposit receipts allowed in each payload |
|
| `MAX_DEPOSIT_REQUESTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | *[New in Electra:EIP6110]* Maximum number of deposit requests allowed in each payload |
|
||||||
| `MAX_ATTESTER_SLASHINGS_ELECTRA` | `2**0` (= 1) | *[New in Electra:EIP7549]* |
|
|
||||||
| `MAX_ATTESTATIONS_ELECTRA` | `2**3` (= 8) | *[New in Electra:EIP7549]* |
|
|
||||||
| `MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD` | `uint64(2**4)` (= 16)| *[New in Electra:EIP7002]* Maximum number of execution layer withdrawal requests in each payload |
|
| `MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD` | `uint64(2**4)` (= 16)| *[New in Electra:EIP7002]* Maximum number of execution layer withdrawal requests in each payload |
|
||||||
|
| `MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD` | `uint64(1)` (= 1) | *[New in Electra:EIP7251]* Maximum number of execution layer consolidation requests in each payload |
|
||||||
|
|
||||||
### Withdrawals processing
|
### Withdrawals processing
|
||||||
|
|
||||||
|
@ -194,12 +193,12 @@ The following values are (non-configurable) constants used throughout the specif
|
||||||
|
|
||||||
### New containers
|
### New containers
|
||||||
|
|
||||||
#### `DepositReceipt`
|
#### `DepositRequest`
|
||||||
|
|
||||||
*Note*: The container is new in EIP6110.
|
*Note*: The container is new in EIP6110.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class DepositReceipt(Container):
|
class DepositRequest(Container):
|
||||||
pubkey: BLSPubkey
|
pubkey: BLSPubkey
|
||||||
withdrawal_credentials: Bytes32
|
withdrawal_credentials: Bytes32
|
||||||
amount: Gwei
|
amount: Gwei
|
||||||
|
@ -227,36 +226,26 @@ class PendingPartialWithdrawal(Container):
|
||||||
amount: Gwei
|
amount: Gwei
|
||||||
withdrawable_epoch: Epoch
|
withdrawable_epoch: Epoch
|
||||||
```
|
```
|
||||||
#### `ExecutionLayerWithdrawalRequest`
|
#### `WithdrawalRequest`
|
||||||
|
|
||||||
*Note*: The container is new in EIP7251:EIP7002.
|
*Note*: The container is new in EIP7251:EIP7002.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class ExecutionLayerWithdrawalRequest(Container):
|
class WithdrawalRequest(Container):
|
||||||
source_address: ExecutionAddress
|
source_address: ExecutionAddress
|
||||||
validator_pubkey: BLSPubkey
|
validator_pubkey: BLSPubkey
|
||||||
amount: Gwei
|
amount: Gwei
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `Consolidation`
|
#### `ConsolidationRequest`
|
||||||
|
|
||||||
*Note*: The container is new in EIP7251.
|
*Note*: The container is new in EIP7251.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
class Consolidation(Container):
|
class ConsolidationRequest(Container):
|
||||||
source_index: ValidatorIndex
|
source_address: ExecutionAddress
|
||||||
target_index: ValidatorIndex
|
source_pubkey: BLSPubkey
|
||||||
epoch: Epoch
|
target_pubkey: BLSPubkey
|
||||||
```
|
|
||||||
|
|
||||||
#### `SignedConsolidation`
|
|
||||||
|
|
||||||
*Note*: The container is new in EIP7251.
|
|
||||||
|
|
||||||
```python
|
|
||||||
class SignedConsolidation(Container):
|
|
||||||
message: Consolidation
|
|
||||||
signature: BLSSignature
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `PendingConsolidation`
|
#### `PendingConsolidation`
|
||||||
|
@ -287,8 +276,8 @@ class AttesterSlashing(Container):
|
||||||
class Attestation(Container):
|
class Attestation(Container):
|
||||||
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT] # [Modified in Electra:EIP7549]
|
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE * MAX_COMMITTEES_PER_SLOT] # [Modified in Electra:EIP7549]
|
||||||
data: AttestationData
|
data: AttestationData
|
||||||
committee_bits: Bitvector[MAX_COMMITTEES_PER_SLOT] # [New in Electra:EIP7549]
|
|
||||||
signature: BLSSignature
|
signature: BLSSignature
|
||||||
|
committee_bits: Bitvector[MAX_COMMITTEES_PER_SLOT] # [New in Electra:EIP7549]
|
||||||
```
|
```
|
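For intuition on the new layout, here is a small self-contained sketch (illustrative committee sizes, not spec code) of how `committee_bits` and the widened `aggregation_bits` fit together under EIP-7549: the aggregation bits are the participation bitfields of the participating committees concatenated in committee order, which is exactly what the modified `get_attesting_indices` unpacks.

```python
# Toy sketch, not spec code: unpack an EIP-7549 style attestation layout.
committee_sizes = {0: 3, 1: 2, 2: 4}       # committee index -> size (illustrative)
committee_bits = [True, False, True]       # committees 0 and 2 participate
aggregation_bits = [1, 0, 1,               # committee 0: members 0 and 2 attested
                    0, 1, 1, 1]            # committee 2: members 1, 2 and 3 attested

def attesting_members(committee_bits, aggregation_bits, committee_sizes):
    """Return, per participating committee, the member positions whose bit is set."""
    out, offset = {}, 0
    for index, participating in enumerate(committee_bits):
        if not participating:
            continue
        size = committee_sizes[index]
        bits = aggregation_bits[offset:offset + size]
        out[index] = [i for i, bit in enumerate(bits) if bit]
        offset += size
    return out

assert attesting_members(committee_bits, aggregation_bits, committee_sizes) == {0: [0, 2], 2: [1, 2, 3]}
```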
||||||
|
|
||||||
#### `IndexedAttestation`
|
#### `IndexedAttestation`
|
||||||
|
@ -319,7 +308,6 @@ class BeaconBlockBody(Container):
|
||||||
execution_payload: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
|
execution_payload: ExecutionPayload # [Modified in Electra:EIP6110:EIP7002]
|
||||||
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
|
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
|
||||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
blob_kzg_commitments: List[KZGCommitment, MAX_BLOB_COMMITMENTS_PER_BLOCK]
|
||||||
consolidations: List[SignedConsolidation, MAX_CONSOLIDATIONS] # [New in Electra:EIP7251]
|
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `ExecutionPayload`
|
#### `ExecutionPayload`
|
||||||
|
@ -345,9 +333,11 @@ class ExecutionPayload(Container):
|
||||||
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||||
blob_gas_used: uint64
|
blob_gas_used: uint64
|
||||||
excess_blob_gas: uint64
|
excess_blob_gas: uint64
|
||||||
deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in Electra:EIP6110]
|
deposit_requests: List[DepositRequest, MAX_DEPOSIT_REQUESTS_PER_PAYLOAD] # [New in Electra:EIP6110]
|
||||||
# [New in Electra:EIP7002:EIP7251]
|
# [New in Electra:EIP7002:EIP7251]
|
||||||
withdrawal_requests: List[ExecutionLayerWithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]
|
withdrawal_requests: List[WithdrawalRequest, MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD]
|
||||||
|
# [New in Electra:EIP7251]
|
||||||
|
consolidation_requests: List[ConsolidationRequest, MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `ExecutionPayloadHeader`
|
#### `ExecutionPayloadHeader`
|
||||||
|
@ -373,8 +363,9 @@ class ExecutionPayloadHeader(Container):
|
||||||
withdrawals_root: Root
|
withdrawals_root: Root
|
||||||
blob_gas_used: uint64
|
blob_gas_used: uint64
|
||||||
excess_blob_gas: uint64
|
excess_blob_gas: uint64
|
||||||
deposit_receipts_root: Root # [New in Electra:EIP6110]
|
deposit_requests_root: Root # [New in Electra:EIP6110]
|
||||||
withdrawal_requests_root: Root # [New in Electra:EIP7002:EIP7251]
|
withdrawal_requests_root: Root # [New in Electra:EIP7002:EIP7251]
|
||||||
|
consolidation_requests_root: Root # [New in Electra:EIP7251]
|
||||||
```
|
```
|
||||||
|
|
||||||
#### `BeaconState`
|
#### `BeaconState`
|
||||||
|
@ -422,7 +413,7 @@ class BeaconState(Container):
|
||||||
next_withdrawal_validator_index: ValidatorIndex
|
next_withdrawal_validator_index: ValidatorIndex
|
||||||
# Deep history valid from Capella onwards
|
# Deep history valid from Capella onwards
|
||||||
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
|
historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT]
|
||||||
deposit_receipts_start_index: uint64 # [New in Electra:EIP6110]
|
deposit_requests_start_index: uint64 # [New in Electra:EIP6110]
|
||||||
deposit_balance_to_consume: Gwei # [New in Electra:EIP7251]
|
deposit_balance_to_consume: Gwei # [New in Electra:EIP7251]
|
||||||
exit_balance_to_consume: Gwei # [New in Electra:EIP7251]
|
exit_balance_to_consume: Gwei # [New in Electra:EIP7251]
|
||||||
earliest_exit_epoch: Epoch # [New in Electra:EIP7251]
|
earliest_exit_epoch: Epoch # [New in Electra:EIP7251]
|
||||||
|
@ -438,6 +429,29 @@ class BeaconState(Container):
|
||||||
|
|
||||||
### Predicates
|
### Predicates
|
||||||
|
|
||||||
|
#### Updated `compute_proposer_index`
|
||||||
|
|
||||||
|
*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` preset.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def compute_proposer_index(state: BeaconState, indices: Sequence[ValidatorIndex], seed: Bytes32) -> ValidatorIndex:
|
||||||
|
"""
|
||||||
|
Return from ``indices`` a random index sampled by effective balance.
|
||||||
|
"""
|
||||||
|
assert len(indices) > 0
|
||||||
|
MAX_RANDOM_BYTE = 2**8 - 1
|
||||||
|
i = uint64(0)
|
||||||
|
total = uint64(len(indices))
|
||||||
|
while True:
|
||||||
|
candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
|
||||||
|
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||||
|
effective_balance = state.validators[candidate_index].effective_balance
|
||||||
|
# [Modified in Electra:EIP7251]
|
||||||
|
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
|
||||||
|
return candidate_index
|
||||||
|
i += 1
|
||||||
|
```
|
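The acceptance test in the loop above is rejection sampling proportional to effective balance, with the cap raised to `MAX_EFFECTIVE_BALANCE_ELECTRA`. A small self-contained simulation (illustrative constants and a plain PRNG instead of the spec's hash-derived bytes, not spec code) shows the effect: a 2048 ETH validator is selected roughly 64 times as often as a 32 ETH validator.

```python
# Toy sketch, not spec code: balance-weighted proposer sampling by rejection.
import random

MAX_EFFECTIVE_BALANCE_ELECTRA = 2048 * 10**9  # Gwei
MAX_RANDOM_BYTE = 2**8 - 1

def pick(balances, rng):
    while True:
        candidate = rng.randrange(len(balances))  # stands in for the shuffled index
        random_byte = rng.randrange(256)          # stands in for the hash-derived byte
        if balances[candidate] * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
            return candidate

rng = random.Random(1337)
balances = [32 * 10**9, 2048 * 10**9]  # a 32 ETH validator vs a 2048 ETH validator
wins = [0, 0]
for _ in range(20_000):
    wins[pick(balances, rng)] += 1
print(wins[1] / wins[0])  # roughly 64, i.e. proportional to the balance ratio
```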
||||||
|
|
||||||
#### Updated `is_eligible_for_activation_queue`
|
#### Updated `is_eligible_for_activation_queue`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -602,6 +616,36 @@ def get_attesting_indices(state: BeaconState, attestation: Attestation) -> Set[V
|
||||||
return output
|
return output
|
||||||
```
|
```
|
||||||
|
|
||||||
|
#### Modified `get_next_sync_committee_indices`
|
||||||
|
|
||||||
|
*Note*: The function is modified to use `MAX_EFFECTIVE_BALANCE_ELECTRA` preset.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_next_sync_committee_indices(state: BeaconState) -> Sequence[ValidatorIndex]:
|
||||||
|
"""
|
||||||
|
Return the sync committee indices, with possible duplicates, for the next sync committee.
|
||||||
|
"""
|
||||||
|
epoch = Epoch(get_current_epoch(state) + 1)
|
||||||
|
|
||||||
|
MAX_RANDOM_BYTE = 2**8 - 1
|
||||||
|
active_validator_indices = get_active_validator_indices(state, epoch)
|
||||||
|
active_validator_count = uint64(len(active_validator_indices))
|
||||||
|
seed = get_seed(state, epoch, DOMAIN_SYNC_COMMITTEE)
|
||||||
|
i = 0
|
||||||
|
sync_committee_indices: List[ValidatorIndex] = []
|
||||||
|
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
|
||||||
|
shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
|
||||||
|
candidate_index = active_validator_indices[shuffled_index]
|
||||||
|
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||||
|
effective_balance = state.validators[candidate_index].effective_balance
|
||||||
|
# [Modified in Electra:EIP7251]
|
||||||
|
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE_ELECTRA * random_byte:
|
||||||
|
sync_committee_indices.append(candidate_index)
|
||||||
|
i += 1
|
||||||
|
return sync_committee_indices
|
||||||
|
```
|
||||||
|
|
||||||
|
|
||||||
### Beacon state mutators
|
### Beacon state mutators
|
||||||
|
|
||||||
#### Updated `initiate_validator_exit`
|
#### Updated `initiate_validator_exit`
|
||||||
|
@ -798,12 +842,27 @@ def process_pending_balance_deposits(state: BeaconState) -> None:
|
||||||
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
available_for_processing = state.deposit_balance_to_consume + get_activation_exit_churn_limit(state)
|
||||||
processed_amount = 0
|
processed_amount = 0
|
||||||
next_deposit_index = 0
|
next_deposit_index = 0
|
||||||
|
deposits_to_postpone = []
|
||||||
|
|
||||||
for deposit in state.pending_balance_deposits:
|
for deposit in state.pending_balance_deposits:
|
||||||
|
validator = state.validators[deposit.index]
|
||||||
|
# Validator is exiting, postpone the deposit until after withdrawable epoch
|
||||||
|
if validator.exit_epoch < FAR_FUTURE_EPOCH:
|
||||||
|
if get_current_epoch(state) <= validator.withdrawable_epoch:
|
||||||
|
deposits_to_postpone.append(deposit)
|
||||||
|
# Deposited balance will never become active. Increase balance but do not consume churn
|
||||||
|
else:
|
||||||
|
increase_balance(state, deposit.index, deposit.amount)
|
||||||
|
# Validator is not exiting, attempt to process deposit
|
||||||
|
else:
|
||||||
|
# Deposit does not fit in the churn, no more deposit processing in this epoch.
|
||||||
if processed_amount + deposit.amount > available_for_processing:
|
if processed_amount + deposit.amount > available_for_processing:
|
||||||
break
|
break
|
||||||
|
# Deposit fits in the churn, process it. Increase balance and consume churn.
|
||||||
|
else:
|
||||||
increase_balance(state, deposit.index, deposit.amount)
|
increase_balance(state, deposit.index, deposit.amount)
|
||||||
processed_amount += deposit.amount
|
processed_amount += deposit.amount
|
||||||
|
# Regardless of how the deposit was handled, we move on in the queue.
|
||||||
next_deposit_index += 1
|
next_deposit_index += 1
|
||||||
|
|
||||||
state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
state.pending_balance_deposits = state.pending_balance_deposits[next_deposit_index:]
|
||||||
|
@ -812,6 +871,8 @@ def process_pending_balance_deposits(state: BeaconState) -> None:
|
||||||
state.deposit_balance_to_consume = Gwei(0)
|
state.deposit_balance_to_consume = Gwei(0)
|
||||||
else:
|
else:
|
||||||
state.deposit_balance_to_consume = available_for_processing - processed_amount
|
state.deposit_balance_to_consume = available_for_processing - processed_amount
|
||||||
|
|
||||||
|
state.pending_balance_deposits += deposits_to_postpone
|
||||||
```
|
```
|
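The control flow above interleaves three cases: postpone deposits for validators that are exiting but not yet withdrawable, credit deposits for fully withdrawable validators without consuming churn, and otherwise consume churn until it runs out for the epoch. A compact self-contained sketch of that bookkeeping (plain tuples, illustrative numbers, and omitting the carried-over `deposit_balance_to_consume`, not spec code):

```python
# Toy sketch, not spec code: ordering rules of the pending-deposit queue.
FAR_FUTURE_EPOCH = 2**64 - 1

def process_pending_deposits(queue, balances, exit_epoch, withdrawable_epoch, current_epoch, churn):
    """Return the new queue and updated balances after one epoch of processing."""
    postponed, consumed, next_index = [], 0, 0
    for dep in queue:
        index, amount = dep
        if exit_epoch[index] < FAR_FUTURE_EPOCH:
            if current_epoch <= withdrawable_epoch[index]:
                postponed.append(dep)          # exiting: revisit after the withdrawable epoch
            else:
                balances[index] += amount      # already withdrawable: top up, no churn used
        else:
            if consumed + amount > churn:
                break                          # churn exhausted: stop for this epoch
            balances[index] += amount
            consumed += amount
        next_index += 1
    return queue[next_index:] + postponed, balances

balances = [0, 0, 0]
exit_epoch = [FAR_FUTURE_EPOCH, 10, FAR_FUTURE_EPOCH]
withdrawable_epoch = [FAR_FUTURE_EPOCH, 20, FAR_FUTURE_EPOCH]
queue = [(0, 5), (1, 7), (2, 6)]
queue, balances = process_pending_deposits(
    queue, balances, exit_epoch, withdrawable_epoch, current_epoch=15, churn=8)
assert balances == [5, 0, 0]        # only deposit 0 fits in a churn of 8
assert queue == [(2, 6), (1, 7)]    # deposit 2 stays queued; deposit 1 is postponed to the back
```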
||||||
|
|
||||||
#### New `process_pending_consolidations`
|
#### New `process_pending_consolidations`
|
||||||
|
@ -1011,8 +1072,9 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
|
||||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||||
blob_gas_used=payload.blob_gas_used,
|
blob_gas_used=payload.blob_gas_used,
|
||||||
excess_blob_gas=payload.excess_blob_gas,
|
excess_blob_gas=payload.excess_blob_gas,
|
||||||
deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in Electra:EIP6110]
|
deposit_requests_root=hash_tree_root(payload.deposit_requests), # [New in Electra:EIP6110]
|
||||||
withdrawal_requests_root=hash_tree_root(payload.withdrawal_requests), # [New in Electra:EIP7002:EIP7251]
|
withdrawal_requests_root=hash_tree_root(payload.withdrawal_requests), # [New in Electra:EIP7002:EIP7251]
|
||||||
|
consolidation_requests_root=hash_tree_root(payload.consolidation_requests), # [New in Electra:EIP7251]
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -1026,7 +1088,7 @@ def process_execution_payload(state: BeaconState, body: BeaconBlockBody, executi
|
||||||
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||||
# [Modified in Electra:EIP6110]
|
# [Modified in Electra:EIP6110]
|
||||||
# Disable former deposit mechanism once all prior deposits are processed
|
# Disable former deposit mechanism once all prior deposits are processed
|
||||||
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||||
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||||
assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||||
else:
|
else:
|
||||||
|
@ -1042,10 +1104,11 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||||
for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
|
for_ops(body.deposits, process_deposit) # [Modified in Electra:EIP7251]
|
||||||
for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
|
for_ops(body.voluntary_exits, process_voluntary_exit) # [Modified in Electra:EIP7251]
|
||||||
for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
for_ops(body.bls_to_execution_changes, process_bls_to_execution_change)
|
||||||
|
for_ops(body.execution_payload.deposit_requests, process_deposit_request) # [New in Electra:EIP6110]
|
||||||
# [New in Electra:EIP7002:EIP7251]
|
# [New in Electra:EIP7002:EIP7251]
|
||||||
for_ops(body.execution_payload.withdrawal_requests, process_execution_layer_withdrawal_request)
|
for_ops(body.execution_payload.withdrawal_requests, process_withdrawal_request)
|
||||||
for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) # [New in Electra:EIP6110]
|
# [New in Electra:EIP7251]
|
||||||
for_ops(body.consolidations, process_consolidation) # [New in Electra:EIP7251]
|
for_ops(body.execution_payload.consolidation_requests, process_consolidation_request)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Attestations
|
##### Attestations
|
||||||
|
@ -1204,16 +1267,16 @@ def process_voluntary_exit(state: BeaconState, signed_voluntary_exit: SignedVolu
|
||||||
|
|
||||||
##### Execution layer withdrawal requests
|
##### Execution layer withdrawal requests
|
||||||
|
|
||||||
###### New `process_execution_layer_withdrawal_request`
|
###### New `process_withdrawal_request`
|
||||||
|
|
||||||
*Note*: This function is new in Electra following EIP-7002 and EIP-7251.
|
*Note*: This function is new in Electra following EIP-7002 and EIP-7251.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_execution_layer_withdrawal_request(
|
def process_withdrawal_request(
|
||||||
state: BeaconState,
|
state: BeaconState,
|
||||||
execution_layer_withdrawal_request: ExecutionLayerWithdrawalRequest
|
withdrawal_request: WithdrawalRequest
|
||||||
) -> None:
|
) -> None:
|
||||||
amount = execution_layer_withdrawal_request.amount
|
amount = withdrawal_request.amount
|
||||||
is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
|
is_full_exit_request = amount == FULL_EXIT_REQUEST_AMOUNT
|
||||||
|
|
||||||
# If partial withdrawal queue is full, only full exits are processed
|
# If partial withdrawal queue is full, only full exits are processed
|
||||||
|
@ -1222,7 +1285,7 @@ def process_execution_layer_withdrawal_request(
|
||||||
|
|
||||||
validator_pubkeys = [v.pubkey for v in state.validators]
|
validator_pubkeys = [v.pubkey for v in state.validators]
|
||||||
# Verify pubkey exists
|
# Verify pubkey exists
|
||||||
request_pubkey = execution_layer_withdrawal_request.validator_pubkey
|
request_pubkey = withdrawal_request.validator_pubkey
|
||||||
if request_pubkey not in validator_pubkeys:
|
if request_pubkey not in validator_pubkeys:
|
||||||
return
|
return
|
||||||
index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
|
index = ValidatorIndex(validator_pubkeys.index(request_pubkey))
|
||||||
|
@ -1231,7 +1294,7 @@ def process_execution_layer_withdrawal_request(
|
||||||
# Verify withdrawal credentials
|
# Verify withdrawal credentials
|
||||||
has_correct_credential = has_execution_withdrawal_credential(validator)
|
has_correct_credential = has_execution_withdrawal_credential(validator)
|
||||||
is_correct_source_address = (
|
is_correct_source_address = (
|
||||||
validator.withdrawal_credentials[12:] == execution_layer_withdrawal_request.source_address
|
validator.withdrawal_credentials[12:] == withdrawal_request.source_address
|
||||||
)
|
)
|
||||||
if not (has_correct_credential and is_correct_source_address):
|
if not (has_correct_credential and is_correct_source_address):
|
||||||
return
|
return
|
||||||
|
@ -1271,64 +1334,83 @@ def process_execution_layer_withdrawal_request(
|
||||||
))
|
))
|
||||||
```
|
```
|
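Both `process_withdrawal_request` above and the consolidation-request processing below gate on the same authorization rule: the request's `source_address` must match the last 20 bytes of the validator's execution-type (0x01 or 0x02) withdrawal credentials. A tiny self-contained sketch of that check (raw bytes instead of spec containers, not spec code):

```python
# Toy sketch, not spec code: the execution-address authorization check.
ETH1_ADDRESS_WITHDRAWAL_PREFIX = b'\x01'
COMPOUNDING_WITHDRAWAL_PREFIX = b'\x02'

def has_execution_withdrawal_credential(withdrawal_credentials: bytes) -> bool:
    return withdrawal_credentials[:1] in (ETH1_ADDRESS_WITHDRAWAL_PREFIX,
                                          COMPOUNDING_WITHDRAWAL_PREFIX)

def is_authorized(withdrawal_credentials: bytes, source_address: bytes) -> bool:
    return (has_execution_withdrawal_credential(withdrawal_credentials)
            and withdrawal_credentials[12:] == source_address)

address = bytes.fromhex('00' * 19 + 'aa')
credentials = COMPOUNDING_WITHDRAWAL_PREFIX + b'\x00' * 11 + address
assert is_authorized(credentials, address)
assert not is_authorized(credentials, b'\x00' * 20)            # request from the wrong address
assert not is_authorized(b'\x00' + credentials[1:], address)   # BLS (0x00) credentials: ignored
```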
||||||
|
|
||||||
##### Deposit receipts
|
##### Deposit requests
|
||||||
|
|
||||||
###### New `process_deposit_receipt`
|
###### New `process_deposit_request`
|
||||||
|
|
||||||
*Note*: This function is new in Electra:EIP6110.
|
*Note*: This function is new in Electra:EIP6110.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None:
|
def process_deposit_request(state: BeaconState, deposit_request: DepositRequest) -> None:
|
||||||
# Set deposit receipt start index
|
# Set deposit request start index
|
||||||
if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX:
|
if state.deposit_requests_start_index == UNSET_DEPOSIT_REQUESTS_START_INDEX:
|
||||||
state.deposit_receipts_start_index = deposit_receipt.index
|
state.deposit_requests_start_index = deposit_request.index
|
||||||
|
|
||||||
apply_deposit(
|
apply_deposit(
|
||||||
state=state,
|
state=state,
|
||||||
pubkey=deposit_receipt.pubkey,
|
pubkey=deposit_request.pubkey,
|
||||||
withdrawal_credentials=deposit_receipt.withdrawal_credentials,
|
withdrawal_credentials=deposit_request.withdrawal_credentials,
|
||||||
amount=deposit_receipt.amount,
|
amount=deposit_request.amount,
|
||||||
signature=deposit_receipt.signature,
|
signature=deposit_request.signature,
|
||||||
)
|
)
|
||||||
```
|
```
|
||||||
|
|
||||||
##### Consolidations
|
##### Execution layer consolidation requests
|
||||||
|
|
||||||
###### New `process_consolidation`
|
###### New `process_consolidation_request`
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def process_consolidation(state: BeaconState, signed_consolidation: SignedConsolidation) -> None:
|
def process_consolidation_request(
|
||||||
# If the pending consolidations queue is full, no consolidations are allowed in the block
|
state: BeaconState,
|
||||||
assert len(state.pending_consolidations) < PENDING_CONSOLIDATIONS_LIMIT
|
consolidation_request: ConsolidationRequest
|
||||||
# If there is too little available consolidation churn limit, no consolidations are allowed in the block
|
) -> None:
|
||||||
assert get_consolidation_churn_limit(state) > MIN_ACTIVATION_BALANCE
|
# If the pending consolidations queue is full, consolidation requests are ignored
|
||||||
consolidation = signed_consolidation.message
|
if len(state.pending_consolidations) == PENDING_CONSOLIDATIONS_LIMIT:
|
||||||
# Verify that source != target, so a consolidation cannot be used as an exit.
|
return
|
||||||
assert consolidation.source_index != consolidation.target_index
|
# If there is too little available consolidation churn limit, consolidation requests are ignored
|
||||||
|
if get_consolidation_churn_limit(state) <= MIN_ACTIVATION_BALANCE:
|
||||||
|
return
|
||||||
|
|
||||||
|
validator_pubkeys = [v.pubkey for v in state.validators]
|
||||||
|
# Verify that both pubkeys exist
|
||||||
|
request_source_pubkey = consolidation_request.source_pubkey
|
||||||
|
request_target_pubkey = consolidation_request.target_pubkey
|
||||||
|
if request_source_pubkey not in validator_pubkeys:
|
||||||
|
return
|
||||||
|
if request_target_pubkey not in validator_pubkeys:
|
||||||
|
return
|
||||||
|
source_index = ValidatorIndex(validator_pubkeys.index(request_source_pubkey))
|
||||||
|
target_index = ValidatorIndex(validator_pubkeys.index(request_target_pubkey))
|
||||||
|
source_validator = state.validators[source_index]
|
||||||
|
target_validator = state.validators[target_index]
|
||||||
|
|
||||||
|
# Verify that source != target, so a consolidation cannot be used as an exit.
|
||||||
|
if source_index == target_index:
|
||||||
|
return
|
||||||
|
|
||||||
|
# Verify source withdrawal credentials
|
||||||
|
has_correct_credential = has_execution_withdrawal_credential(source_validator)
|
||||||
|
is_correct_source_address = (
|
||||||
|
source_validator.withdrawal_credentials[12:] == consolidation_request.source_address
|
||||||
|
)
|
||||||
|
if not (has_correct_credential and is_correct_source_address):
|
||||||
|
return
|
||||||
|
|
||||||
|
# Verify that target has execution withdrawal credentials
|
||||||
|
if not has_execution_withdrawal_credential(target_validator):
|
||||||
|
return
|
||||||
|
|
||||||
source_validator = state.validators[consolidation.source_index]
|
|
||||||
target_validator = state.validators[consolidation.target_index]
|
|
||||||
# Verify the source and the target are active
|
# Verify the source and the target are active
|
||||||
current_epoch = get_current_epoch(state)
|
current_epoch = get_current_epoch(state)
|
||||||
assert is_active_validator(source_validator, current_epoch)
|
if not is_active_validator(source_validator, current_epoch):
|
||||||
assert is_active_validator(target_validator, current_epoch)
|
return
|
||||||
|
if not is_active_validator(target_validator, current_epoch):
|
||||||
|
return
|
||||||
# Verify exits for source and target have not been initiated
|
# Verify exits for source and target have not been initiated
|
||||||
assert source_validator.exit_epoch == FAR_FUTURE_EPOCH
|
if source_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||||
assert target_validator.exit_epoch == FAR_FUTURE_EPOCH
|
return
|
||||||
# Consolidations must specify an epoch when they become valid; they are not valid before then
|
if target_validator.exit_epoch != FAR_FUTURE_EPOCH:
|
||||||
assert current_epoch >= consolidation.epoch
|
return
|
||||||
|
|
||||||
# Verify the source and the target have Execution layer withdrawal credentials
|
|
||||||
assert has_execution_withdrawal_credential(source_validator)
|
|
||||||
assert has_execution_withdrawal_credential(target_validator)
|
|
||||||
# Verify the same withdrawal address
|
|
||||||
assert source_validator.withdrawal_credentials[12:] == target_validator.withdrawal_credentials[12:]
|
|
||||||
|
|
||||||
# Verify consolidation is signed by the source and the target
|
|
||||||
domain = compute_domain(DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
|
|
||||||
signing_root = compute_signing_root(consolidation, domain)
|
|
||||||
pubkeys = [source_validator.pubkey, target_validator.pubkey]
|
|
||||||
assert bls.FastAggregateVerify(pubkeys, signing_root, signed_consolidation.signature)
|
|
||||||
|
|
||||||
# Initiate source validator exit and append pending consolidation
|
# Initiate source validator exit and append pending consolidation
|
||||||
source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
source_validator.exit_epoch = compute_consolidation_epoch_and_update_churn(
|
||||||
|
@ -1338,8 +1420,8 @@ def process_consolidation(state: BeaconState, signed_consolidation: SignedConsol
|
||||||
source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
source_validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||||
)
|
)
|
||||||
state.pending_consolidations.append(PendingConsolidation(
|
state.pending_consolidations.append(PendingConsolidation(
|
||||||
source_index=consolidation.source_index,
|
source_index=source_index,
|
||||||
target_index=consolidation.target_index
|
target_index=target_index
|
||||||
))
|
))
|
||||||
```
|
```
|
||||||
|
|
||||||
|
@ -1349,7 +1431,7 @@ def process_consolidation(state: BeaconState, signed_consolidation: SignedConsol
|
||||||
Modifications include:
|
Modifications include:
|
||||||
1. Use `ELECTRA_FORK_VERSION` as the previous and current fork version.
|
1. Use `ELECTRA_FORK_VERSION` as the previous and current fork version.
|
||||||
2. Utilize the Electra `BeaconBlockBody` when constructing the initial `latest_block_header`.
|
2. Utilize the Electra `BeaconBlockBody` when constructing the initial `latest_block_header`.
|
||||||
3. *[New in Electra:EIP6110]* Add `deposit_receipts_start_index` variable to the genesis state initialization.
|
3. *[New in Electra:EIP6110]* Add `deposit_requests_start_index` variable to the genesis state initialization.
|
||||||
4. *[New in Electra:EIP7251]* Initialize new fields to support increasing the maximum effective balance.
|
4. *[New in Electra:EIP7251]* Initialize new fields to support increasing the maximum effective balance.
|
||||||
|
|
||||||
```python
|
```python
|
||||||
|
@ -1369,7 +1451,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||||
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
|
eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
|
||||||
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
|
||||||
randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
|
randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy
|
||||||
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in Electra:EIP6110]
|
deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX, # [New in Electra:EIP6110]
|
||||||
)
|
)
|
||||||
|
|
||||||
# Process deposits
|
# Process deposits
|
||||||
|
@ -1387,8 +1469,10 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
||||||
# Process activations
|
# Process activations
|
||||||
for index, validator in enumerate(state.validators):
|
for index, validator in enumerate(state.validators):
|
||||||
balance = state.balances[index]
|
balance = state.balances[index]
|
||||||
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
# [Modified in Electra:EIP7251]
|
||||||
if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
|
validator.effective_balance = min(
|
||||||
|
balance - balance % EFFECTIVE_BALANCE_INCREMENT, get_validator_max_effective_balance(validator))
|
||||||
|
if validator.effective_balance >= MIN_ACTIVATION_BALANCE:
|
||||||
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
validator.activation_eligibility_epoch = GENESIS_EPOCH
|
||||||
validator.activation_epoch = GENESIS_EPOCH
|
validator.activation_epoch = GENESIS_EPOCH
|
||||||
|
|
||||||
|
|
|
@ -90,8 +90,9 @@ def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
|
||||||
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||||
blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
|
blob_gas_used=pre.latest_execution_payload_header.blob_gas_used,
|
||||||
excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
|
excess_blob_gas=pre.latest_execution_payload_header.excess_blob_gas,
|
||||||
deposit_receipts_root=Root(), # [New in Electra:EIP6110]
|
deposit_requests_root=Root(), # [New in Electra:EIP6110]
|
||||||
withdrawal_requests_root=Root(), # [New in Electra:EIP7002],
|
withdrawal_requests_root=Root(), # [New in Electra:EIP7002]
|
||||||
|
consolidation_requests_root=Root(), # [New in Electra:EIP7251]
|
||||||
)
|
)
|
||||||
|
|
||||||
exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
|
exit_epochs = [v.exit_epoch for v in pre.validators if v.exit_epoch != FAR_FUTURE_EPOCH]
|
||||||
|
@ -146,7 +147,7 @@ def upgrade_to_electra(pre: deneb.BeaconState) -> BeaconState:
|
||||||
# Deep history valid from Capella onwards
|
# Deep history valid from Capella onwards
|
||||||
historical_summaries=pre.historical_summaries,
|
historical_summaries=pre.historical_summaries,
|
||||||
# [New in Electra:EIP6110]
|
# [New in Electra:EIP6110]
|
||||||
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX,
|
deposit_requests_start_index=UNSET_DEPOSIT_REQUESTS_START_INDEX,
|
||||||
# [New in Electra:EIP7251]
|
# [New in Electra:EIP7251]
|
||||||
deposit_balance_to_consume=0,
|
deposit_balance_to_consume=0,
|
||||||
exit_balance_to_consume=0,
|
exit_balance_to_consume=0,
|
||||||
|
|
|
@ -8,6 +8,10 @@
|
||||||
|
|
||||||
- [Introduction](#introduction)
|
- [Introduction](#introduction)
|
||||||
- [Prerequisites](#prerequisites)
|
- [Prerequisites](#prerequisites)
|
||||||
|
- [Containers](#containers)
|
||||||
|
- [Modified Containers](#modified-containers)
|
||||||
|
- [`AggregateAndProof`](#aggregateandproof)
|
||||||
|
- [`SignedAggregateAndProof`](#signedaggregateandproof)
|
||||||
- [Block proposal](#block-proposal)
|
- [Block proposal](#block-proposal)
|
||||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||||
- [Attester slashings](#attester-slashings)
|
- [Attester slashings](#attester-slashings)
|
||||||
|
@ -34,6 +38,27 @@ All behaviors and definitions defined in this document, and documents it extends
|
||||||
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Electra](./beacon-chain.md) are requisite for this document and used throughout.
|
All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Electra](./beacon-chain.md) are requisite for this document and used throughout.
|
||||||
Please see the related Beacon Chain doc before continuing and use it as a reference throughout.
|
Please see the related Beacon Chain doc before continuing and use it as a reference throughout.
|
||||||
|
|
||||||
|
## Containers
|
||||||
|
|
||||||
|
### Modified Containers
|
||||||
|
|
||||||
|
#### `AggregateAndProof`
|
||||||
|
|
||||||
|
```python
|
||||||
|
class AggregateAndProof(Container):
|
||||||
|
aggregator_index: ValidatorIndex
|
||||||
|
aggregate: Attestation # [Modified in Electra:EIP7549]
|
||||||
|
selection_proof: BLSSignature
|
||||||
|
```
|
||||||
|
|
||||||
|
#### `SignedAggregateAndProof`
|
||||||
|
|
||||||
|
```python
|
||||||
|
class SignedAggregateAndProof(Container):
|
||||||
|
message: AggregateAndProof # [Modified in Electra:EIP7549]
|
||||||
|
signature: BLSSignature
|
||||||
|
```
|
||||||
|
|
||||||
## Block proposal
|
## Block proposal
|
||||||
|
|
||||||
### Constructing the `BeaconBlockBody`
|
### Constructing the `BeaconBlockBody`
|
||||||
|
@ -80,7 +105,7 @@ def compute_on_chain_aggregate(network_aggregates: Sequence[Attestation]) -> Att
|
||||||
|
|
||||||
```python
|
```python
|
||||||
def get_eth1_pending_deposit_count(state: BeaconState) -> uint64:
|
def get_eth1_pending_deposit_count(state: BeaconState) -> uint64:
|
||||||
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index)
|
eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_requests_start_index)
|
||||||
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
if state.eth1_deposit_index < eth1_deposit_index_limit:
|
||||||
return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index)
|
||||||
else:
|
else:
|
||||||
|
|
|
@ -1 +1 @@
|
||||||
1.5.0-alpha.2
|
1.5.0-alpha.3
|
||||||
|
|
|
@ -24,6 +24,11 @@ from eth2spec.test.context import (
|
||||||
with_presets,
|
with_presets,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
always_bls,
|
always_bls,
|
||||||
|
single_phase,
|
||||||
|
with_custom_state,
|
||||||
|
spec_test,
|
||||||
|
default_balances_electra,
|
||||||
|
default_activation_threshold,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -143,7 +148,9 @@ def is_duplicate_sync_committee(committee_indices):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
|
@single_phase
|
||||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||||
committee_indices = compute_committee_indices(state)
|
committee_indices = compute_committee_indices(state)
|
||||||
|
|
||||||
|
|
|
@ -24,6 +24,8 @@ from eth2spec.test.context import (
|
||||||
with_custom_state,
|
with_custom_state,
|
||||||
with_presets,
|
with_presets,
|
||||||
spec_test,
|
spec_test,
|
||||||
|
default_balances_electra,
|
||||||
|
misc_balances_electra,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -132,7 +134,9 @@ def test_random_with_exits_with_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
|
@single_phase
|
||||||
def test_random_only_one_participant_without_duplicates(spec, state):
|
def test_random_only_one_participant_without_duplicates(spec, state):
|
||||||
rng = random.Random(501)
|
rng = random.Random(501)
|
||||||
yield from _test_harness_for_randomized_test_case(
|
yield from _test_harness_for_randomized_test_case(
|
||||||
|
@ -144,7 +148,9 @@ def test_random_only_one_participant_without_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
|
@single_phase
|
||||||
def test_random_low_participation_without_duplicates(spec, state):
|
def test_random_low_participation_without_duplicates(spec, state):
|
||||||
rng = random.Random(601)
|
rng = random.Random(601)
|
||||||
yield from _test_harness_for_randomized_test_case(
|
yield from _test_harness_for_randomized_test_case(
|
||||||
|
@ -156,7 +162,9 @@ def test_random_low_participation_without_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
|
@single_phase
|
||||||
def test_random_high_participation_without_duplicates(spec, state):
|
def test_random_high_participation_without_duplicates(spec, state):
|
||||||
rng = random.Random(701)
|
rng = random.Random(701)
|
||||||
yield from _test_harness_for_randomized_test_case(
|
yield from _test_harness_for_randomized_test_case(
|
||||||
|
@ -168,7 +176,9 @@ def test_random_high_participation_without_duplicates(spec, state):
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
|
@single_phase
|
||||||
def test_random_all_but_one_participating_without_duplicates(spec, state):
|
def test_random_all_but_one_participating_without_duplicates(spec, state):
|
||||||
rng = random.Random(801)
|
rng = random.Random(801)
|
||||||
yield from _test_harness_for_randomized_test_case(
|
yield from _test_harness_for_randomized_test_case(
|
||||||
|
@ -181,7 +191,7 @@ def test_random_all_but_one_participating_without_duplicates(spec, state):
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_test
|
@spec_test
|
||||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
|
@with_custom_state(balances_fn=misc_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_random_misc_balances_and_half_participation_without_duplicates(spec, state):
|
def test_random_misc_balances_and_half_participation_without_duplicates(spec, state):
|
||||||
rng = random.Random(1501)
|
rng = random.Random(1501)
|
||||||
|
@ -194,7 +204,8 @@ def test_random_misc_balances_and_half_participation_without_duplicates(spec, st
|
||||||
|
|
||||||
@with_altair_and_later
|
@with_altair_and_later
|
||||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||||
@spec_state_test
|
@spec_test
|
||||||
|
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_random_with_exits_without_duplicates(spec, state):
|
def test_random_with_exits_without_duplicates(spec, state):
|
||||||
rng = random.Random(1502)
|
rng = random.Random(1502)
|
||||||
|
|
|
@ -16,7 +16,7 @@ from .helpers.constants import (
|
||||||
ALLOWED_TEST_RUNNER_FORKS,
|
ALLOWED_TEST_RUNNER_FORKS,
|
||||||
LIGHT_CLIENT_TESTING_FORKS,
|
LIGHT_CLIENT_TESTING_FORKS,
|
||||||
)
|
)
|
||||||
from .helpers.forks import is_post_fork
|
from .helpers.forks import is_post_fork, is_post_electra
|
||||||
from .helpers.genesis import create_genesis_state
|
from .helpers.genesis import create_genesis_state
|
||||||
from .helpers.typing import (
|
from .helpers.typing import (
|
||||||
Spec,
|
Spec,
|
||||||
|
@ -86,6 +86,9 @@ def default_activation_threshold(spec: Spec):
|
||||||
    Helper method to use the default balance activation threshold for state creation for tests.
    Usage: `@with_custom_state(threshold_fn=default_activation_threshold, ...)`
    """
    if is_post_electra(spec):
        return spec.MIN_ACTIVATION_BALANCE
    else:
        return spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
|
|
||||||
|
@ -106,6 +109,18 @@ def default_balances(spec: Spec):
|
||||||
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
|
return [spec.MAX_EFFECTIVE_BALANCE] * num_validators
|
||||||
|
|
||||||
|
|
||||||
|
def default_balances_electra(spec: Spec):
    """
    Helper method to create a series of default balances for Electra.
    Usage: `@with_custom_state(balances_fn=default_balances_electra, ...)`
    """
    if not is_post_electra(spec):
        return default_balances(spec)

    num_validators = spec.SLOTS_PER_EPOCH * 8
    return [spec.MAX_EFFECTIVE_BALANCE_ELECTRA] * num_validators
|
||||||
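A hypothetical usage sketch showing how the new helper slots into the decorators used throughout this diff; the test name is made up and not part of the change set.

```python
# Hypothetical test sketch; `test_example_with_electra_balances` is not a real test.
@with_altair_and_later
@spec_test
@with_custom_state(balances_fn=default_balances_electra, threshold_fn=default_activation_threshold)
@single_phase
def test_example_with_electra_balances(spec, state):
    # Both branches of the helper create SLOTS_PER_EPOCH * 8 validators.
    assert len(state.validators) == spec.SLOTS_PER_EPOCH * 8
```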
|
|
||||||
|
|
||||||
def scaled_churn_balances_min_churn_limit(spec: Spec):
|
def scaled_churn_balances_min_churn_limit(spec: Spec):
|
||||||
"""
|
"""
|
||||||
Helper method to create enough validators to scale the churn limit.
|
Helper method to create enough validators to scale the churn limit.
|
||||||
|
@ -175,6 +190,21 @@ def misc_balances(spec: Spec):
|
||||||
return balances
|
return balances
|
||||||
|
|
||||||
|
|
||||||
|
def misc_balances_electra(spec: Spec):
    """
    Helper method to create a series of balances that includes some misc. balances for Electra.
    Usage: `@with_custom_state(balances_fn=misc_balances_electra, ...)`
    """
    if not is_post_electra(spec):
        return misc_balances(spec)

    num_validators = spec.SLOTS_PER_EPOCH * 8
    balances = [spec.MAX_EFFECTIVE_BALANCE_ELECTRA * 2 * i // num_validators for i in range(num_validators)]
    rng = Random(1234)
    rng.shuffle(balances)
    return balances
|
||||||
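A toy check of the spread this helper produces before shuffling, assuming MINIMAL preset values (SLOTS_PER_EPOCH = 8, MAX_EFFECTIVE_BALANCE_ELECTRA = 2048 ETH); these numbers are assumptions for the illustration only.

```python
# Distribution sketch under the assumptions stated above.
ETH = 10**9  # Gwei per ETH
num_validators = 8 * 8
balances = [2048 * ETH * 2 * i // num_validators for i in range(num_validators)]
assert balances[0] == 0
assert balances[1] == 64 * ETH
assert max(balances) == 4032 * ETH  # just under 2 * MAX_EFFECTIVE_BALANCE_ELECTRA
```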
|
|
||||||
|
|
||||||
def misc_balances_in_default_range_with_many_validators(spec: Spec):
|
def misc_balances_in_default_range_with_many_validators(spec: Spec):
|
||||||
"""
|
"""
|
||||||
Helper method to create a series of balances that includes some misc. balances but
|
Helper method to create a series of balances that includes some misc. balances but
|
||||||
|
|
|
@ -9,6 +9,11 @@ from eth2spec.test.helpers.sharding import (
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def chunks(lst, n):
    """Helper that splits a list into chunks of size n."""
    return [lst[i:i + n] for i in range(0, len(lst), n)]
|
||||||
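A one-line example of the helper's behavior, for reference:

```python
# The last chunk may be shorter than n.
assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]
```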
|
|
||||||
|
|
||||||
@with_eip7594_and_later
|
@with_eip7594_and_later
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
|
@ -20,15 +25,15 @@ def test_compute_extended_matrix(spec):
|
||||||
extended_matrix = spec.compute_extended_matrix(input_blobs)
|
extended_matrix = spec.compute_extended_matrix(input_blobs)
|
||||||
assert len(extended_matrix) == spec.CELLS_PER_EXT_BLOB * blob_count
|
assert len(extended_matrix) == spec.CELLS_PER_EXT_BLOB * blob_count
|
||||||
|
|
||||||
rows = [extended_matrix[i:(i + spec.CELLS_PER_EXT_BLOB)]
|
rows = chunks(extended_matrix, spec.CELLS_PER_EXT_BLOB)
|
||||||
for i in range(0, len(extended_matrix), spec.CELLS_PER_EXT_BLOB)]
|
|
||||||
assert len(rows) == blob_count
|
assert len(rows) == blob_count
|
||||||
assert len(rows[0]) == spec.CELLS_PER_EXT_BLOB
|
for row in rows:
|
||||||
|
assert len(row) == spec.CELLS_PER_EXT_BLOB
|
||||||
|
|
||||||
for blob_index, row in enumerate(rows):
|
for blob_index, row in enumerate(rows):
|
||||||
extended_blob = []
|
extended_blob = []
|
||||||
for cell in row:
|
for entry in row:
|
||||||
extended_blob.extend(spec.cell_to_coset_evals(cell))
|
extended_blob.extend(spec.cell_to_coset_evals(entry.cell))
|
||||||
blob_part = extended_blob[0:len(extended_blob) // 2]
|
blob_part = extended_blob[0:len(extended_blob) // 2]
|
||||||
blob = b''.join([spec.bls_field_to_bytes(x) for x in blob_part])
|
blob = b''.join([spec.bls_field_to_bytes(x) for x in blob_part])
|
||||||
assert blob == input_blobs[blob_index]
|
assert blob == input_blobs[blob_index]
|
||||||
|
@ -43,27 +48,19 @@ def test_recover_matrix(spec):
|
||||||
# Number of samples we will be recovering from
|
# Number of samples we will be recovering from
|
||||||
N_SAMPLES = spec.CELLS_PER_EXT_BLOB // 2
|
N_SAMPLES = spec.CELLS_PER_EXT_BLOB // 2
|
||||||
|
|
||||||
|
# Compute an extended matrix with two blobs
|
||||||
blob_count = 2
|
blob_count = 2
|
||||||
cells_dict = {}
|
blobs = [get_sample_blob(spec, rng=rng) for _ in range(blob_count)]
|
||||||
original_cells = []
|
extended_matrix = spec.compute_extended_matrix(blobs)
|
||||||
for blob_index in range(blob_count):
|
|
||||||
# Get the data we will be working with
|
|
||||||
blob = get_sample_blob(spec, rng=rng)
|
|
||||||
# Extend data with Reed-Solomon and split the extended data in cells
|
|
||||||
cells = spec.compute_cells(blob)
|
|
||||||
original_cells.append(cells)
|
|
||||||
cell_ids = []
|
|
||||||
# First figure out just the indices of the cells
|
|
||||||
for _ in range(N_SAMPLES):
|
|
||||||
cell_id = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
|
||||||
while cell_id in cell_ids:
|
|
||||||
cell_id = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
|
||||||
cell_ids.append(cell_id)
|
|
||||||
cell = cells[cell_id]
|
|
||||||
cells_dict[(blob_index, cell_id)] = cell
|
|
||||||
assert len(cell_ids) == N_SAMPLES
|
|
||||||
|
|
||||||
# Recover the matrix
|
# Construct a matrix with some entries missing
|
||||||
recovered_matrix = spec.recover_matrix(cells_dict, blob_count)
|
partial_matrix = []
|
||||||
flatten_original_cells = [cell for cells in original_cells for cell in cells]
|
for blob_entries in chunks(extended_matrix, spec.CELLS_PER_EXT_BLOB):
|
||||||
assert recovered_matrix == flatten_original_cells
|
rng.shuffle(blob_entries)
|
||||||
|
partial_matrix.extend(blob_entries[:N_SAMPLES])
|
||||||
|
|
||||||
|
# Given the partial matrix, recover the missing entries
|
||||||
|
recovered_matrix = spec.recover_matrix(partial_matrix, blob_count)
|
||||||
|
|
||||||
|
# Ensure that the recovered matrix matches the original matrix
|
||||||
|
assert recovered_matrix == extended_matrix
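A toy version of the "keep a random half of each blob's entries" construction used above, with plain integers standing in for matrix entries; the sizes (2 blobs, 8 cells each) are illustrative only.

```python
import random

# Sketch: retain exactly half of each blob's cells, which is the minimum the
# recovery procedure needs per blob.
toy_rng = random.Random(0)
extended = list(range(16))          # 2 toy "blobs" of 8 entries each
partial = []
for start in range(0, len(extended), 8):
    blob_entries = extended[start:start + 8]
    toy_rng.shuffle(blob_entries)
    partial.extend(blob_entries[:4])
assert len(partial) == 8
```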
|
||||||
|
|
|
@ -15,18 +15,98 @@ from eth2spec.utils.bls import BLS_MODULUS
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_fft(spec):
|
def test_fft(spec):
|
||||||
|
|
||||||
|
# in this test we sample a random polynomial in coefficient form
|
||||||
|
# then we apply an FFT to get evaluations over the roots of unity
|
||||||
|
# we then apply an inverse FFT to the evaluations to get coefficients
|
||||||
|
|
||||||
|
# we check two things:
|
||||||
|
# 1) the original coefficients and the resulting coefficients match
|
||||||
|
# 2) the evaluations that we got are the same as if we would have evaluated individually
|
||||||
|
|
||||||
rng = random.Random(5566)
|
rng = random.Random(5566)
|
||||||
|
|
||||||
roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_BLOB)
|
roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_BLOB)
|
||||||
|
|
||||||
|
# sample a random polynomial
|
||||||
poly_coeff = [rng.randint(0, BLS_MODULUS - 1) for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)]
|
poly_coeff = [rng.randint(0, BLS_MODULUS - 1) for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)]
|
||||||
|
|
||||||
|
# do an FFT and then an inverse FFT
|
||||||
poly_eval = spec.fft_field(poly_coeff, roots_of_unity)
|
poly_eval = spec.fft_field(poly_coeff, roots_of_unity)
|
||||||
poly_coeff_inversed = spec.fft_field(poly_eval, roots_of_unity, inv=True)
|
poly_coeff_inversed = spec.fft_field(poly_eval, roots_of_unity, inv=True)
|
||||||
|
|
||||||
|
# first check: inverse FFT after FFT results in original coefficients
|
||||||
assert len(poly_eval) == len(poly_coeff) == len(poly_coeff_inversed)
|
assert len(poly_eval) == len(poly_coeff) == len(poly_coeff_inversed)
|
||||||
assert poly_coeff_inversed == poly_coeff
|
assert poly_coeff_inversed == poly_coeff
|
||||||
|
|
||||||
|
# second check: result of FFT are really the evaluations
|
||||||
|
for i, w in enumerate(roots_of_unity):
|
||||||
|
individual_evaluation = spec.evaluate_polynomialcoeff(poly_coeff, w)
|
||||||
|
assert individual_evaluation == poly_eval[i]
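To make the property being tested concrete, here is a self-contained toy DFT over the small prime field GF(17) (the spec works over BLS_MODULUS instead); the modulus, root of unity, and coefficients are made up for the example.

```python
# Naive DFT over GF(17) with a primitive 4th root of unity: evaluating at the
# roots of unity and then interpolating returns the original coefficients.
P = 17          # toy modulus
W = 13          # primitive 4th root of unity mod 17 (13**4 % 17 == 1)

def dft(coeffs, root, p):
    """Evaluate the polynomial at root**0, root**1, ..., root**(n-1)."""
    return [sum(c * pow(root, i * j, p) for j, c in enumerate(coeffs)) % p
            for i in range(len(coeffs))]

def inverse_dft(evals, root, p):
    """Recover coefficients from evaluations at the roots of unity."""
    n = len(evals)
    n_inv = pow(n, -1, p)
    inv_root = pow(root, -1, p)
    return [(n_inv * sum(e * pow(inv_root, i * j, p) for i, e in enumerate(evals))) % p
            for j in range(n)]

coeffs = [3, 1, 4, 1]                      # arbitrary example polynomial
evals = dft(coeffs, W, P)
assert inverse_dft(evals, W, P) == coeffs  # round trip returns the original
```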
|
||||||
|
|
||||||
|
|
||||||
|
@with_eip7594_and_later
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_coset_fft(spec):
|
||||||
|
|
||||||
|
# in this test we sample a random polynomial in coefficient form
|
||||||
|
# then we apply a Coset FFT to get evaluations over the coset of the roots of unity
|
||||||
|
# we then apply an inverse Coset FFT to the evaluations to get coefficients
|
||||||
|
|
||||||
|
# we check two things:
|
||||||
|
# 1) the original coefficients and the resulting coefficients match
|
||||||
|
# 2) the evaluations that we got are the same as if we would have evaluated individually
|
||||||
|
|
||||||
|
rng = random.Random(5566)
|
||||||
|
|
||||||
|
roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_BLOB)
|
||||||
|
|
||||||
|
# this is the shift that generates the coset
|
||||||
|
coset_shift = spec.PRIMITIVE_ROOT_OF_UNITY
|
||||||
|
|
||||||
|
# sample a random polynomial
|
||||||
|
poly_coeff = [rng.randint(0, BLS_MODULUS - 1) for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)]
|
||||||
|
|
||||||
|
# do a coset FFT and then an inverse coset FFT
|
||||||
|
poly_eval = spec.coset_fft_field(poly_coeff, roots_of_unity)
|
||||||
|
poly_coeff_inversed = spec.coset_fft_field(poly_eval, roots_of_unity, inv=True)
|
||||||
|
|
||||||
|
# first check: inverse coset FFT after coset FFT results in original coefficients
|
||||||
|
assert len(poly_eval) == len(poly_coeff) == len(poly_coeff_inversed)
|
||||||
|
assert poly_coeff_inversed == poly_coeff
|
||||||
|
|
||||||
|
# second check: result of FFT are really the evaluations over the coset
|
||||||
|
for i, w in enumerate(roots_of_unity):
|
||||||
|
# the element of the coset is coset_shift * w
|
||||||
|
shifted_w = spec.BLSFieldElement((coset_shift * int(w)) % BLS_MODULUS)
|
||||||
|
individual_evaluation = spec.evaluate_polynomialcoeff(poly_coeff, shifted_w)
|
||||||
|
assert individual_evaluation == poly_eval[i]
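The coset check above relies on the identity p(s * w^i) = sum_j (c_j * s^j) * w^(i*j); a tiny self-contained illustration over GF(17) follows, with all values made up for the example.

```python
# Coset evaluation equals plain evaluation of shift-scaled coefficients.
P, W, S = 17, 13, 3                       # toy modulus, 4th root of unity, coset shift
coeffs = [3, 1, 4, 1]

def eval_poly(cs, x, p):
    return sum(c * pow(x, j, p) for j, c in enumerate(cs)) % p

# Evaluate p over the coset {S * W**i} directly ...
coset_evals = [eval_poly(coeffs, S * pow(W, i, P) % P, P) for i in range(4)]
# ... and compare with evaluating the scaled coefficients c_j * S**j at W**i.
shifted = [c * pow(S, j, P) % P for j, c in enumerate(coeffs)]
for i in range(4):
    assert coset_evals[i] == eval_poly(shifted, pow(W, i, P), P)
```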
|
||||||
|
|
||||||
|
|
||||||
|
@with_eip7594_and_later
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_construct_vanishing_polynomial(spec):
|
||||||
|
rng = random.Random(5566)
|
||||||
|
|
||||||
|
num_missing_cells = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
||||||
|
# Get a unique list of `num_missing_cells` cell indices
|
||||||
|
unique_missing_cell_indices = rng.sample(range(spec.CELLS_PER_EXT_BLOB), num_missing_cells)
|
||||||
|
|
||||||
|
zero_poly_coeff = spec.construct_vanishing_polynomial(unique_missing_cell_indices)
|
||||||
|
roots_of_unity = spec.compute_roots_of_unity(spec.FIELD_ELEMENTS_PER_EXT_BLOB)
|
||||||
|
zero_poly_eval = spec.fft_field(zero_poly_coeff, roots_of_unity)
|
||||||
|
zero_poly_eval_brp = spec.bit_reversal_permutation(zero_poly_eval)
|
||||||
|
|
||||||
|
for cell_index in range(spec.CELLS_PER_EXT_BLOB):
|
||||||
|
start = cell_index * spec.FIELD_ELEMENTS_PER_CELL
|
||||||
|
end = (cell_index + 1) * spec.FIELD_ELEMENTS_PER_CELL
|
||||||
|
if cell_index in unique_missing_cell_indices:
|
||||||
|
assert all(a == 0 for a in zero_poly_eval_brp[start:end])
|
||||||
|
else: # cell_index in cell_indices
|
||||||
|
assert all(a != 0 for a in zero_poly_eval_brp[start:end])
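The test above checks exactly the defining property of a vanishing polynomial: it is zero on the missing indices and nonzero elsewhere. A minimal sketch of that property over GF(17), with made-up missing points:

```python
# Z(x) = (x - 2)(x - 5) over GF(17) vanishes exactly at the "missing" points.
P = 17
missing = [2, 5]

def z(x):
    out = 1
    for m in missing:
        out = out * (x - m) % P
    return out

assert all(z(m) == 0 for m in missing)
assert all(z(x) != 0 for x in range(P) if x not in missing)
```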
|
||||||
|
|
||||||
|
|
||||||
@with_eip7594_and_later
|
@with_eip7594_and_later
|
||||||
@spec_test
|
@spec_test
|
||||||
|
@ -36,10 +116,10 @@ def test_verify_cell_kzg_proof(spec):
|
||||||
commitment = spec.blob_to_kzg_commitment(blob)
|
commitment = spec.blob_to_kzg_commitment(blob)
|
||||||
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
|
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
|
||||||
|
|
||||||
cell_id = 0
|
cell_index = 0
|
||||||
assert spec.verify_cell_kzg_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])
|
assert spec.verify_cell_kzg_proof(commitment, cell_index, cells[cell_index], proofs[cell_index])
|
||||||
cell_id = 1
|
cell_index = 1
|
||||||
assert spec.verify_cell_kzg_proof(commitment, cell_id, cells[cell_id], proofs[cell_id])
|
assert spec.verify_cell_kzg_proof(commitment, cell_index, cells[cell_index], proofs[cell_index])
|
||||||
|
|
||||||
|
|
||||||
@with_eip7594_and_later
|
@with_eip7594_and_later
|
||||||
|
@ -64,7 +144,7 @@ def test_verify_cell_kzg_proof_batch(spec):
|
||||||
@with_eip7594_and_later
|
@with_eip7594_and_later
|
||||||
@spec_test
|
@spec_test
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_recover_all_cells(spec):
|
def test_recover_cells_and_kzg_proofs(spec):
|
||||||
rng = random.Random(5566)
|
rng = random.Random(5566)
|
||||||
|
|
||||||
# Number of samples we will be recovering from
|
# Number of samples we will be recovering from
|
||||||
|
@ -74,29 +154,31 @@ def test_recover_all_cells(spec):
|
||||||
blob = get_sample_blob(spec)
|
blob = get_sample_blob(spec)
|
||||||
|
|
||||||
# Extend data with Reed-Solomon and split the extended data in cells
|
# Extend data with Reed-Solomon and split the extended data in cells
|
||||||
cells = spec.compute_cells(blob)
|
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
|
||||||
|
|
||||||
# Compute the cells we will be recovering from
|
# Compute the cells we will be recovering from
|
||||||
cell_ids = []
|
cell_indices = []
|
||||||
# First figure out just the indices of the cells
|
# First figure out just the indices of the cells
|
||||||
for i in range(N_SAMPLES):
|
for i in range(N_SAMPLES):
|
||||||
j = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
j = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
||||||
while j in cell_ids:
|
while j in cell_indices:
|
||||||
j = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
j = rng.randint(0, spec.CELLS_PER_EXT_BLOB - 1)
|
||||||
cell_ids.append(j)
|
cell_indices.append(j)
|
||||||
# Now the cells themselves
|
# Now the cells/proofs themselves
|
||||||
known_cells = [cells[cell_id] for cell_id in cell_ids]
|
known_cells = [cells[cell_index] for cell_index in cell_indices]
|
||||||
|
known_proofs = [proofs[cell_index] for cell_index in cell_indices]
|
||||||
|
|
||||||
# Recover all of the cells
|
# Recover the missing cells and proofs
|
||||||
recovered_cells = spec.recover_all_cells(cell_ids, known_cells)
|
recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, known_cells, known_proofs)
|
||||||
recovered_data = [x for xs in recovered_cells for x in xs]
|
recovered_data = [x for xs in recovered_cells for x in xs]
|
||||||
|
|
||||||
# Check that the original data match the non-extended portion of the recovered data
|
# Check that the original data match the non-extended portion of the recovered data
|
||||||
blob_byte_array = [b for b in blob]
|
blob_byte_array = [b for b in blob]
|
||||||
assert blob_byte_array == recovered_data[:len(recovered_data) // 2]
|
assert blob_byte_array == recovered_data[:len(recovered_data) // 2]
|
||||||
|
|
||||||
# Check that the recovered cells match the original cells
|
# Check that the recovered cells/proofs match the original cells/proofs
|
||||||
assert cells == recovered_cells
|
assert cells == recovered_cells
|
||||||
|
assert proofs == recovered_proofs
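As an aside on the index-sampling loop above: drawing distinct indices one at a time is equivalent in effect to `random.sample`, though the rng is consumed differently, so it is not a drop-in change for deterministic test vectors. The numbers below are placeholders, not preset constants read from the spec.

```python
import random

toy_rng = random.Random(5566)
cells_per_ext_blob = 128                  # placeholder for spec.CELLS_PER_EXT_BLOB
n_samples = cells_per_ext_blob // 2
cell_indices = toy_rng.sample(range(cells_per_ext_blob), n_samples)
assert len(cell_indices) == len(set(cell_indices)) == n_samples
```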
|
||||||
|
|
||||||
|
|
||||||
@with_eip7594_and_later
|
@with_eip7594_and_later
|
||||||
|
|
|
@ -11,8 +11,8 @@ from eth2spec.test.context import (
|
||||||
def test_invariants(spec):
|
def test_invariants(spec):
|
||||||
assert spec.FIELD_ELEMENTS_PER_BLOB % spec.FIELD_ELEMENTS_PER_CELL == 0
|
assert spec.FIELD_ELEMENTS_PER_BLOB % spec.FIELD_ELEMENTS_PER_CELL == 0
|
||||||
assert spec.FIELD_ELEMENTS_PER_EXT_BLOB % spec.config.NUMBER_OF_COLUMNS == 0
|
assert spec.FIELD_ELEMENTS_PER_EXT_BLOB % spec.config.NUMBER_OF_COLUMNS == 0
|
||||||
assert spec.SAMPLES_PER_SLOT <= spec.config.NUMBER_OF_COLUMNS
|
assert spec.config.SAMPLES_PER_SLOT <= spec.config.NUMBER_OF_COLUMNS
|
||||||
assert spec.CUSTODY_REQUIREMENT <= spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT
|
assert spec.config.CUSTODY_REQUIREMENT <= spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT
|
||||||
assert spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT <= spec.config.NUMBER_OF_COLUMNS
|
assert spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT <= spec.config.NUMBER_OF_COLUMNS
|
||||||
assert spec.config.NUMBER_OF_COLUMNS % spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT == 0
|
assert spec.config.NUMBER_OF_COLUMNS % spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT == 0
|
||||||
assert spec.config.MAX_REQUEST_DATA_COLUMN_SIDECARS == (
|
assert spec.config.MAX_REQUEST_DATA_COLUMN_SIDECARS == (
|
||||||
|
|
|
@ -9,7 +9,7 @@ from eth2spec.test.context import (
|
||||||
def run_get_custody_columns(spec, peer_count, custody_subnet_count):
|
def run_get_custody_columns(spec, peer_count, custody_subnet_count):
|
||||||
assignments = [spec.get_custody_columns(node_id, custody_subnet_count) for node_id in range(peer_count)]
|
assignments = [spec.get_custody_columns(node_id, custody_subnet_count) for node_id in range(peer_count)]
|
||||||
|
|
||||||
columns_per_subnet = spec.NUMBER_OF_COLUMNS // spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT
|
columns_per_subnet = spec.config.NUMBER_OF_COLUMNS // spec.config.DATA_COLUMN_SIDECAR_SUBNET_COUNT
|
||||||
for assignment in assignments:
|
for assignment in assignments:
|
||||||
assert len(assignment) == custody_subnet_count * columns_per_subnet
|
assert len(assignment) == custody_subnet_count * columns_per_subnet
|
||||||
assert len(assignment) == len(set(assignment))
|
assert len(assignment) == len(set(assignment))
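A worked illustration of the length assertion above, using made-up configuration values rather than the actual preset:

```python
# If 128 columns are spread over 32 subnets, each subnet maps to 4 columns,
# so custodying 2 subnets means exactly 8 distinct columns.
NUMBER_OF_COLUMNS = 128                      # illustrative value
DATA_COLUMN_SIDECAR_SUBNET_COUNT = 32        # illustrative value
custody_subnet_count = 2
columns_per_subnet = NUMBER_OF_COLUMNS // DATA_COLUMN_SIDECAR_SUBNET_COUNT
assert custody_subnet_count * columns_per_subnet == 8
```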
|
||||||
|
@ -20,8 +20,8 @@ def run_get_custody_columns(spec, peer_count, custody_subnet_count):
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_get_custody_columns_peers_within_number_of_columns(spec):
|
def test_get_custody_columns_peers_within_number_of_columns(spec):
|
||||||
peer_count = 10
|
peer_count = 10
|
||||||
custody_subnet_count = spec.CUSTODY_REQUIREMENT
|
custody_subnet_count = spec.config.CUSTODY_REQUIREMENT
|
||||||
assert spec.NUMBER_OF_COLUMNS > peer_count
|
assert spec.config.NUMBER_OF_COLUMNS > peer_count
|
||||||
run_get_custody_columns(spec, peer_count, custody_subnet_count)
|
run_get_custody_columns(spec, peer_count, custody_subnet_count)
|
||||||
|
|
||||||
|
|
||||||
|
@ -30,8 +30,8 @@ def test_get_custody_columns_peers_within_number_of_columns(spec):
|
||||||
@single_phase
|
@single_phase
|
||||||
def test_get_custody_columns_peers_more_than_number_of_columns(spec):
|
def test_get_custody_columns_peers_more_than_number_of_columns(spec):
|
||||||
peer_count = 200
|
peer_count = 200
|
||||||
custody_subnet_count = spec.CUSTODY_REQUIREMENT
|
custody_subnet_count = spec.config.CUSTODY_REQUIREMENT
|
||||||
assert spec.NUMBER_OF_COLUMNS < peer_count
|
assert spec.config.NUMBER_OF_COLUMNS < peer_count
|
||||||
run_get_custody_columns(spec, peer_count, custody_subnet_count)
|
run_get_custody_columns(spec, peer_count, custody_subnet_count)
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,811 +0,0 @@
|
||||||
from eth2spec.test.helpers.constants import MINIMAL
|
|
||||||
from eth2spec.test.context import (
|
|
||||||
with_electra_and_later,
|
|
||||||
with_presets,
|
|
||||||
always_bls,
|
|
||||||
spec_test,
|
|
||||||
single_phase,
|
|
||||||
with_custom_state,
|
|
||||||
scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
default_activation_threshold,
|
|
||||||
)
|
|
||||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
|
||||||
from eth2spec.test.helpers.consolidations import (
|
|
||||||
run_consolidation_processing,
|
|
||||||
sign_consolidation,
|
|
||||||
)
|
|
||||||
from eth2spec.test.helpers.withdrawals import (
|
|
||||||
set_eth1_withdrawal_credential_with_balance,
|
|
||||||
set_compounding_withdrawal_credential,
|
|
||||||
)
|
|
||||||
|
|
||||||
# ***********************
|
|
||||||
# * CONSOLIDATION TESTS *
|
|
||||||
# ***********************
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_basic_consolidation_in_current_consolidation_epoch(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set earliest consolidation epoch to the expected exit epoch
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
state.earliest_consolidation_epoch = expected_exit_epoch
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
# Set the consolidation balance to consume equal to churn limit
|
|
||||||
state.consolidation_balance_to_consume = consolidation_churn_limit
|
|
||||||
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume
|
|
||||||
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
|
||||||
)
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_basic_consolidation_in_new_consolidation_epoch(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
# Set consolidation balance to consume to some arbitrary nonzero value below the churn limit
|
|
||||||
state.consolidation_balance_to_consume = spec.EFFECTIVE_BALANCE_INCREMENT
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
# consolidation_balance_to_consume is replenished to the churn limit since we move to a new consolidation epoch
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume
|
|
||||||
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
|
||||||
)
|
|
||||||
# Check exit epochs
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_basic_consolidation_with_preexisting_churn(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set earliest consolidation epoch to the expected exit epoch
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
state.earliest_consolidation_epoch = expected_exit_epoch
|
|
||||||
# Set some nonzero preexisting churn lower than churn limit and sufficient to process the consolidation
|
|
||||||
preexisting_churn = 2 * spec.MIN_ACTIVATION_BALANCE
|
|
||||||
state.consolidation_balance_to_consume = preexisting_churn
|
|
||||||
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume
|
|
||||||
== preexisting_churn - spec.MIN_ACTIVATION_BALANCE
|
|
||||||
)
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_basic_consolidation_with_insufficient_preexisting_churn(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set earliest consolidation epoch to the first available epoch
|
|
||||||
state.earliest_consolidation_epoch = spec.compute_activation_exit_epoch(
|
|
||||||
current_epoch
|
|
||||||
)
|
|
||||||
# Set preexisting churn lower than required to process the consolidation
|
|
||||||
preexisting_churn = spec.MIN_ACTIVATION_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT
|
|
||||||
state.consolidation_balance_to_consume = preexisting_churn
|
|
||||||
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
# It takes one more epoch to process the consolidation due to insufficient churn
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 1
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
remainder = spec.MIN_ACTIVATION_BALANCE % preexisting_churn
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume == consolidation_churn_limit - remainder
|
|
||||||
)
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_basic_consolidation_with_compounding_credential(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
# Set the consolidation balance to consume equal to churn limit
|
|
||||||
state.consolidation_balance_to_consume = consolidation_churn_limit
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_compounding_withdrawal_credential(spec, state, source_index)
|
|
||||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume
|
|
||||||
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
|
||||||
)
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_consolidation_churn_limit_balance(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
source_validator = state.validators[source_index]
|
|
||||||
source_validator.effective_balance = consolidation_churn_limit
|
|
||||||
# Churn limit increases due to higher total balance
|
|
||||||
updated_consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_compounding_withdrawal_credential(spec, state, source_index)
|
|
||||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
# validator's effective balance fits into the churn, exit as soon as possible
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
assert (
|
|
||||||
state.consolidation_balance_to_consume
|
|
||||||
== updated_consolidation_churn_limit - consolidation_churn_limit
|
|
||||||
)
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_consolidation_balance_larger_than_churn_limit(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
# Set source balance higher than consolidation churn limit
|
|
||||||
state.validators[source_index].effective_balance = 2 * consolidation_churn_limit
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_compounding_withdrawal_credential(spec, state, source_index)
|
|
||||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
|
||||||
|
|
||||||
# Consolidation churn limit increases due to higher total balance
|
|
||||||
new_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
remainder = state.validators[source_index].effective_balance % new_churn_limit
|
|
||||||
expected_balance = new_churn_limit - remainder
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 1
|
|
||||||
# Check consolidation churn is decremented correctly
|
|
||||||
assert state.consolidation_balance_to_consume == expected_balance
|
|
||||||
# Check exit epoch
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_consolidation_balance_through_two_churn_epochs(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_compounding_withdrawal_credential(spec, state, source_index)
|
|
||||||
set_compounding_withdrawal_credential(spec, state, target_index)
|
|
||||||
|
|
||||||
# Set source balance higher than consolidation churn limit
|
|
||||||
state.validators[source_index].effective_balance = 3 * consolidation_churn_limit
|
|
||||||
|
|
||||||
new_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
remainder = state.validators[source_index].effective_balance % new_churn_limit
|
|
||||||
expected_balance = new_churn_limit - remainder
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(spec, state, signed_consolidation)
|
|
||||||
|
|
||||||
# when exiting a multiple of the churn limit greater than 1, an extra exit epoch is added
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 2
|
|
||||||
assert state.validators[0].exit_epoch == expected_exit_epoch
|
|
||||||
# since the earliest exit epoch moves to a new one, consolidation balance is back to full
|
|
||||||
assert state.consolidation_balance_to_consume == expected_balance
|
|
||||||
|
|
||||||
|
|
||||||
# Failing tests
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_source_equals_target(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
validator_privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]
|
|
||||||
|
|
||||||
# Set withdrawal credentials to eth1
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, validator_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch,
|
|
||||||
source_index=validator_index,
|
|
||||||
target_index=validator_index,
|
|
||||||
),
|
|
||||||
validator_privkey,
|
|
||||||
validator_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_exceed_pending_consolidations_limit(spec, state):
|
|
||||||
state.pending_consolidations = [
|
|
||||||
spec.PendingConsolidation(source_index=0, target_index=1)
|
|
||||||
] * spec.PENDING_CONSOLIDATIONS_LIMIT
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_not_enough_consolidation_churn_available(spec, state):
|
|
||||||
state.validators = state.validators[0:2]
|
|
||||||
state.pending_consolidations = [
|
|
||||||
spec.PendingConsolidation(source_index=0, target_index=1)
|
|
||||||
]
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_exited_source(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
# exit source
|
|
||||||
spec.initiate_validator_exit(state, 0)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_exited_target(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
# exit target
|
|
||||||
spec.initiate_validator_exit(state, 1)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_inactive_source(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
# set source validator as not yet activated
|
|
||||||
state.validators[0].activation_epoch = spec.FAR_FUTURE_EPOCH
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_inactive_target(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
# set target validator as not yet activated
|
|
||||||
state.validators[1].activation_epoch = spec.FAR_FUTURE_EPOCH
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_no_execution_withdrawal_credential(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_different_credentials(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
# Set source and target withdrawal credentials to different eth1 credentials
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1, address=b"\x10" * 20)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
@always_bls
|
|
||||||
def test_invalid_source_signature(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set earliest consolidation epoch to the expected exit epoch
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
state.earliest_consolidation_epoch = expected_exit_epoch
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
# Set the consolidation balance to consume equal to churn limit
|
|
||||||
state.consolidation_balance_to_consume = consolidation_churn_limit
|
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Change the pubkey of the source validator, invalidating its signature
|
|
||||||
state.validators[0].pubkey = state.validators[1].pubkey
|
|
||||||
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
@always_bls
|
|
||||||
def test_invalid_target_signature(spec, state):
|
|
||||||
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
|
||||||
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[source_index].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[target_index].pubkey]
|
|
||||||
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, source_index)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
|
||||||
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(
|
|
||||||
epoch=current_epoch, source_index=source_index, target_index=target_index
|
|
||||||
),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Set earliest consolidation epoch to the expected exit epoch
|
|
||||||
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
|
||||||
state.earliest_consolidation_epoch = expected_exit_epoch
|
|
||||||
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
|
||||||
# Set the consolidation balance to consume equal to churn limit
|
|
||||||
state.consolidation_balance_to_consume = consolidation_churn_limit
|
|
||||||
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Change the pubkey of the target validator, invalidating its signature
|
|
||||||
state.validators[1].pubkey = state.validators[2].pubkey
|
|
||||||
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
|
||||||
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
|
||||||
@with_custom_state(
|
|
||||||
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
|
||||||
threshold_fn=default_activation_threshold,
|
|
||||||
)
|
|
||||||
@spec_test
|
|
||||||
@single_phase
|
|
||||||
def test_invalid_before_specified_epoch(spec, state):
|
|
||||||
current_epoch = spec.get_current_epoch(state)
|
|
||||||
source_privkey = pubkey_to_privkey[state.validators[0].pubkey]
|
|
||||||
target_privkey = pubkey_to_privkey[state.validators[1].pubkey]
|
|
||||||
# Set source and target withdrawal credentials to the same eth1 credential
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 0)
|
|
||||||
set_eth1_withdrawal_credential_with_balance(spec, state, 1)
|
|
||||||
# set epoch=current_epoch + 1, so it's too early to process it
|
|
||||||
signed_consolidation = sign_consolidation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
spec.Consolidation(epoch=current_epoch + 1, source_index=0, target_index=1),
|
|
||||||
source_privkey,
|
|
||||||
target_privkey,
|
|
||||||
)
|
|
||||||
yield from run_consolidation_processing(
|
|
||||||
spec, state, signed_consolidation, valid=False
|
|
||||||
)
|
|
|
@ -0,0 +1,809 @@
|
||||||
|
from eth2spec.test.helpers.constants import MINIMAL
|
||||||
|
from eth2spec.test.context import (
|
||||||
|
with_electra_and_later,
|
||||||
|
with_presets,
|
||||||
|
spec_test,
|
||||||
|
single_phase,
|
||||||
|
with_custom_state,
|
||||||
|
scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
default_activation_threshold,
|
||||||
|
spec_state_test,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.withdrawals import (
|
||||||
|
set_eth1_withdrawal_credential_with_balance,
|
||||||
|
set_compounding_withdrawal_credential,
|
||||||
|
)
|
||||||
|
|
||||||
|
# ***********************
|
||||||
|
# * CONSOLIDATION TESTS *
|
||||||
|
# ***********************
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_basic_consolidation_in_current_consolidation_epoch(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set earliest consolidation epoch to the expected exit epoch
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||||
|
state.earliest_consolidation_epoch = expected_exit_epoch
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
# Set the consolidation balance to consume equal to churn limit
|
||||||
|
state.consolidation_balance_to_consume = consolidation_churn_limit
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume
|
||||||
|
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
||||||
|
)
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_basic_consolidation_in_new_consolidation_epoch(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
# Set consolidation balance to consume to some arbitrary nonzero value below the churn limit
|
||||||
|
state.consolidation_balance_to_consume = spec.EFFECTIVE_BALANCE_INCREMENT
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
# consolidation_balance_to_consume is replenished to the churn limit since we move to a new consolidation epoch
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume
|
||||||
|
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
||||||
|
)
|
||||||
|
# Check exit epochs
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_basic_consolidation_with_preexisting_churn(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set earliest consolidation epoch to the expected exit epoch
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||||
|
state.earliest_consolidation_epoch = expected_exit_epoch
|
||||||
|
# Set some nonzero preexisting churn lower than churn limit and sufficient to process the consolidation
|
||||||
|
preexisting_churn = 2 * spec.MIN_ACTIVATION_BALANCE
|
||||||
|
state.consolidation_balance_to_consume = preexisting_churn
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume
|
||||||
|
== preexisting_churn - spec.MIN_ACTIVATION_BALANCE
|
||||||
|
)
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_basic_consolidation_with_insufficient_preexisting_churn(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set earliest consolidation epoch to the first available epoch
|
||||||
|
state.earliest_consolidation_epoch = spec.compute_activation_exit_epoch(
|
||||||
|
current_epoch
|
||||||
|
)
|
||||||
|
# Set preexisting churn lower than required to process the consolidation
|
||||||
|
preexisting_churn = spec.MIN_ACTIVATION_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT
|
||||||
|
state.consolidation_balance_to_consume = preexisting_churn
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
# It takes one more epoch to process the consolidation due to insufficient churn
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 1
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
remainder = spec.MIN_ACTIVATION_BALANCE % preexisting_churn
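# The portion of MIN_ACTIVATION_BALANCE that does not fit in the preexisting churn spills into the next epoch's churn limit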
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume == consolidation_churn_limit - remainder
|
||||||
|
)
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_basic_consolidation_with_compounding_credentials(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to compounding credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_compounding_withdrawal_credential(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to compounding credentials
|
||||||
|
set_compounding_withdrawal_credential(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set the consolidation balance to consume equal to churn limit
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
state.consolidation_balance_to_consume = consolidation_churn_limit
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume
|
||||||
|
== consolidation_churn_limit - spec.MIN_ACTIVATION_BALANCE
|
||||||
|
)
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_consolidation_churn_limit_balance(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set source effective balance to consolidation churn limit
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
state.validators[source_index].effective_balance = consolidation_churn_limit
|
||||||
|
# Churn limit increases due to higher total balance
|
||||||
|
updated_consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
# The validator's effective balance fits within the churn limit, so it exits at the earliest possible epoch
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch)
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
assert (
|
||||||
|
state.consolidation_balance_to_consume
|
||||||
|
== updated_consolidation_churn_limit - consolidation_churn_limit
|
||||||
|
)
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_consolidation_balance_larger_than_churn_limit(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set source effective balance to 2 * consolidation churn limit
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
state.validators[source_index].effective_balance = 2 * consolidation_churn_limit
|
||||||
|
|
||||||
|
# Consolidation churn limit increases due to higher total balance
|
||||||
|
updated_consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
remainder = state.validators[source_index].effective_balance % updated_consolidation_churn_limit
|
||||||
|
expected_balance = updated_consolidation_churn_limit - remainder
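# One full churn epoch is consumed; the remainder spills into the following epoch, leaving churn limit minus remainder to consume there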
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 1
|
||||||
|
# Check consolidation churn is decremented correctly
|
||||||
|
assert state.consolidation_balance_to_consume == expected_balance
|
||||||
|
# Check exit epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_consolidation_balance_through_two_churn_epochs(spec, state):
|
||||||
|
# This state has 256 validators each with 32 ETH in MINIMAL preset, 128 ETH consolidation churn
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Set target to eth1 credentials
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# Set source effective balance to 3 * consolidation churn limit
|
||||||
|
consolidation_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
state.validators[source_index].effective_balance = 3 * consolidation_churn_limit
|
||||||
|
|
||||||
|
new_churn_limit = spec.get_consolidation_churn_limit(state)
|
||||||
|
remainder = state.validators[source_index].effective_balance % new_churn_limit
|
||||||
|
expected_balance = new_churn_limit - remainder
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(spec, state, consolidation)
|
||||||
|
|
||||||
|
# when exiting a multiple of the churn limit greater than 1, an extra exit epoch is added
|
||||||
|
expected_exit_epoch = spec.compute_activation_exit_epoch(current_epoch) + 2
|
||||||
|
assert state.validators[source_index].exit_epoch == expected_exit_epoch
|
||||||
|
# since the exit spills into a new churn epoch, the balance to consume is replenished there and only the remainder is charged
|
||||||
|
assert state.consolidation_balance_to_consume == expected_balance
|
||||||
|
|
||||||
|
|
||||||
|
# Failing tests
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_source_equals_target(spec, state):
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
|
||||||
|
# Set source to eth1 credentials
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation from source to source
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[source_index].pubkey,
|
||||||
|
)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_exceed_pending_consolidations_limit(spec, state):
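    # Fill pending_consolidations to its limit so any further consolidation request is rejected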
|
||||||
|
state.pending_consolidations = [
|
||||||
|
spec.PendingConsolidation(source_index=0, target_index=1)
|
||||||
|
] * spec.PENDING_CONSOLIDATIONS_LIMIT
|
||||||
|
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@spec_state_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_not_enough_consolidation_churn_available(spec, state):
|
||||||
|
state.validators = state.validators[0:2]
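# Shrink the registry so the consolidation churn limit is not enough to process a consolidation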
|
||||||
|
state.pending_consolidations = [
|
||||||
|
spec.PendingConsolidation(source_index=0, target_index=1)
|
||||||
|
]
|
||||||
|
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_exited_source(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# exit source
|
||||||
|
spec.initiate_validator_exit(state, source_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_exited_target(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
# exit target
|
||||||
|
spec.initiate_validator_exit(state, target_index)
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_inactive_source(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# set source validator as not yet activated
|
||||||
|
state.validators[source_index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_inactive_target(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
# set target validator as not yet activated
|
||||||
|
state.validators[target_index].activation_epoch = spec.FAR_FUTURE_EPOCH
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_no_source_execution_withdrawal_credential(spec, state):
|
||||||
|
# Set up a correct consolidation, but source does not have
|
||||||
|
# an execution withdrawal credential
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_no_target_execution_withdrawal_credential(spec, state):
|
||||||
|
# Set up a correct consolidation, but target does not have
|
||||||
|
# an execution withdrawal credential
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_incorrect_source_address(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with different source address
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=b"\x33" * 20,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_unknown_source_pubkey(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with an unknown source pubkey
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=source_address,
|
||||||
|
source_pubkey=b"\x00" * 48,
|
||||||
|
target_pubkey=state.validators[target_index].pubkey,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@with_electra_and_later
|
||||||
|
@with_presets([MINIMAL], "need sufficient consolidation churn limit")
|
||||||
|
@with_custom_state(
|
||||||
|
balances_fn=scaled_churn_balances_exceed_activation_exit_churn_limit,
|
||||||
|
threshold_fn=default_activation_threshold,
|
||||||
|
)
|
||||||
|
@spec_test
|
||||||
|
@single_phase
|
||||||
|
def test_incorrect_unknown_target_pubkey(spec, state):
|
||||||
|
# Set up an otherwise correct consolidation
|
||||||
|
current_epoch = spec.get_current_epoch(state)
|
||||||
|
source_index = spec.get_active_validator_indices(state, current_epoch)[0]
|
||||||
|
target_index = spec.get_active_validator_indices(state, current_epoch)[1]
|
||||||
|
source_address = b"\x22" * 20
|
||||||
|
set_eth1_withdrawal_credential_with_balance(
|
||||||
|
spec, state, source_index, address=source_address
|
||||||
|
)
|
||||||
|
# Make consolidation with an unknown target pubkey
|
||||||
|
consolidation = spec.ConsolidationRequest(
|
||||||
|
source_address=b"\x33" * 20,
|
||||||
|
source_pubkey=state.validators[source_index].pubkey,
|
||||||
|
target_pubkey=b"\x00" * 48,
|
||||||
|
)
|
||||||
|
set_eth1_withdrawal_credential_with_balance(spec, state, target_index)
|
||||||
|
|
||||||
|
yield from run_consolidation_processing(
|
||||||
|
spec, state, consolidation, success=False
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def run_consolidation_processing(spec, state, consolidation, success=True):
|
||||||
|
"""
|
||||||
|
Run ``process_consolidation_request``, yielding:
|
||||||
|
- pre-state ('pre')
|
||||||
|
- consolidation_request ('consolidation_request')
|
||||||
|
- post-state ('post').
|
||||||
|
If ``success == False``, expect the request to be ignored and the state to remain unchanged
|
||||||
|
"""
|
||||||
|
|
||||||
|
if success:
|
||||||
|
validator_pubkeys = [v.pubkey for v in state.validators]
|
||||||
|
source_index = spec.ValidatorIndex(validator_pubkeys.index(consolidation.source_pubkey))
|
||||||
|
target_index = spec.ValidatorIndex(validator_pubkeys.index(consolidation.target_pubkey))
|
||||||
|
source_validator = state.validators[source_index]
|
||||||
|
target_validator = state.validators[target_index]
|
||||||
|
pre_exit_epoch_source = source_validator.exit_epoch
|
||||||
|
pre_exit_epoch_target = target_validator.exit_epoch
|
||||||
|
pre_pending_consolidations = state.pending_consolidations.copy()
|
||||||
|
else:
|
||||||
|
pre_state = state.copy()
|
||||||
|
|
||||||
|
yield 'pre', state
|
||||||
|
yield 'consolidation_request', consolidation
|
||||||
|
|
||||||
|
spec.process_consolidation_request(state, consolidation)
|
||||||
|
|
||||||
|
yield 'post', state
|
||||||
|
|
||||||
|
if success:
|
||||||
|
# Check source and target have execution credentials
|
||||||
|
assert spec.has_execution_withdrawal_credential(source_validator)
|
||||||
|
assert spec.has_execution_withdrawal_credential(target_validator)
|
||||||
|
# Check source address in the consolidation fits the withdrawal credentials
|
||||||
|
assert source_validator.withdrawal_credentials[12:] == consolidation.source_address
|
||||||
|
# Check source and target are not the same
|
||||||
|
assert source_index != target_index
|
||||||
|
# Check source and target were not exiting
|
||||||
|
assert pre_exit_epoch_source == spec.FAR_FUTURE_EPOCH
|
||||||
|
assert pre_exit_epoch_target == spec.FAR_FUTURE_EPOCH
|
||||||
|
# Check source is now exiting
|
||||||
|
assert state.validators[source_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
# Check that the exit epoch matches earliest_consolidation_epoch
|
||||||
|
assert state.validators[source_index].exit_epoch == state.earliest_consolidation_epoch
|
||||||
|
# Check that the correct consolidation has been appended
|
||||||
|
expected_new_pending_consolidation = spec.PendingConsolidation(
|
||||||
|
source_index=source_index,
|
||||||
|
target_index=target_index,
|
||||||
|
)
|
||||||
|
assert state.pending_consolidations == pre_pending_consolidations + [expected_new_pending_consolidation]
|
||||||
|
else:
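# On failure the request is expected to be ignored rather than raise, so the state must be unchanged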
|
||||||
|
assert pre_state == state
|
|
@ -1,8 +1,8 @@
|
||||||
from eth2spec.test.context import spec_state_test, always_bls, with_electra_and_later
|
from eth2spec.test.context import spec_state_test, always_bls, with_electra_and_later
|
||||||
from eth2spec.test.helpers.deposits import (
|
from eth2spec.test.helpers.deposits import (
|
||||||
prepare_deposit_receipt,
|
prepare_deposit_request,
|
||||||
run_deposit_receipt_processing,
|
run_deposit_request_processing,
|
||||||
run_deposit_receipt_processing_with_specific_fork_version
|
run_deposit_request_processing_with_specific_fork_version
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.state import next_epoch_via_block
|
from eth2spec.test.helpers.state import next_epoch_via_block
|
||||||
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable
|
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable
|
||||||
|
@ -15,9 +15,9 @@ def test_new_deposit_under_max(spec, state):
|
||||||
validator_index = len(state.validators)
|
validator_index = len(state.validators)
|
||||||
# effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
|
# effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement.
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE - 1
|
amount = spec.MAX_EFFECTIVE_BALANCE - 1
|
||||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -27,9 +27,9 @@ def test_new_deposit_max(spec, state):
|
||||||
validator_index = len(state.validators)
|
validator_index = len(state.validators)
|
||||||
# effective balance will be exactly the same as balance.
|
# effective balance will be exactly the same as balance.
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -39,9 +39,9 @@ def test_new_deposit_over_max(spec, state):
|
||||||
validator_index = len(state.validators)
|
validator_index = len(state.validators)
|
||||||
# just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
|
# just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE + 1
|
amount = spec.MAX_EFFECTIVE_BALANCE + 1
|
||||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -55,7 +55,7 @@ def test_new_deposit_eth1_withdrawal_credentials(spec, state):
|
||||||
+ b'\x59' * 20 # a 20-byte eth1 address
|
+ b'\x59' * 20 # a 20-byte eth1 address
|
||||||
)
|
)
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
deposit_receipt = prepare_deposit_receipt(
|
deposit_request = prepare_deposit_request(
|
||||||
spec,
|
spec,
|
||||||
validator_index,
|
validator_index,
|
||||||
amount,
|
amount,
|
||||||
|
@ -63,7 +63,7 @@ def test_new_deposit_eth1_withdrawal_credentials(spec, state):
|
||||||
signed=True,
|
signed=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -76,7 +76,7 @@ def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
|
||||||
+ b'\x02' * 31  # Garbage bytes
|
+ b'\x02' * 31  # Garbage bytes
|
||||||
)
|
)
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
deposit_receipt = prepare_deposit_receipt(
|
deposit_request = prepare_deposit_request(
|
||||||
spec,
|
spec,
|
||||||
validator_index,
|
validator_index,
|
||||||
amount,
|
amount,
|
||||||
|
@ -84,7 +84,7 @@ def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
|
||||||
signed=True,
|
signed=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -95,8 +95,8 @@ def test_correct_sig_but_forked_state(spec, state):
|
||||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||||
# deposits will always be valid, regardless of the current fork
|
# deposits will always be valid, regardless of the current fork
|
||||||
state.fork.current_version = spec.Version('0x1234abcd')
|
state.fork.current_version = spec.Version('0x1234abcd')
|
||||||
deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
|
deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)
|
||||||
yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
|
yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)
|
||||||
|
|
||||||
|
|
||||||
@with_electra_and_later
|
@with_electra_and_later
|
||||||
|
@ -106,8 +106,8 @@ def test_incorrect_sig_new_deposit(spec, state):
|
||||||
# fresh deposit = next validator index = validator appended to registry
|
# fresh deposit = next validator index = validator appended to registry
|
||||||
validator_index = len(state.validators)
|
validator_index = len(state.validators)
|
||||||
     amount = spec.MAX_EFFECTIVE_BALANCE
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, effective=False)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index, effective=False)


 @with_electra_and_later
@@ -115,12 +115,12 @@ def test_incorrect_sig_new_deposit(spec, state):
 def test_top_up__max_effective_balance(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)

     state.balances[validator_index] = spec.MAX_EFFECTIVE_BALANCE
     state.validators[validator_index].effective_balance = spec.MAX_EFFECTIVE_BALANCE

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)

     deposits_len = len(state.pending_balance_deposits)
     assert state.pending_balance_deposits[deposits_len - 1].amount == amount
@@ -132,14 +132,14 @@ def test_top_up__max_effective_balance(spec, state):
 def test_top_up__less_effective_balance(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)

     initial_balance = spec.MAX_EFFECTIVE_BALANCE - 1000
     initial_effective_balance = spec.MAX_EFFECTIVE_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT
     state.balances[validator_index] = initial_balance
     state.validators[validator_index].effective_balance = initial_effective_balance

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)

     deposits_len = len(state.pending_balance_deposits)
     assert state.pending_balance_deposits[deposits_len - 1].amount == amount
@@ -152,14 +152,14 @@ def test_top_up__less_effective_balance(spec, state):
 def test_top_up__zero_balance(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, signed=True)

     initial_balance = 0
     initial_effective_balance = 0
     state.balances[validator_index] = initial_balance
     state.validators[validator_index].effective_balance = initial_effective_balance

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)

     deposits_len = len(state.pending_balance_deposits)
     assert state.pending_balance_deposits[deposits_len - 1].amount == amount
@@ -173,10 +173,10 @@ def test_top_up__zero_balance(spec, state):
 def test_incorrect_sig_top_up(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount)

     # invalid signatures, in top-ups, are allowed!
-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)


 @with_electra_and_later
@@ -185,7 +185,7 @@ def test_incorrect_withdrawal_credentials_top_up(spec, state):
     validator_index = 0
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
     withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]
-    deposit_receipt = prepare_deposit_receipt(
+    deposit_request = prepare_deposit_request(
         spec,
         validator_index,
         amount,
@@ -193,7 +193,7 @@ def test_incorrect_withdrawal_credentials_top_up(spec, state):
     )

     # inconsistent withdrawal credentials, in top-ups, are allowed!
-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)


 @with_electra_and_later
@@ -205,9 +205,9 @@ def test_key_validate_invalid_subgroup(spec, state):
     # All-zero pubkey would not pass `bls.KeyValidate`, but `process_deposit` would not throw exception.
     pubkey = b'\x00' * 48

-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, pubkey=pubkey, signed=True)

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)


 @with_electra_and_later
@@ -221,9 +221,9 @@ def test_key_validate_invalid_decompression(spec, state):
     pubkey_hex = 'c01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
     pubkey = bytes.fromhex(pubkey_hex)

-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, pubkey=pubkey, signed=True)

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)


 @with_electra_and_later
@@ -235,7 +235,7 @@ def test_ineffective_deposit_with_previous_fork_version(spec, state):
     # NOTE: it was effective in Altair.
     assert state.fork.previous_version != state.fork.current_version

-    yield from run_deposit_receipt_processing_with_specific_fork_version(
+    yield from run_deposit_request_processing_with_specific_fork_version(
         spec,
         state,
         fork_version=state.fork.previous_version,
@@ -249,7 +249,7 @@ def test_ineffective_deposit_with_previous_fork_version(spec, state):
 def test_effective_deposit_with_genesis_fork_version(spec, state):
     assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

-    yield from run_deposit_receipt_processing_with_specific_fork_version(
+    yield from run_deposit_request_processing_with_specific_fork_version(
         spec,
         state,
         fork_version=spec.config.GENESIS_FORK_VERSION,
@@ -272,9 +272,9 @@ def test_success_top_up_to_withdrawn_validator(spec, state):

     # Make a top-up balance to validator
     amount = spec.MAX_EFFECTIVE_BALANCE // 4
-    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, len(state.validators), signed=True)
+    deposit_request = prepare_deposit_request(spec, validator_index, amount, len(state.validators), signed=True)

-    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+    yield from run_deposit_request_processing(spec, state, deposit_request, validator_index)

     deposits_len = len(state.pending_balance_deposits)
     assert state.pending_balance_deposits[deposits_len - 1].amount == amount
@@ -29,14 +29,14 @@ def test_basic_withdrawal_request(spec, state):
     set_eth1_withdrawal_credential_with_balance(
         spec, state, validator_index, address=address
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request
     )


@@ -51,14 +51,14 @@ def test_basic_withdrawal_request_with_compounding_credentials(spec, state):
     validator_pubkey = state.validators[validator_index].pubkey
     address = b"\x22" * 20
     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request
     )


@@ -74,7 +74,7 @@ def test_basic_withdrawal_request_with_full_partial_withdrawal_queue(spec, state
     set_eth1_withdrawal_credential_with_balance(
         spec, state, validator_index, address=address
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
@@ -89,10 +89,10 @@ def test_basic_withdrawal_request_with_full_partial_withdrawal_queue(spec, state
     ] * spec.PENDING_PARTIAL_WITHDRAWALS_LIMIT

     # Exit should still be processed
-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )


@@ -113,14 +113,14 @@ def test_incorrect_source_address(spec, state):
     set_eth1_withdrawal_credential_with_balance(
         spec, state, validator_index, address=address
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=incorrect_address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -142,14 +142,14 @@ def test_incorrect_withdrawal_credential_prefix(spec, state):
         spec.BLS_WITHDRAWAL_PREFIX
         + state.validators[validator_index].withdrawal_credentials[1:]
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -168,14 +168,14 @@ def test_on_withdrawal_request_initiated_validator(spec, state):
     )
     # Initiate exit earlier
     spec.initiate_validator_exit(state, validator_index)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -189,7 +189,7 @@ def test_activation_epoch_less_than_shard_committee_period(spec, state):
     set_eth1_withdrawal_credential_with_balance(
         spec, state, validator_index, address=address
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=spec.FULL_EXIT_REQUEST_AMOUNT,
@@ -200,8 +200,8 @@ def test_activation_epoch_less_than_shard_committee_period(spec, state):
         + spec.config.SHARD_COMMITTEE_PERIOD
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -221,16 +221,16 @@ def test_basic_partial_withdrawal_request(spec, state):
     state.balances[validator_index] += amount

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -253,16 +253,16 @@ def test_basic_partial_withdrawal_request_higher_excess_balance(spec, state):
     state.balances[validator_index] += 2 * amount

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -286,16 +286,16 @@ def test_basic_partial_withdrawal_request_lower_than_excess_balance(spec, state)
     state.balances[validator_index] += excess_balance

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -316,7 +316,7 @@ def test_partial_withdrawal_request_with_pending_withdrawals(spec, state):
     amount = spec.EFFECTIVE_BALANCE_INCREMENT

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -331,10 +331,10 @@ def test_partial_withdrawal_request_with_pending_withdrawals(spec, state):
     # Set balance so that the validator still has excess balance even with the pending withdrawals
     state.balances[validator_index] += 3 * amount

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -357,7 +357,7 @@ def test_partial_withdrawal_request_with_pending_withdrawals_and_high_amount(
     amount = spec.UINT64_MAX

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -376,10 +376,10 @@ def test_partial_withdrawal_request_with_pending_withdrawals_and_high_amount(
     # Set balance so that the validator still has excess balance even with the pending withdrawals
     state.balances[validator_index] = spec.MAX_EFFECTIVE_BALANCE_ELECTRA

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )


@@ -399,7 +399,7 @@ def test_partial_withdrawal_request_with_high_balance(spec, state):
     )

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -407,10 +407,10 @@ def test_partial_withdrawal_request_with_high_balance(spec, state):

     churn_limit = spec.get_activation_exit_churn_limit(state)

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -435,16 +435,16 @@ def test_partial_withdrawal_request_with_high_amount(spec, state):
     state.balances[validator_index] += 1

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -467,16 +467,16 @@ def test_partial_withdrawal_request_with_low_amount(spec, state):
     state.balances[validator_index] += amount

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
     )

     # Check that the assigned exit epoch is correct
@@ -501,7 +501,7 @@ def test_partial_withdrawal_queue_full(spec, state):
     # Ensure that the validator has sufficient excess balance
     state.balances[validator_index] += 2 * amount
     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -514,8 +514,8 @@ def test_partial_withdrawal_queue_full(spec, state):
     state.pending_partial_withdrawals = [
         partial_withdrawal
     ] * spec.PENDING_PARTIAL_WITHDRAWALS_LIMIT
-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -534,16 +534,16 @@ def test_no_compounding_credentials(spec, state):
     set_eth1_withdrawal_credential_with_balance(
         spec, state, validator_index, address=address
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
         success=False,
     )

@@ -559,14 +559,14 @@ def test_no_excess_balance(spec, state):
     amount = spec.EFFECTIVE_BALANCE_INCREMENT

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -583,7 +583,7 @@ def test_pending_withdrawals_consume_all_excess_balance(spec, state):
     state.balances[validator_index] += 10 * amount

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -595,8 +595,8 @@ def test_pending_withdrawals_consume_all_excess_balance(spec, state):
     )
     state.pending_partial_withdrawals = [partial_withdrawal] * 10

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -615,16 +615,16 @@ def test_insufficient_effective_balance(spec, state):
     ].effective_balance -= spec.EFFECTIVE_BALANCE_INCREMENT

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
+    yield from run_withdrawal_request_processing(
         spec,
         state,
-        execution_layer_withdrawal_request,
+        withdrawal_request,
         success=False,
     )

@@ -644,14 +644,14 @@ def test_partial_withdrawal_incorrect_source_address(spec, state):
     state.balances[validator_index] += 2 * amount

     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=incorrect_address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -673,14 +673,14 @@ def test_partial_withdrawal_incorrect_withdrawal_credential_prefix(spec, state):
         spec.BLS_WITHDRAWAL_PREFIX
         + state.validators[validator_index].withdrawal_credentials[1:]
     )
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -699,14 +699,14 @@ def test_partial_withdrawal_on_exit_initiated_validator(spec, state):
     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
     # Initiate exit earlier
     spec.initiate_validator_exit(state, validator_index)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -722,7 +722,7 @@ def test_partial_withdrawal_activation_epoch_less_than_shard_committee_period(
     amount = spec.EFFECTIVE_BALANCE_INCREMENT
     state.balances[validator_index] += 2 * amount
     set_compounding_withdrawal_credential(spec, state, validator_index, address=address)
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
         amount=amount,
@@ -733,8 +733,8 @@ def test_partial_withdrawal_activation_epoch_less_than_shard_committee_period(
         + spec.config.SHARD_COMMITTEE_PERIOD
     )

-    yield from run_execution_layer_withdrawal_request_processing(
-        spec, state, execution_layer_withdrawal_request, success=False
+    yield from run_withdrawal_request_processing(
+        spec, state, withdrawal_request, success=False
     )


@@ -743,28 +743,28 @@ def test_partial_withdrawal_activation_epoch_less_than_shard_committee_period(
 #


-def run_execution_layer_withdrawal_request_processing(
-    spec, state, execution_layer_withdrawal_request, valid=True, success=True
+def run_withdrawal_request_processing(
+    spec, state, withdrawal_request, valid=True, success=True
 ):
     """
-    Run ``process_execution_layer_withdrawal_request``, yielding:
+    Run ``process_withdrawal_request``, yielding:
       - pre-state ('pre')
-      - execution_layer_withdrawal_request ('execution_layer_withdrawal_request')
+      - withdrawal_request ('withdrawal_request')
       - post-state ('post').
     If ``valid == False``, run expecting ``AssertionError``
     If ``success == False``, it doesn't initiate exit successfully
     """
     validator_index = get_validator_index_by_pubkey(
-        state, execution_layer_withdrawal_request.validator_pubkey
+        state, withdrawal_request.validator_pubkey
     )

     yield "pre", state
-    yield "execution_layer_withdrawal_request", execution_layer_withdrawal_request
+    yield "withdrawal_request", withdrawal_request

     if not valid:
         expect_assertion_error(
-            lambda: spec.process_execution_layer_withdrawal_request(
-                state, execution_layer_withdrawal_request
+            lambda: spec.process_withdrawal_request(
+                state, withdrawal_request
             )
         )
         yield "post", None
@@ -776,11 +776,11 @@ def run_execution_layer_withdrawal_request_processing(
     pre_effective_balance = state.validators[validator_index].effective_balance
     pre_state = state.copy()
     expected_amount_to_withdraw = compute_amount_to_withdraw(
-        spec, state, validator_index, execution_layer_withdrawal_request.amount
+        spec, state, validator_index, withdrawal_request.amount
     )

-    spec.process_execution_layer_withdrawal_request(
-        state, execution_layer_withdrawal_request
+    spec.process_withdrawal_request(
+        state, withdrawal_request
     )

     yield "post", state
@@ -794,7 +794,7 @@ def run_execution_layer_withdrawal_request_processing(
             state.validators[validator_index].effective_balance == pre_effective_balance
         )
         # Full exit request
-        if execution_layer_withdrawal_request.amount == spec.FULL_EXIT_REQUEST_AMOUNT:
+        if withdrawal_request.amount == spec.FULL_EXIT_REQUEST_AMOUNT:
             assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
             assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
             assert spec.get_pending_balance_to_withdraw(state, validator_index) == 0
@@ -5,6 +5,10 @@ from eth2spec.test.context import (
 )


+def run_process_pending_balance_deposits(spec, state):
+    yield from run_epoch_processing_with(spec, state, 'process_pending_balance_deposits')
+
+
 @with_electra_and_later
 @spec_state_test
 def test_pending_deposit_min_activation_balance(spec, state):
@@ -14,9 +18,9 @@ def test_pending_deposit_min_activation_balance(spec, state):
         spec.PendingBalanceDeposit(index=index, amount=amount)
     )
     pre_balance = state.balances[index]
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     assert state.balances[index] == pre_balance + amount
     # No leftover deposit balance to consume when there are no deposits left to process
     assert state.deposit_balance_to_consume == 0
@@ -32,9 +36,9 @@ def test_pending_deposit_balance_equal_churn(spec, state):
         spec.PendingBalanceDeposit(index=index, amount=amount)
     )
     pre_balance = state.balances[index]
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     assert state.balances[index] == pre_balance + amount
     assert state.deposit_balance_to_consume == 0
     assert state.pending_balance_deposits == []
@@ -49,9 +53,9 @@ def test_pending_deposit_balance_above_churn(spec, state):
         spec.PendingBalanceDeposit(index=index, amount=amount)
     )
     pre_balance = state.balances[index]
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     # deposit was above churn, balance hasn't changed
     assert state.balances[index] == pre_balance
     # deposit balance to consume is the full churn limit
@@ -74,9 +78,9 @@ def test_pending_deposit_preexisting_churn(spec, state):
         spec.PendingBalanceDeposit(index=index, amount=amount)
     )
     pre_balance = state.balances[index]
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     # balance was deposited correctly
     assert state.balances[index] == pre_balance + amount
     # No leftover deposit balance to consume when there are no deposits left to process
@@ -96,9 +100,9 @@ def test_multiple_pending_deposits_below_churn(spec, state):
         spec.PendingBalanceDeposit(index=1, amount=amount)
     )
     pre_balances = state.balances.copy()
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     for i in [0, 1]:
         assert state.balances[i] == pre_balances[i] + amount
     # No leftover deposit balance to consume when there are no deposits left to process
@@ -116,9 +120,9 @@ def test_multiple_pending_deposits_above_churn(spec, state):
         spec.PendingBalanceDeposit(index=i, amount=amount)
     )
     pre_balances = state.balances.copy()
-    yield from run_epoch_processing_with(
-        spec, state, "process_pending_balance_deposits"
-    )
+    yield from run_process_pending_balance_deposits(spec, state)
     # First two deposits are processed, third is not because above churn
     for i in [0, 1]:
         assert state.balances[i] == pre_balances[i] + amount
@@ -132,3 +136,143 @@ def test_multiple_pending_deposits_above_churn(spec, state):
     assert state.pending_balance_deposits == [
         spec.PendingBalanceDeposit(index=2, amount=amount)
     ]
+
+
+@with_electra_and_later
+@spec_state_test
+def test_skipped_deposit_exiting_validator(spec, state):
+    index = 0
+    amount = spec.MIN_ACTIVATION_BALANCE
+    state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=index, amount=amount))
+    pre_pending_balance_deposits = state.pending_balance_deposits.copy()
+    pre_balance = state.balances[index]
+    # Initiate the validator's exit
+    spec.initiate_validator_exit(state, index)
+
+    yield from run_process_pending_balance_deposits(spec, state)
+
+    # Deposit is skipped because validator is exiting
+    assert state.balances[index] == pre_balance
+    # All deposits either processed or postponed, no leftover deposit balance to consume
+    assert state.deposit_balance_to_consume == 0
+    # The deposit is still in the queue
+    assert state.pending_balance_deposits == pre_pending_balance_deposits
+
+
+@with_electra_and_later
+@spec_state_test
+def test_multiple_skipped_deposits_exiting_validators(spec, state):
+    amount = spec.EFFECTIVE_BALANCE_INCREMENT
+    for i in [0, 1, 2]:
+        # Append pending deposit for validator i
+        state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=i, amount=amount))
+
+        # Initiate the exit of validator i
+        spec.initiate_validator_exit(state, i)
+    pre_pending_balance_deposits = state.pending_balance_deposits.copy()
+    pre_balances = state.balances.copy()
+
+    yield from run_process_pending_balance_deposits(spec, state)
+
+    # All deposits are postponed, no balance changes
+    assert state.balances == pre_balances
+    # All deposits are postponed, no leftover deposit balance to consume
+    assert state.deposit_balance_to_consume == 0
+    # All deposits still in the queue, in the same order
+    assert state.pending_balance_deposits == pre_pending_balance_deposits
+
+
+@with_electra_and_later
+@spec_state_test
+def test_multiple_pending_one_skipped(spec, state):
+    amount = spec.EFFECTIVE_BALANCE_INCREMENT
+    for i in [0, 1, 2]:
+        state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=i, amount=amount))
+    pre_balances = state.balances.copy()
+    # Initiate the second validator's exit
+    spec.initiate_validator_exit(state, 1)
+
+    yield from run_process_pending_balance_deposits(spec, state)
+
+    # First and last deposit are processed, second is not because of exiting
+    for i in [0, 2]:
+        assert state.balances[i] == pre_balances[i] + amount
+    assert state.balances[1] == pre_balances[1]
+    # All deposits either processed or postponed, no leftover deposit balance to consume
+    assert state.deposit_balance_to_consume == 0
+    # second deposit is still in the queue
+    assert state.pending_balance_deposits == [spec.PendingBalanceDeposit(index=1, amount=amount)]
+
+
+@with_electra_and_later
+@spec_state_test
+def test_mixture_of_skipped_and_above_churn(spec, state):
+    amount01 = spec.EFFECTIVE_BALANCE_INCREMENT
+    amount2 = spec.MAX_EFFECTIVE_BALANCE_ELECTRA
+    # First two validators have small deposit, third validators a large one
+    for i in [0, 1]:
+        state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=i, amount=amount01))
+    state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=2, amount=amount2))
+    pre_balances = state.balances.copy()
+    # Initiate the second validator's exit
+    spec.initiate_validator_exit(state, 1)
+
+    yield from run_process_pending_balance_deposits(spec, state)
+
+    # First deposit is processed
+    assert state.balances[0] == pre_balances[0] + amount01
+    # Second deposit is postponed, third is above churn
+    for i in [1, 2]:
+        assert state.balances[i] == pre_balances[i]
+    # First deposit consumes some deposit balance
+    # Deposit balance to consume is not reset because third deposit is not processed
+    assert state.deposit_balance_to_consume == spec.get_activation_exit_churn_limit(state) - amount01
+    # second and third deposit still in the queue, but second is appended at the end
+    assert state.pending_balance_deposits == [spec.PendingBalanceDeposit(index=2, amount=amount2),
+                                              spec.PendingBalanceDeposit(index=1, amount=amount01)]
+
+
+@with_electra_and_later
+@spec_state_test
+def test_processing_deposit_of_withdrawable_validator(spec, state):
+    index = 0
+    amount = spec.MIN_ACTIVATION_BALANCE
+    state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=index, amount=amount))
+    pre_balance = state.balances[index]
+    # Initiate the validator's exit
+    spec.initiate_validator_exit(state, index)
+    # Set epoch to withdrawable epoch + 1 to allow processing of the deposit
+    state.slot = spec.SLOTS_PER_EPOCH * (state.validators[index].withdrawable_epoch + 1)
+
+    yield from run_process_pending_balance_deposits(spec, state)
+
+    # Deposit is correctly processed
+    assert state.balances[index] == pre_balance + amount
+    # No leftover deposit balance to consume when there are no deposits left to process
+    assert state.deposit_balance_to_consume == 0
+    assert state.pending_balance_deposits == []
+
+
+@with_electra_and_later
+@spec_state_test
+def test_processing_deposit_of_withdrawable_validator_does_not_get_churned(spec, state):
+    amount = spec.MAX_EFFECTIVE_BALANCE_ELECTRA
+    for i in [0, 1]:
+        state.pending_balance_deposits.append(spec.PendingBalanceDeposit(index=i, amount=amount))
+    pre_balances = state.balances.copy()
+    # Initiate the first validator's exit
+    spec.initiate_validator_exit(state, 0)
+    # Set epoch to withdrawable epoch + 1 to allow processing of the deposit
+    state.slot = spec.SLOTS_PER_EPOCH * (state.validators[0].withdrawable_epoch + 1)
+    # Don't use run_epoch_processing_with to avoid penalties being applied
+    yield 'pre', state
+    spec.process_pending_balance_deposits(state)
+    yield 'post', state
+    # First deposit is processed though above churn limit, because validator is withdrawable
+    assert state.balances[0] == pre_balances[0] + amount
+    # Second deposit is not processed because above churn
+    assert state.balances[1] == pre_balances[1]
+    # Second deposit is not processed, so there's leftover deposit balance to consume.
+    # First deposit does not consume any.
+    assert state.deposit_balance_to_consume == spec.get_activation_exit_churn_limit(state)
+    assert state.pending_balance_deposits == [spec.PendingBalanceDeposit(index=1, amount=amount)]
@@ -36,12 +36,12 @@ def test_basic_el_withdrawal_request(spec, state):
     assert state.validators[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH

     validator_pubkey = state.validators[validator_index].pubkey
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
     )
     block = build_empty_block_for_next_slot(spec, state)
-    block.body.execution_payload.withdrawal_requests = [execution_layer_withdrawal_request]
+    block.body.execution_payload.withdrawal_requests = [withdrawal_request]
     block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
     signed_block = state_transition_and_sign_block(spec, state, block)

@@ -73,11 +73,11 @@ def test_basic_btec_and_el_withdrawal_request_in_same_block(spec, state):
     block.body.bls_to_execution_changes = [signed_address_change]

     validator_pubkey = state.validators[validator_index].pubkey
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
     )
-    block.body.execution_payload.withdrawal_requests = [execution_layer_withdrawal_request]
+    block.body.execution_payload.withdrawal_requests = [withdrawal_request]

     block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
     signed_block = state_transition_and_sign_block(spec, state, block)
@@ -125,12 +125,12 @@ def test_basic_btec_before_el_withdrawal_request(spec, state):

     # block_2 contains an EL-Exit operation of the given validator
     validator_pubkey = state.validators[validator_index].pubkey
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
     )
     block_2 = build_empty_block_for_next_slot(spec, state)
-    block_2.body.execution_payload.withdrawal_requests = [execution_layer_withdrawal_request]
+    block_2.body.execution_payload.withdrawal_requests = [withdrawal_request]
     block_2.body.execution_payload.block_hash = compute_el_block_hash(spec, block_2.body.execution_payload)
     signed_block_2 = state_transition_and_sign_block(spec, state, block_2)

@@ -157,13 +157,13 @@ def test_cl_exit_and_el_withdrawal_request_in_same_block(spec, state):
     signed_voluntary_exits = prepare_signed_exits(spec, state, indices=[validator_index])
     # EL-Exit
     validator_pubkey = state.validators[validator_index].pubkey
-    execution_layer_withdrawal_request = spec.ExecutionLayerWithdrawalRequest(
+    withdrawal_request = spec.WithdrawalRequest(
         source_address=address,
         validator_pubkey=validator_pubkey,
     )
     block = build_empty_block_for_next_slot(spec, state)
     block.body.voluntary_exits = signed_voluntary_exits
-    block.body.execution_payload.withdrawal_requests = [execution_layer_withdrawal_request]
+    block.body.execution_payload.withdrawal_requests = [withdrawal_request]
     block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
     signed_block = state_transition_and_sign_block(spec, state, block)

@@ -9,7 +9,7 @@ from eth2spec.test.context import (
 from eth2spec.test.helpers.deposits import (
     build_deposit_data,
     deposit_from_context,
-    prepare_deposit_receipt,
+    prepare_deposit_request,
 )
 from eth2spec.test.helpers.execution_payload import (
     compute_el_block_hash,
@@ -38,8 +38,8 @@ def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True)
     # Check that deposits are applied
     if valid:
         expected_pubkeys = [d.data.pubkey for d in block.body.deposits]
-        deposit_receipts = block.body.execution_payload.deposit_receipts
-        expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)]
+        deposit_requests = block.body.execution_payload.deposit_requests
+        expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_requests if (d.pubkey not in top_up_keys)]
         actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]]
 
         assert actual_pubkeys == expected_pubkeys
@@ -48,12 +48,12 @@ def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True)
 def prepare_state_and_block(spec,
                             state,
                             deposit_cnt,
-                            deposit_receipt_cnt,
-                            first_deposit_receipt_index=0,
-                            deposit_receipts_start_index=None,
+                            deposit_request_cnt,
+                            first_deposit_request_index=0,
+                            deposit_requests_start_index=None,
                             eth1_data_deposit_count=None):
     deposits = []
-    deposit_receipts = []
+    deposit_requests = []
     keypair_index = len(state.validators)
 
     # Prepare deposits
@@ -83,26 +83,26 @@ def prepare_state_and_block(spec,
                                     deposit_count=eth1_data_deposit_count,
                                     block_hash=state.eth1_data.block_hash)
 
-    # Prepare deposit receipts
-    for offset in range(deposit_receipt_cnt):
-        deposit_receipt = prepare_deposit_receipt(spec,
+    # Prepare deposit requests
+    for offset in range(deposit_request_cnt):
+        deposit_request = prepare_deposit_request(spec,
                                                   keypair_index,
                                                   # use max effective balance
                                                   spec.MAX_EFFECTIVE_BALANCE,
-                                                  first_deposit_receipt_index + offset,
+                                                  first_deposit_request_index + offset,
                                                   signed=True)
-        deposit_receipts.append(deposit_receipt)
+        deposit_requests.append(deposit_request)
         keypair_index += 1
 
     # Set start index if defined
-    if deposit_receipts_start_index:
-        state.deposit_receipts_start_index = deposit_receipts_start_index
+    if deposit_requests_start_index:
+        state.deposit_requests_start_index = deposit_requests_start_index
 
     block = build_empty_block_for_next_slot(spec, state)
 
-    # Assign deposits and deposit receipts
+    # Assign deposits and deposit requests
     block.body.deposits = deposits
-    block.body.execution_payload.deposit_receipts = deposit_receipts
+    block.body.execution_payload.deposit_requests = deposit_requests
     block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
 
     return state, block
@ -111,27 +111,27 @@ def prepare_state_and_block(spec,
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__start_index_is_set(spec, state):
|
def test_deposit_transition__start_index_is_set(spec, state):
|
||||||
# 0 deposits, 2 deposit receipts, unset deposit_receipts_start_index
|
# 0 deposits, 2 deposit requests, unset deposit_requests_start_index
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=0,
|
deposit_cnt=0,
|
||||||
deposit_receipt_cnt=2,
|
deposit_request_cnt=2,
|
||||||
first_deposit_receipt_index=state.eth1_data.deposit_count + 11)
|
first_deposit_request_index=state.eth1_data.deposit_count + 11)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block)
|
yield from run_deposit_transition_block(spec, state, block)
|
||||||
|
|
||||||
# deposit_receipts_start_index must be set to the index of the first receipt
|
# deposit_requests_start_index must be set to the index of the first request
|
||||||
assert state.deposit_receipts_start_index == block.body.execution_payload.deposit_receipts[0].index
|
assert state.deposit_requests_start_index == block.body.execution_payload.deposit_requests[0].index
|
||||||
|
|
||||||
|
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__process_eth1_deposits(spec, state):
|
def test_deposit_transition__process_eth1_deposits(spec, state):
|
||||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
|
# 3 deposits, 1 deposit request, state.eth1_data.deposit_count < state.deposit_requests_start_index
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=3,
|
deposit_cnt=3,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=11,
|
first_deposit_request_index=11,
|
||||||
deposit_receipts_start_index=7)
|
deposit_requests_start_index=7)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block)
|
yield from run_deposit_transition_block(spec, state, block)
|
||||||
|
|
||||||
|
@ -139,13 +139,13 @@ def test_deposit_transition__process_eth1_deposits(spec, state):
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__process_max_eth1_deposits(spec, state):
|
def test_deposit_transition__process_max_eth1_deposits(spec, state):
|
||||||
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
|
# spec.MAX_DEPOSITS deposits, 1 deposit request, state.eth1_data.deposit_count > state.deposit_requests_start_index
|
||||||
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS
|
# state.deposit_requests_start_index == spec.MAX_DEPOSITS
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=spec.MAX_DEPOSITS,
|
deposit_cnt=spec.MAX_DEPOSITS,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=spec.MAX_DEPOSITS + 1,
|
first_deposit_request_index=spec.MAX_DEPOSITS + 1,
|
||||||
deposit_receipts_start_index=spec.MAX_DEPOSITS,
|
deposit_requests_start_index=spec.MAX_DEPOSITS,
|
||||||
eth1_data_deposit_count=23)
|
eth1_data_deposit_count=23)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block)
|
yield from run_deposit_transition_block(spec, state, block)
|
||||||
|
@ -154,12 +154,12 @@ def test_deposit_transition__process_max_eth1_deposits(spec, state):
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state):
|
def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state):
|
||||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count == state.deposit_receipts_start_index
|
# 3 deposits, 1 deposit request, state.eth1_data.deposit_count == state.deposit_requests_start_index
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=3,
|
deposit_cnt=3,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=7,
|
first_deposit_request_index=7,
|
||||||
deposit_receipts_start_index=3)
|
deposit_requests_start_index=3)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block)
|
yield from run_deposit_transition_block(spec, state, block)
|
||||||
|
|
||||||
|
@ -167,12 +167,12 @@ def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state):
|
def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state):
|
||||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index
|
# 3 deposits, 1 deposit request, state.eth1_data.deposit_count < state.deposit_requests_start_index
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=3,
|
deposit_cnt=3,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=29,
|
first_deposit_request_index=29,
|
||||||
deposit_receipts_start_index=23,
|
deposit_requests_start_index=23,
|
||||||
eth1_data_deposit_count=17)
|
eth1_data_deposit_count=17)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||||
|
@ -181,12 +181,12 @@ def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state):
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state):
|
def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state):
|
||||||
# 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.eth1_data_index
|
# 3 deposits, 1 deposit request, state.eth1_data.deposit_count < state.eth1_data_index
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=3,
|
deposit_cnt=3,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=11,
|
first_deposit_request_index=11,
|
||||||
deposit_receipts_start_index=7,
|
deposit_requests_start_index=7,
|
||||||
eth1_data_deposit_count=2)
|
eth1_data_deposit_count=2)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||||
|
@ -195,13 +195,13 @@ def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state):
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(spec, state):
|
def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(spec, state):
|
||||||
# spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index
|
# spec.MAX_DEPOSITS deposits, 1 deposit request, state.eth1_data.deposit_count > state.deposit_requests_start_index
|
||||||
# state.deposit_receipts_start_index == spec.MAX_DEPOSITS - 1
|
# state.deposit_requests_start_index == spec.MAX_DEPOSITS - 1
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=spec.MAX_DEPOSITS,
|
deposit_cnt=spec.MAX_DEPOSITS,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=spec.MAX_DEPOSITS,
|
first_deposit_request_index=spec.MAX_DEPOSITS,
|
||||||
deposit_receipts_start_index=spec.MAX_DEPOSITS - 1,
|
deposit_requests_start_index=spec.MAX_DEPOSITS - 1,
|
||||||
eth1_data_deposit_count=23)
|
eth1_data_deposit_count=23)
|
||||||
|
|
||||||
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
yield from run_deposit_transition_block(spec, state, block, valid=False)
|
||||||
|
@ -210,16 +210,16 @@ def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(
|
||||||
@with_phases([ELECTRA])
|
@with_phases([ELECTRA])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
|
def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
|
||||||
# 1 deposit, 1 deposit receipt that top ups deposited validator
|
# 1 deposit, 1 deposit request that top ups deposited validator
|
||||||
state, block = prepare_state_and_block(spec, state,
|
state, block = prepare_state_and_block(spec, state,
|
||||||
deposit_cnt=1,
|
deposit_cnt=1,
|
||||||
deposit_receipt_cnt=1,
|
deposit_request_cnt=1,
|
||||||
first_deposit_receipt_index=11,
|
first_deposit_request_index=11,
|
||||||
deposit_receipts_start_index=7)
|
deposit_requests_start_index=7)
|
||||||
|
|
||||||
# Artificially assign deposit's pubkey to a deposit receipt of the same block
|
# Artificially assign deposit's pubkey to a deposit request of the same block
|
||||||
top_up_keys = [block.body.deposits[0].data.pubkey]
|
top_up_keys = [block.body.deposits[0].data.pubkey]
|
||||||
block.body.execution_payload.deposit_receipts[0].pubkey = top_up_keys[0]
|
block.body.execution_payload.deposit_requests[0].pubkey = top_up_keys[0]
|
||||||
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
|
||||||
|
|
||||||
pre_pending_deposits = len(state.pending_balance_deposits)
|
pre_pending_deposits = len(state.pending_balance_deposits)
|
||||||
|
@ -229,5 +229,5 @@ def test_deposit_transition__deposit_and_top_up_same_block(spec, state):
|
||||||
# Check the top up
|
# Check the top up
|
||||||
assert len(state.pending_balance_deposits) == pre_pending_deposits + 2
|
assert len(state.pending_balance_deposits) == pre_pending_deposits + 2
|
||||||
assert state.pending_balance_deposits[pre_pending_deposits].amount == block.body.deposits[0].data.amount
|
assert state.pending_balance_deposits[pre_pending_deposits].amount == block.body.deposits[0].data.amount
|
||||||
amount_from_deposit = block.body.execution_payload.deposit_receipts[0].amount
|
amount_from_deposit = block.body.execution_payload.deposit_requests[0].amount
|
||||||
assert state.pending_balance_deposits[pre_pending_deposits + 1].amount == amount_from_deposit
|
assert state.pending_balance_deposits[pre_pending_deposits + 1].amount == amount_from_deposit
|
||||||
|
|
|
@ -1,61 +0,0 @@
|
||||||
from eth2spec.utils import bls
|
|
||||||
from eth2spec.test.context import expect_assertion_error
|
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
|
||||||
|
|
||||||
|
|
||||||
def prepare_signed_consolidations(spec, state, index_pairs, fork_version=None):
|
|
||||||
def create_signed_consolidation(source_index, target_index):
|
|
||||||
consolidation = spec.Consolidation(
|
|
||||||
epoch=spec.get_current_epoch(state),
|
|
||||||
source_index=source_index,
|
|
||||||
target_index=target_index,
|
|
||||||
)
|
|
||||||
return sign_consolidation(spec, state, consolidation, privkeys[source_index], privkeys[target_index],
|
|
||||||
fork_version=fork_version)
|
|
||||||
|
|
||||||
return [create_signed_consolidation(source_index, target_index) for (source_index, target_index) in index_pairs]
|
|
||||||
|
|
||||||
|
|
||||||
def sign_consolidation(spec, state, consolidation, source_privkey, target_privkey, fork_version=None):
|
|
||||||
domain = spec.compute_domain(spec.DOMAIN_CONSOLIDATION, genesis_validators_root=state.genesis_validators_root)
|
|
||||||
signing_root = spec.compute_signing_root(consolidation, domain)
|
|
||||||
return spec.SignedConsolidation(
|
|
||||||
message=consolidation,
|
|
||||||
signature=bls.Aggregate([bls.Sign(source_privkey, signing_root), bls.Sign(target_privkey, signing_root)])
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def run_consolidation_processing(spec, state, signed_consolidation, valid=True):
|
|
||||||
"""
|
|
||||||
Run ``process_consolidation``, yielding:
|
|
||||||
- pre-state ('pre')
|
|
||||||
- consolidation ('consolidation')
|
|
||||||
- post-state ('post').
|
|
||||||
If ``valid == False``, run expecting ``AssertionError``
|
|
||||||
"""
|
|
||||||
|
|
||||||
source_validator = state.validators[signed_consolidation.message.source_index]
|
|
||||||
target_validator = state.validators[signed_consolidation.message.target_index]
|
|
||||||
|
|
||||||
yield 'pre', state
|
|
||||||
yield 'consolidation', signed_consolidation
|
|
||||||
|
|
||||||
if not valid:
|
|
||||||
expect_assertion_error(lambda: spec.process_consolidation(state, signed_consolidation))
|
|
||||||
yield 'post', None
|
|
||||||
return
|
|
||||||
|
|
||||||
pre_exit_epoch = source_validator.exit_epoch
|
|
||||||
|
|
||||||
spec.process_consolidation(state, signed_consolidation)
|
|
||||||
|
|
||||||
yield 'post', state
|
|
||||||
|
|
||||||
assert source_validator.withdrawal_credentials[1:] == target_validator.withdrawal_credentials[1:]
|
|
||||||
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
|
|
||||||
assert state.validators[signed_consolidation.message.source_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
assert state.validators[signed_consolidation.message.source_index].exit_epoch == state.earliest_consolidation_epoch
|
|
||||||
assert state.pending_consolidations[len(state.pending_consolidations) - 1] == spec.PendingConsolidation(
|
|
||||||
source_index=signed_consolidation.message.source_index,
|
|
||||||
target_index=signed_consolidation.message.target_index
|
|
||||||
)
|
|
|
@@ -34,7 +34,6 @@ LATEST_FORK = MAINNET_FORKS[-1]
 ALL_PHASES = (
     # Formal forks
     *MAINNET_FORKS,
-    DENEB,
     ELECTRA,
     # Experimental patches
     EIP7594,
@ -171,7 +171,7 @@ def prepare_state_and_deposit(spec, state, validator_index, amount,
|
||||||
return deposit
|
return deposit
|
||||||
|
|
||||||
|
|
||||||
def build_deposit_receipt(spec,
|
def build_deposit_request(spec,
|
||||||
index,
|
index,
|
||||||
pubkey,
|
pubkey,
|
||||||
privkey,
|
privkey,
|
||||||
|
@ -179,7 +179,7 @@ def build_deposit_receipt(spec,
|
||||||
withdrawal_credentials,
|
withdrawal_credentials,
|
||||||
signed):
|
signed):
|
||||||
deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed)
|
deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed)
|
||||||
return spec.DepositReceipt(
|
return spec.DepositRequest(
|
||||||
pubkey=deposit_data.pubkey,
|
pubkey=deposit_data.pubkey,
|
||||||
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
||||||
amount=deposit_data.amount,
|
amount=deposit_data.amount,
|
||||||
|
@ -187,14 +187,14 @@ def build_deposit_receipt(spec,
|
||||||
index=index)
|
index=index)
|
||||||
|
|
||||||
|
|
||||||
def prepare_deposit_receipt(spec, validator_index, amount,
|
def prepare_deposit_request(spec, validator_index, amount,
|
||||||
index=None,
|
index=None,
|
||||||
pubkey=None,
|
pubkey=None,
|
||||||
privkey=None,
|
privkey=None,
|
||||||
withdrawal_credentials=None,
|
withdrawal_credentials=None,
|
||||||
signed=False):
|
signed=False):
|
||||||
"""
|
"""
|
||||||
Create a deposit receipt for the given validator, depositing the given amount.
|
Create a deposit request for the given validator, depositing the given amount.
|
||||||
"""
|
"""
|
||||||
if index is None:
|
if index is None:
|
||||||
index = validator_index
|
index = validator_index
|
||||||
|
@ -209,7 +209,7 @@ def prepare_deposit_receipt(spec, validator_index, amount,
|
||||||
if withdrawal_credentials is None:
|
if withdrawal_credentials is None:
|
||||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
||||||
|
|
||||||
return build_deposit_receipt(
|
return build_deposit_request(
|
||||||
spec,
|
spec,
|
||||||
index,
|
index,
|
||||||
pubkey,
|
pubkey,
|
||||||
|
@ -320,11 +320,11 @@ def run_deposit_processing_with_specific_fork_version(
|
||||||
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
|
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
|
||||||
|
|
||||||
|
|
||||||
def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, valid=True, effective=True):
|
def run_deposit_request_processing(spec, state, deposit_request, validator_index, valid=True, effective=True):
|
||||||
"""
|
"""
|
||||||
Run ``process_deposit_receipt``, yielding:
|
Run ``process_deposit_request``, yielding:
|
||||||
- pre-state ('pre')
|
- pre-state ('pre')
|
||||||
- deposit_receipt ('deposit_receipt')
|
- deposit_request ('deposit_request')
|
||||||
- post-state ('post').
|
- post-state ('post').
|
||||||
If ``valid == False``, run expecting ``AssertionError``
|
If ``valid == False``, run expecting ``AssertionError``
|
||||||
"""
|
"""
|
||||||
|
@ -340,18 +340,18 @@ def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index
|
||||||
pre_pending_deposits = len(state.pending_balance_deposits)
|
pre_pending_deposits = len(state.pending_balance_deposits)
|
||||||
|
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
yield 'deposit_receipt', deposit_receipt
|
yield 'deposit_request', deposit_request
|
||||||
|
|
||||||
if not valid:
|
if not valid:
|
||||||
expect_assertion_error(lambda: spec.process_deposit_receipt(state, deposit_receipt))
|
expect_assertion_error(lambda: spec.process_deposit_request(state, deposit_request))
|
||||||
yield 'post', None
|
yield 'post', None
|
||||||
return
|
return
|
||||||
|
|
||||||
spec.process_deposit_receipt(state, deposit_receipt)
|
spec.process_deposit_request(state, deposit_request)
|
||||||
|
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
if not effective or not bls.KeyValidate(deposit_receipt.pubkey):
|
if not effective or not bls.KeyValidate(deposit_request.pubkey):
|
||||||
assert len(state.validators) == pre_validator_count
|
assert len(state.validators) == pre_validator_count
|
||||||
assert len(state.balances) == pre_validator_count
|
assert len(state.balances) == pre_validator_count
|
||||||
if is_top_up:
|
if is_top_up:
|
||||||
|
@ -368,11 +368,11 @@ def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index
|
||||||
assert len(state.balances) == pre_validator_count + 1
|
assert len(state.balances) == pre_validator_count + 1
|
||||||
|
|
||||||
assert len(state.pending_balance_deposits) == pre_pending_deposits + 1
|
assert len(state.pending_balance_deposits) == pre_pending_deposits + 1
|
||||||
assert state.pending_balance_deposits[pre_pending_deposits].amount == deposit_receipt.amount
|
assert state.pending_balance_deposits[pre_pending_deposits].amount == deposit_request.amount
|
||||||
assert state.pending_balance_deposits[pre_pending_deposits].index == validator_index
|
assert state.pending_balance_deposits[pre_pending_deposits].index == validator_index
|
||||||
|
|
||||||
|
|
||||||
def run_deposit_receipt_processing_with_specific_fork_version(
|
def run_deposit_request_processing_with_specific_fork_version(
|
||||||
spec,
|
spec,
|
||||||
state,
|
state,
|
||||||
fork_version,
|
fork_version,
|
||||||
|
@ -391,17 +391,17 @@ def run_deposit_receipt_processing_with_specific_fork_version(
|
||||||
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
|
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
|
||||||
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
|
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
|
||||||
)
|
)
|
||||||
deposit_receipt = spec.DepositReceipt(
|
deposit_request = spec.DepositRequest(
|
||||||
pubkey=deposit_data.pubkey,
|
pubkey=deposit_data.pubkey,
|
||||||
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
withdrawal_credentials=deposit_data.withdrawal_credentials,
|
||||||
amount=deposit_data.amount,
|
amount=deposit_data.amount,
|
||||||
signature=deposit_data.signature,
|
signature=deposit_data.signature,
|
||||||
index=validator_index)
|
index=validator_index)
|
||||||
|
|
||||||
yield from run_deposit_receipt_processing(
|
yield from run_deposit_request_processing(
|
||||||
spec,
|
spec,
|
||||||
state,
|
state,
|
||||||
deposit_receipt,
|
deposit_request,
|
||||||
validator_index,
|
validator_index,
|
||||||
valid=valid,
|
valid=valid,
|
||||||
effective=effective
|
effective=effective
|
||||||
|
|
|
@ -1,39 +0,0 @@
|
||||||
from eth2spec.test.context import expect_assertion_error
|
|
||||||
from eth2spec.test.helpers.state import get_validator_index_by_pubkey
|
|
||||||
|
|
||||||
|
|
||||||
#
|
|
||||||
# Run processing
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
||||||
def run_execution_layer_withdrawal_request_processing(spec, state, withdrawal_request, valid=True, success=True):
|
|
||||||
"""
|
|
||||||
Run ``process_execution_layer_withdrawal_request``, yielding:
|
|
||||||
- pre-state ('pre')
|
|
||||||
- withdrawal_request ('withdrawal_request')
|
|
||||||
- post-state ('post').
|
|
||||||
If ``valid == False``, run expecting ``AssertionError``
|
|
||||||
If ``success == False``, it doesn't initiate exit successfully
|
|
||||||
"""
|
|
||||||
validator_index = get_validator_index_by_pubkey(state, withdrawal_request.validator_pubkey)
|
|
||||||
|
|
||||||
yield 'pre', state
|
|
||||||
yield 'withdrawal_request', withdrawal_request
|
|
||||||
|
|
||||||
if not valid:
|
|
||||||
expect_assertion_error(lambda: spec.process_withdrawal_request(state, withdrawal_request))
|
|
||||||
yield 'post', None
|
|
||||||
return
|
|
||||||
|
|
||||||
pre_exit_epoch = state.validators[validator_index].exit_epoch
|
|
||||||
|
|
||||||
spec.process_withdrawal_request(state, withdrawal_request)
|
|
||||||
|
|
||||||
yield 'post', state
|
|
||||||
|
|
||||||
if success:
|
|
||||||
assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
|
|
||||||
assert state.validators[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
|
||||||
else:
|
|
||||||
assert state.validators[validator_index].exit_epoch == pre_exit_epoch
|
|
|
@ -35,8 +35,9 @@ def get_execution_payload_header(spec, execution_payload):
|
||||||
payload_header.blob_gas_used = execution_payload.blob_gas_used
|
payload_header.blob_gas_used = execution_payload.blob_gas_used
|
||||||
payload_header.excess_blob_gas = execution_payload.excess_blob_gas
|
payload_header.excess_blob_gas = execution_payload.excess_blob_gas
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts)
|
payload_header.deposit_requests_root = spec.hash_tree_root(execution_payload.deposit_requests)
|
||||||
payload_header.withdrawal_requests_root = spec.hash_tree_root(execution_payload.withdrawal_requests)
|
payload_header.withdrawal_requests_root = spec.hash_tree_root(execution_payload.withdrawal_requests)
|
||||||
|
payload_header.consolidation_requests_root = spec.hash_tree_root(execution_payload.consolidation_requests)
|
||||||
return payload_header
|
return payload_header
|
||||||
|
|
||||||
|
|
||||||
|
@ -58,8 +59,7 @@ def compute_el_header_block_hash(spec,
|
||||||
payload_header,
|
payload_header,
|
||||||
transactions_trie_root,
|
transactions_trie_root,
|
||||||
withdrawals_trie_root=None,
|
withdrawals_trie_root=None,
|
||||||
deposit_receipts_trie_root=None,
|
requests_trie_root=None):
|
||||||
withdrawal_requests_root=None):
|
|
||||||
"""
|
"""
|
||||||
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
|
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
|
||||||
"""
|
"""
|
||||||
|
@ -101,15 +101,16 @@ def compute_el_header_block_hash(spec,
|
||||||
# withdrawals_root
|
# withdrawals_root
|
||||||
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
|
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
|
||||||
if is_post_deneb(spec):
|
if is_post_deneb(spec):
|
||||||
# excess_blob_gas
|
# blob_gas_used
|
||||||
execution_payload_header_rlp.append((big_endian_int, payload_header.blob_gas_used))
|
execution_payload_header_rlp.append((big_endian_int, payload_header.blob_gas_used))
|
||||||
|
# excess_blob_gas
|
||||||
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_blob_gas))
|
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_blob_gas))
|
||||||
|
# parent_beacon_root
|
||||||
|
empty_root = bytes.fromhex("0000000000000000000000000000000000000000000000000000000000000000")
|
||||||
|
execution_payload_header_rlp.append((Binary(32, 32), empty_root))
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
# deposit_receipts_root
|
# requests_root
|
||||||
assert deposit_receipts_trie_root is not None
|
execution_payload_header_rlp.append((Binary(32, 32), requests_trie_root))
|
||||||
execution_payload_header_rlp.append((Binary(32, 32), deposit_receipts_trie_root))
|
|
||||||
# withdrawal requests root
|
|
||||||
execution_payload_header_rlp.append((Binary(32, 32), withdrawal_requests_root))
|
|
||||||
|
|
||||||
sedes = List([schema for schema, _ in execution_payload_header_rlp])
|
sedes = List([schema for schema, _ in execution_payload_header_rlp])
|
||||||
values = [value for _, value in execution_payload_header_rlp]
|
values = [value for _, value in execution_payload_header_rlp]
|
||||||
|
@ -136,8 +137,27 @@ def get_withdrawal_rlp(withdrawal):
|
||||||
return encode(values, sedes)
|
return encode(values, sedes)
|
||||||
|
|
||||||
|
|
||||||
|
def get_deposit_request_rlp_bytes(deposit_request):
|
||||||
|
deposit_request_rlp = [
|
||||||
|
# pubkey
|
||||||
|
(Binary(48, 48), deposit_request.pubkey),
|
||||||
|
# withdrawal_credentials
|
||||||
|
(Binary(32, 32), deposit_request.withdrawal_credentials),
|
||||||
|
# amount
|
||||||
|
(big_endian_int, deposit_request.amount),
|
||||||
|
# pubkey
|
||||||
|
(Binary(96, 96), deposit_request.signature),
|
||||||
|
# index
|
||||||
|
(big_endian_int, deposit_request.index),
|
||||||
|
]
|
||||||
|
|
||||||
|
sedes = List([schema for schema, _ in deposit_request_rlp])
|
||||||
|
values = [value for _, value in deposit_request_rlp]
|
||||||
|
return b"\x00" + encode(values, sedes)
|
||||||
|
|
||||||
|
|
||||||
# https://eips.ethereum.org/EIPS/eip-7002
|
# https://eips.ethereum.org/EIPS/eip-7002
|
||||||
def get_withdrawal_request_rlp(withdrawal_request):
|
def get_withdrawal_request_rlp_bytes(withdrawal_request):
|
||||||
withdrawal_request_rlp = [
|
withdrawal_request_rlp = [
|
||||||
# source_address
|
# source_address
|
||||||
(Binary(20, 20), withdrawal_request.source_address),
|
(Binary(20, 20), withdrawal_request.source_address),
|
||||||
|
@ -147,43 +167,41 @@ def get_withdrawal_request_rlp(withdrawal_request):
|
||||||
|
|
||||||
sedes = List([schema for schema, _ in withdrawal_request_rlp])
|
sedes = List([schema for schema, _ in withdrawal_request_rlp])
|
||||||
values = [value for _, value in withdrawal_request_rlp]
|
values = [value for _, value in withdrawal_request_rlp]
|
||||||
return encode(values, sedes)
|
return b"\x01" + encode(values, sedes)
|
||||||
|
|
||||||
|
|
||||||
def get_deposit_receipt_rlp(spec, deposit_receipt):
|
# https://eips.ethereum.org/EIPS/eip-7251
|
||||||
deposit_receipt_rlp = [
|
def get_consolidation_request_rlp_bytes(consolidation_request):
|
||||||
# pubkey
|
consolidation_request_rlp = [
|
||||||
(Binary(48, 48), deposit_receipt.pubkey),
|
# source_address
|
||||||
# withdrawal_credentials
|
(Binary(20, 20), consolidation_request.source_address),
|
||||||
(Binary(32, 32), deposit_receipt.withdrawal_credentials),
|
# source_pubkey
|
||||||
# amount
|
(Binary(48, 48), consolidation_request.source_pubkey),
|
||||||
(big_endian_int, deposit_receipt.amount),
|
# target_pubkey
|
||||||
# pubkey
|
(Binary(48, 48), consolidation_request.target_pubkey),
|
||||||
(Binary(96, 96), deposit_receipt.signature),
|
|
||||||
# index
|
|
||||||
(big_endian_int, deposit_receipt.index),
|
|
||||||
]
|
]
|
||||||
|
|
||||||
sedes = List([schema for schema, _ in deposit_receipt_rlp])
|
sedes = List([schema for schema, _ in consolidation_request_rlp])
|
||||||
values = [value for _, value in deposit_receipt_rlp]
|
values = [value for _, value in consolidation_request_rlp]
|
||||||
return encode(values, sedes)
|
return b"\x02" + encode(values, sedes)
|
||||||
|
|
||||||
|
|
||||||
def compute_el_block_hash(spec, payload):
|
def compute_el_block_hash(spec, payload):
|
||||||
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
|
transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
|
||||||
|
|
||||||
withdrawals_trie_root = None
|
withdrawals_trie_root = None
|
||||||
deposit_receipts_trie_root = None
|
requests_trie_root = None
|
||||||
withdrawal_requests_root = None
|
|
||||||
|
|
||||||
if is_post_capella(spec):
|
if is_post_capella(spec):
|
||||||
withdrawals_encoded = [get_withdrawal_rlp(withdrawal) for withdrawal in payload.withdrawals]
|
withdrawals_encoded = [get_withdrawal_rlp(withdrawal) for withdrawal in payload.withdrawals]
|
||||||
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
|
withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
deposit_receipts_encoded = [get_deposit_receipt_rlp(spec, receipt) for receipt in payload.deposit_receipts]
|
requests_encoded = []
|
||||||
deposit_receipts_trie_root = compute_trie_root_from_indexed_data(deposit_receipts_encoded)
|
requests_encoded += [get_deposit_request_rlp_bytes(request) for request in payload.deposit_requests]
|
||||||
withdrawal_requests_encoded = [get_withdrawal_request_rlp(request) for request in payload.withdrawal_requests]
|
requests_encoded += [get_withdrawal_request_rlp_bytes(request) for request in payload.withdrawal_requests]
|
||||||
withdrawal_requests_root = compute_trie_root_from_indexed_data(withdrawal_requests_encoded)
|
requests_encoded += [get_consolidation_request_rlp_bytes(request) for request in payload.consolidation_requests]
|
||||||
|
|
||||||
|
requests_trie_root = compute_trie_root_from_indexed_data(requests_encoded)
|
||||||
|
|
||||||
payload_header = get_execution_payload_header(spec, payload)
|
payload_header = get_execution_payload_header(spec, payload)
|
||||||
|
|
||||||
|
@ -192,8 +210,7 @@ def compute_el_block_hash(spec, payload):
|
||||||
payload_header,
|
payload_header,
|
||||||
transactions_trie_root,
|
transactions_trie_root,
|
||||||
withdrawals_trie_root,
|
withdrawals_trie_root,
|
||||||
deposit_receipts_trie_root,
|
requests_trie_root,
|
||||||
withdrawal_requests_root,
|
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -229,8 +246,9 @@ def build_empty_execution_payload(spec, state, randao_mix=None):
|
||||||
payload.blob_gas_used = 0
|
payload.blob_gas_used = 0
|
||||||
payload.excess_blob_gas = 0
|
payload.excess_blob_gas = 0
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
# just to be clear
|
payload.deposit_requests = []
|
||||||
payload.deposit_receipts = []
|
payload.withdrawal_requests = []
|
||||||
|
payload.consolidation_requests = []
|
||||||
|
|
||||||
payload.block_hash = compute_el_block_hash(spec, payload)
|
payload.block_hash = compute_el_block_hash(spec, payload)
|
||||||
|
|
||||||
|
|
|
@ -15,8 +15,23 @@ from eth2spec.test.helpers.whisk import compute_whisk_initial_tracker_cached, co
|
||||||
def build_mock_validator(spec, i: int, balance: int):
|
def build_mock_validator(spec, i: int, balance: int):
|
||||||
active_pubkey = pubkeys[i]
|
active_pubkey = pubkeys[i]
|
||||||
withdrawal_pubkey = pubkeys[-1 - i]
|
withdrawal_pubkey = pubkeys[-1 - i]
|
||||||
|
if is_post_electra(spec):
|
||||||
|
if balance > spec.MIN_ACTIVATION_BALANCE:
|
||||||
|
# use compounding withdrawal credentials if the balance is higher than MIN_ACTIVATION_BALANCE
|
||||||
|
withdrawal_credentials = (
|
||||||
|
spec.COMPOUNDING_WITHDRAWAL_PREFIX
|
||||||
|
+ b'\x00' * 11
|
||||||
|
+ spec.hash(withdrawal_pubkey)[12:]
|
||||||
|
)
|
||||||
|
else:
|
||||||
# insecurely use pubkey as withdrawal key as well
|
# insecurely use pubkey as withdrawal key as well
|
||||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(withdrawal_pubkey)[1:]
|
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(withdrawal_pubkey)[1:]
|
||||||
|
max_effective_balace = spec.MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||||
|
else:
|
||||||
|
# insecurely use pubkey as withdrawal key as well
|
||||||
|
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(withdrawal_pubkey)[1:]
|
||||||
|
max_effective_balace = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
validator = spec.Validator(
|
validator = spec.Validator(
|
||||||
pubkey=active_pubkey,
|
pubkey=active_pubkey,
|
||||||
withdrawal_credentials=withdrawal_credentials,
|
withdrawal_credentials=withdrawal_credentials,
|
||||||
|
@ -24,7 +39,7 @@ def build_mock_validator(spec, i: int, balance: int):
|
||||||
activation_epoch=spec.FAR_FUTURE_EPOCH,
|
activation_epoch=spec.FAR_FUTURE_EPOCH,
|
||||||
exit_epoch=spec.FAR_FUTURE_EPOCH,
|
exit_epoch=spec.FAR_FUTURE_EPOCH,
|
||||||
withdrawable_epoch=spec.FAR_FUTURE_EPOCH,
|
withdrawable_epoch=spec.FAR_FUTURE_EPOCH,
|
||||||
effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE)
|
effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, max_effective_balace)
|
||||||
)
|
)
|
||||||
|
|
||||||
return validator
|
return validator
|
||||||
|
@ -50,22 +65,19 @@ def get_sample_genesis_execution_payload_header(spec,
|
||||||
|
|
||||||
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
withdrawals_trie_root = None
|
withdrawals_trie_root = None
|
||||||
deposit_receipts_trie_root = None
|
requests_trie_root = None
|
||||||
exits_trie_root = None
|
|
||||||
|
|
||||||
if is_post_capella(spec):
|
if is_post_capella(spec):
|
||||||
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
deposit_receipts_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
requests_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
||||||
exits_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
|
|
||||||
|
|
||||||
payload_header.block_hash = compute_el_header_block_hash(
|
payload_header.block_hash = compute_el_header_block_hash(
|
||||||
spec,
|
spec,
|
||||||
payload_header,
|
payload_header,
|
||||||
transactions_trie_root,
|
transactions_trie_root,
|
||||||
withdrawals_trie_root,
|
withdrawals_trie_root,
|
||||||
deposit_receipts_trie_root,
|
requests_trie_root,
|
||||||
exits_trie_root,
|
|
||||||
)
|
)
|
||||||
return payload_header
|
return payload_header
|
||||||
|
|
||||||
|
@ -134,7 +146,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
)
|
)
|
||||||
|
|
||||||
if is_post_electra(spec):
|
if is_post_electra(spec):
|
||||||
state.deposit_receipts_start_index = spec.UNSET_DEPOSIT_RECEIPTS_START_INDEX
|
state.deposit_requests_start_index = spec.UNSET_DEPOSIT_REQUESTS_START_INDEX
|
||||||
|
|
||||||
if is_post_whisk(spec):
|
if is_post_whisk(spec):
|
||||||
vc = len(state.validators)
|
vc = len(state.validators)
|
||||||
|
|
|
@ -11,6 +11,7 @@ from eth2spec.test.helpers.deposits import (
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.forks import (
|
from eth2spec.test.helpers.forks import (
|
||||||
is_post_altair,
|
is_post_altair,
|
||||||
|
is_post_electra,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
@ -69,9 +70,14 @@ def test_initialize_beacon_state_some_small_balances(spec):
|
||||||
if is_post_altair(spec):
|
if is_post_altair(spec):
|
||||||
yield 'description', 'meta', get_post_altair_description(spec)
|
yield 'description', 'meta', get_post_altair_description(spec)
|
||||||
|
|
||||||
|
if is_post_electra(spec):
|
||||||
|
max_effective_balance = spec.MAX_EFFECTIVE_BALANCE_ELECTRA
|
||||||
|
else:
|
||||||
|
max_effective_balance = spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
main_deposit_count = spec.config.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
|
||||||
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
|
main_deposits, _, deposit_data_list = prepare_full_genesis_deposits(
|
||||||
spec, spec.MAX_EFFECTIVE_BALANCE,
|
spec, max_effective_balance,
|
||||||
deposit_count=main_deposit_count, signed=True,
|
deposit_count=main_deposit_count, signed=True,
|
||||||
)
|
)
|
||||||
# For deposits above, and for another deposit_count, add a balance of EFFECTIVE_BALANCE_INCREMENT
|
# For deposits above, and for another deposit_count, add a balance of EFFECTIVE_BALANCE_INCREMENT
|
||||||
|
@ -99,6 +105,8 @@ def test_initialize_beacon_state_some_small_balances(spec):
|
||||||
assert state.eth1_data.deposit_count == len(deposits)
|
assert state.eth1_data.deposit_count == len(deposits)
|
||||||
assert state.eth1_data.block_hash == eth1_block_hash
|
assert state.eth1_data.block_hash == eth1_block_hash
|
||||||
# only main deposits participate to the active balance
|
# only main deposits participate to the active balance
|
||||||
|
# NOTE: they are pre-ELECTRA deposits with BLS_WITHDRAWAL_PREFIX,
|
||||||
|
# so `MAX_EFFECTIVE_BALANCE` is used
|
||||||
assert spec.get_total_active_balance(state) == main_deposit_count * spec.MAX_EFFECTIVE_BALANCE
|
assert spec.get_total_active_balance(state) == main_deposit_count * spec.MAX_EFFECTIVE_BALANCE
|
||||||
|
|
||||||
# yield state
|
# yield state
|
||||||
|
|
|
@@ -1,5 +1,5 @@
 from py_ecc.bls import G2ProofOfPossession as py_ecc_bls
-from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2
+from py_ecc.bls.g2_primitives import signature_to_G2 as _signature_to_G2
 from py_ecc.optimized_bls12_381 import (  # noqa: F401
     G1 as py_ecc_G1,
     G2 as py_ecc_G2,
@@ -10,3 +10,4 @@ from remerkleable.core import BasicView, View, Path
 
 
 Bytes20 = ByteVector[20]  # type: ignore
+Bytes31 = ByteVector[31]  # type: ignore
@@ -1,23 +0,0 @@
-# Test format: Recover all cells
-
-Recover all cells given at least 50% of the original `cells`.
-
-## Test case format
-
-The test data is declared in a `data.yaml` file:
-
-```yaml
-input:
-  cell_ids: List[CellID] -- the cell identifier for each cell
-  cells: List[Cell] -- the partial collection of cells
-output: List[Cell] -- all cells, including recovered cells
-```
-
-- `CellID` is an unsigned 64-bit integer.
-- `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
-
-All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
-
-## Condition
-
-The `recover_all_cells` handler should recover missing cells, and the result should match the expected `output`. If any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or any `cell_id` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
@@ -0,0 +1,24 @@
+# Test format: Recover cells and KZG proofs
+
+Recover all cells/proofs given at least 50% of the original `cells` and `proofs`.
+
+## Test case format
+
+The test data is declared in a `data.yaml` file:
+
+```yaml
+input:
+  cell_indices: List[CellIndex] -- the cell indices
+  cells: List[Cell] -- the partial collection of cells
+output: Tuple[List[Cell], List[KZGProof]] -- all cells and proofs
+```
+
+- `CellIndex` is an unsigned 64-bit integer.
+- `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
+- `KZGProof` is a 48-byte hexadecimal string, prefixed with `0x`.
+
+All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
+
+## Condition
+
+The `recover_cells_and_kzg_proofs` handler should recover missing cells and proofs, and the result should match the expected `output`. If any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), any proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
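For orientation, the sketch below shows how a consumer might load and check one such vector. It assumes a `spec` object exposing `recover_cells_and_kzg_proofs` (as the EIP-7594 pyspec does) and a case directory containing the `data.yaml` described above; the helper itself is illustrative and not part of the repository.

```python
from pathlib import Path

import yaml
from eth_utils import decode_hex, encode_hex


def check_recover_vector(spec, case_dir: Path) -> None:
    # Load the test vector laid out as in the format above (assumed layout).
    data = yaml.safe_load((case_dir / 'data.yaml').read_text())
    cell_indices = data['input']['cell_indices']
    cells = [decode_hex(c) for c in data['input']['cells']]

    if data['output'] is None:
        # Invalid input: the handler is expected to raise, i.e. `output` is `null`.
        try:
            spec.recover_cells_and_kzg_proofs(cell_indices, cells)
        except Exception:
            return
        raise AssertionError('expected an error for this vector')

    # Valid input: recovered cells and proofs must match the expected output.
    expected_cells, expected_proofs = data['output']
    recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, cells)
    assert [encode_hex(c) for c in recovered_cells] == expected_cells
    assert [encode_hex(p) for p in recovered_proofs] == expected_proofs
```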
@@ -9,18 +9,18 @@ The test data is declared in a `data.yaml` file:
 ```yaml
 input:
   commitment: Bytes48 -- the KZG commitment
-  cell_id: CellID -- the identifier for the cell
+  cell_index: CellIndex -- the cell index
   cell: Cell -- the cell
   proof: Bytes48 -- the KZG proof for the cell
 output: bool -- true (correct proof) or false (incorrect proof)
 ```
 
 - `Bytes48` is a 48-byte hexadecimal string, prefixed with `0x`.
-- `CellID` is an unsigned 64-bit integer.
+- `CellIndex` is an unsigned 64-bit integer.
 - `Cell` is a 2048-byte hexadecimal string, prefixed with `0x`.
 
 All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`.
 
 ## Condition
 
-The `verify_cell_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `cell` by using the cell KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), `cell` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or `cell_id` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
+The `verify_cell_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `cell` by using the cell KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), `cell` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
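The "should error, i.e. the output should be `null`" convention can be sketched as follows; the helper name is hypothetical, and `spec.verify_cell_kzg_proof` is assumed to raise on malformed input, as the pyspec does.

```python
def run_verify_cell_kzg_proof_case(spec, commitment, cell_index, cell, proof):
    try:
        ok = spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
    except Exception:
        # Malformed commitment/proof/cell, or an out-of-range cell_index:
        # the expected `output` in data.yaml is `null`.
        return None
    # Well-formed inputs: the expected `output` is the boolean verification result.
    return bool(ok)
```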
@@ -25,4 +25,4 @@ All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `
 
 ## Condition
 
-The `verify_cell_kzg_proof_batch` handler should verify that `row_commitments` are correct KZG commitments to `cells` by using the cell KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or any `cell_id` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
+The `verify_cell_kzg_proof_batch` handler should verify that `row_commitments` are correct KZG commitments to `cells` by using the cell KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve), any cell is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), or any `cell_index` is invalid (e.g. greater than the number of cells for an extended blob), it should error, i.e. the output should be `null`.
@@ -45,8 +45,9 @@ Operations:
 | `execution_payload` | `BeaconBlockBody` | **`body`** | `process_execution_payload(state, body)` (new in Bellatrix) |
 | `withdrawals` | `ExecutionPayload` | `execution_payload` | `process_withdrawals(state, execution_payload)` (new in Capella) |
 | `bls_to_execution_change` | `SignedBLSToExecutionChange` | `address_change` | `process_bls_to_execution_change(state, address_change)` (new in Capella) |
-| `deposit_receipt` | `DepositReceipt` | `deposit_receipt` | `process_deposit_receipt(state, deposit_receipt)` (new in Electra) |
-| `exits` | `ExecutionLayerExit` | `execution_layer_exit` | `process_execution_layer_exit(state, execution_layer_exit)` (new in Electra) |
+| `deposit_request` | `DepositRequest` | `deposit_request` | `process_deposit_request(state, deposit_request)` (new in Electra) |
+| `withdrawal_request` | `WithdrawalRequest` | `withdrawal_request` | `process_withdrawal_request(state, withdrawal_request)` (new in Electra) |
+| `consolidation_request` | `ConsolidationRequest` | `consolidation_request` | `process_consolidation_request(state, consolidation_request)` (new in Electra) |
 
 Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here.
 
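As a rough illustration of how these rows are consumed, a test runner can dispatch on the handler name and call the corresponding `process_*` function with the named input object; this sketch is illustrative only and is not the repository's runner code.

```python
# Mapping of Electra operation handler names to processing function names,
# following the table above (names taken from the table; wiring is assumed).
ELECTRA_OPERATION_HANDLERS = {
    'deposit_request': 'process_deposit_request',
    'withdrawal_request': 'process_withdrawal_request',
    'consolidation_request': 'process_consolidation_request',
}


def apply_operation(spec, state, handler_name, operation):
    # Resolve and call e.g. spec.process_withdrawal_request(state, withdrawal_request).
    getattr(spec, ELECTRA_OPERATION_HANDLERS[handler_name])(state, operation)
```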
@ -11,11 +11,9 @@ from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
|
||||||
from eth2spec.test.helpers.constants import EIP7594
|
from eth2spec.test.helpers.constants import EIP7594
|
||||||
from eth2spec.test.helpers.typing import SpecForkName
|
from eth2spec.test.helpers.typing import SpecForkName
|
||||||
from eth2spec.test.utils.kzg_tests import (
|
from eth2spec.test.utils.kzg_tests import (
|
||||||
BLOB_RANDOM_VALID1,
|
|
||||||
BLOB_RANDOM_VALID2,
|
|
||||||
BLOB_RANDOM_VALID3,
|
|
||||||
CELL_RANDOM_VALID1,
|
CELL_RANDOM_VALID1,
|
||||||
CELL_RANDOM_VALID2,
|
CELL_RANDOM_VALID2,
|
||||||
|
G1,
|
||||||
INVALID_BLOBS,
|
INVALID_BLOBS,
|
||||||
INVALID_G1_POINTS,
|
INVALID_G1_POINTS,
|
||||||
INVALID_INDIVIDUAL_CELL_BYTES,
|
INVALID_INDIVIDUAL_CELL_BYTES,
|
||||||
|
@ -31,39 +29,11 @@ from eth2spec.test.utils.kzg_tests import (
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
|
||||||
# Test cases for compute_cells
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
def case01_compute_cells():
|
|
||||||
# Valid cases
|
|
||||||
for blob in VALID_BLOBS:
|
|
||||||
cells = spec.compute_cells(blob)
|
|
||||||
identifier = make_id(blob)
|
|
||||||
yield f'compute_cells_case_valid_{identifier}', {
|
|
||||||
'input': {
|
|
||||||
'blob': encode_hex(blob),
|
|
||||||
},
|
|
||||||
'output': encode_hex_list(cells)
|
|
||||||
}
|
|
||||||
|
|
||||||
# Edge case: Invalid blobs
|
|
||||||
for blob in INVALID_BLOBS:
|
|
||||||
expect_exception(spec.compute_cells, blob)
|
|
||||||
identifier = make_id(blob)
|
|
||||||
yield f'compute_cells_case_invalid_blob_{identifier}', {
|
|
||||||
'input': {
|
|
||||||
'blob': encode_hex(blob)
|
|
||||||
},
|
|
||||||
'output': None
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
###############################################################################
|
###############################################################################
|
||||||
# Test cases for compute_cells_and_kzg_proofs
|
# Test cases for compute_cells_and_kzg_proofs
|
||||||
###############################################################################
|
###############################################################################
|
||||||
|
|
||||||
def case02_compute_cells_and_kzg_proofs():
|
def case_compute_cells_and_kzg_proofs():
|
||||||
# Valid cases
|
# Valid cases
|
||||||
for blob in VALID_BLOBS:
|
for blob in VALID_BLOBS:
|
||||||
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
|
cells, proofs = spec.compute_cells_and_kzg_proofs(blob)
|
||||||
|
@@ -93,20 +63,20 @@ def case02_compute_cells_and_kzg_proofs():
 # Test cases for verify_cell_kzg_proof
 ###############################################################################

-def case03_verify_cell_kzg_proof():
+def case_verify_cell_kzg_proof():
     # Valid cases
     for i in range(len(VALID_BLOBS)):
         cells, proofs = VALID_CELLS_AND_PROOFS[i]
         commitment = VALID_COMMITMENTS[i]
-        cell_id = (2 ** i - 1) % spec.CELLS_PER_EXT_BLOB
-        cell = cells[cell_id]
-        proof = proofs[cell_id]
-        assert spec.verify_cell_kzg_proof(commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        cell_index = (2 ** i - 1) % spec.CELLS_PER_EXT_BLOB
+        cell = cells[cell_index]
+        proof = proofs[cell_index]
+        assert spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_valid_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -117,15 +87,15 @@ def case03_verify_cell_kzg_proof():
     for i in range(len(VALID_BLOBS)):
         cells, proofs = VALID_CELLS_AND_PROOFS[i]
         commitment = bls_add_one(VALID_COMMITMENTS[i])
-        cell_id = 99 % spec.CELLS_PER_EXT_BLOB
-        cell = cells[cell_id]
-        proof = proofs[cell_id]
-        assert not spec.verify_cell_kzg_proof(commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        cell_index = 99 % spec.CELLS_PER_EXT_BLOB
+        cell = cells[cell_index]
+        proof = proofs[cell_index]
+        assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_incorrect_commitment_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -134,17 +104,17 @@ def case03_verify_cell_kzg_proof():

     # Incorrect cell
     for i in range(len(VALID_INDIVIDUAL_RANDOM_CELL_BYTES)):
-        cell_id = 16 % spec.CELLS_PER_EXT_BLOB
+        cell_index = 16 % spec.CELLS_PER_EXT_BLOB
         commitment = VALID_COMMITMENTS[i]
         cells, proofs = VALID_CELLS_AND_PROOFS[i]
         cell = VALID_INDIVIDUAL_RANDOM_CELL_BYTES[i]
-        proof = proofs[cell_id]
-        assert not spec.verify_cell_kzg_proof(commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        proof = proofs[cell_index]
+        assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_incorrect_cell_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -153,17 +123,17 @@ def case03_verify_cell_kzg_proof():

     # Incorrect proof
     for i in range(len(VALID_BLOBS)):
-        cell_id = 91 % spec.CELLS_PER_EXT_BLOB
+        cell_index = 91 % spec.CELLS_PER_EXT_BLOB
         commitment = VALID_COMMITMENTS[i]
         cells, proofs = VALID_CELLS_AND_PROOFS[i]
-        cell = cells[cell_id]
-        proof = bls_add_one(proofs[cell_id])
-        assert not spec.verify_cell_kzg_proof(commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        cell = cells[cell_index]
+        proof = bls_add_one(proofs[cell_index])
+        assert not spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_incorrect_proof_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -173,33 +143,33 @@ def case03_verify_cell_kzg_proof():
     # Edge case: Invalid commitment
     for commitment in INVALID_G1_POINTS:
         cells, proofs = VALID_CELLS_AND_PROOFS[0]
-        cell_id = 81 % spec.CELLS_PER_EXT_BLOB
-        cell = cells[cell_id]
-        proof = proofs[cell_id]
-        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        cell_index = 81 % spec.CELLS_PER_EXT_BLOB
+        cell = cells[cell_index]
+        proof = proofs[cell_index]
+        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_invalid_commitment_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
             'output': None
         }

-    # Edge case: Invalid cell_id
-    for cell_id in [spec.CELLS_PER_EXT_BLOB, spec.CELLS_PER_EXT_BLOB + 1]:
+    # Edge case: Invalid cell_index
+    for cell_index in [spec.CELLS_PER_EXT_BLOB, spec.CELLS_PER_EXT_BLOB + 1]:
         cells, proofs = VALID_CELLS_AND_PROOFS[1]
         commitment = VALID_COMMITMENTS[1]
         cell = cells[0]
         proof = proofs[0]
-        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
-        yield f'verify_cell_kzg_proof_case_invalid_cell_id_{identifier}', {
+        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
+        yield f'verify_cell_kzg_proof_case_invalid_cell_index_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -208,16 +178,16 @@ def case03_verify_cell_kzg_proof():

     # Edge case: Invalid cell
     for cell in INVALID_INDIVIDUAL_CELL_BYTES:
-        cell_id = 32 % spec.CELLS_PER_EXT_BLOB
+        cell_index = 32 % spec.CELLS_PER_EXT_BLOB
         commitment = VALID_COMMITMENTS[2]
         cells, proofs = VALID_CELLS_AND_PROOFS[2]
-        proof = proofs[cell_id]
-        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        proof = proofs[cell_index]
+        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_invalid_cell_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -228,14 +198,14 @@ def case03_verify_cell_kzg_proof():
     for proof in INVALID_G1_POINTS:
         cells, _ = VALID_CELLS_AND_PROOFS[3]
         commitment = VALID_COMMITMENTS[3]
-        cell_id = 36 % spec.CELLS_PER_EXT_BLOB
-        cell = cells[cell_id]
-        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_id, cell, proof)
-        identifier = make_id(commitment, cell_id, cell, proof)
+        cell_index = 36 % spec.CELLS_PER_EXT_BLOB
+        cell = cells[cell_index]
+        expect_exception(spec.verify_cell_kzg_proof, commitment, cell_index, cell, proof)
+        identifier = make_id(commitment, cell_index, cell, proof)
         yield f'verify_cell_kzg_proof_case_invalid_proof_{identifier}', {
             'input': {
                 'commitment': encode_hex(commitment),
-                'cell_id': cell_id,
+                'cell_index': cell_index,
                 'cell': encode_hex(cell),
                 'proof': encode_hex(proof),
             },
@@ -247,7 +217,7 @@ def case03_verify_cell_kzg_proof():
 # Test cases for verify_cell_kzg_proof_batch
 ###############################################################################

-def case04_verify_cell_kzg_proof_batch():
+def case_verify_cell_kzg_proof_batch():
     # Valid cases
     for i in range(len(VALID_BLOBS)):
         cells, proofs = VALID_CELLS_AND_PROOFS[i]
@@ -616,182 +586,254 @@ def case04_verify_cell_kzg_proof_batch():


 ###############################################################################
-# Test cases for recover_all_cells
+# Test cases for recover_cells_and_kzg_proofs
 ###############################################################################

-def case05_recover_all_cells():
+def case_recover_cells_and_kzg_proofs():
     # Valid: No missing cells
-    blob = BLOB_RANDOM_VALID1
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(spec.CELLS_PER_EXT_BLOB))
-    recovered_cells = spec.recover_all_cells(cell_ids, cells)
+    cells, proofs = VALID_CELLS_AND_PROOFS[0]
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB))
+    recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, cells, proofs)
     assert recovered_cells == cells
-    identifier = make_id(cell_ids, cells)
-    yield f'recover_all_cells_case_valid_no_missing_{identifier}', {
+    assert recovered_proofs == proofs
+    identifier = make_id(cell_indices, cells, proofs)
+    yield f'recover_cells_and_kzg_proofs_case_valid_no_missing_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(cells),
+            'proofs': encode_hex_list(proofs),
         },
-        'output': encode_hex_list(recovered_cells)
+        'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
     }

     # Valid: Half missing cells (every other cell)
-    blob = BLOB_RANDOM_VALID2
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    recovered_cells = spec.recover_all_cells(cell_ids, partial_cells)
+    cells, proofs = VALID_CELLS_AND_PROOFS[1]
+    cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
     assert recovered_cells == cells
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_valid_half_missing_every_other_cell_{identifier}', {
+    assert recovered_proofs == proofs
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_valid_half_missing_every_other_cell_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
-        'output': encode_hex_list(recovered_cells)
+        'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
     }

     # Valid: Half missing cells (first half)
-    blob = BLOB_RANDOM_VALID3
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(0, spec.CELLS_PER_EXT_BLOB // 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    recovered_cells = spec.recover_all_cells(cell_ids, partial_cells)
+    cells, proofs = VALID_CELLS_AND_PROOFS[2]
+    cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB // 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
     assert recovered_cells == cells
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_valid_half_missing_first_half_{identifier}', {
+    assert recovered_proofs == proofs
+    identifier = make_id(cell_indices, partial_cells)
+    yield f'recover_cells_and_kzg_proofs_case_valid_half_missing_first_half_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
-        'output': encode_hex_list(recovered_cells)
+        'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
     }

     # Valid: Half missing cells (second half)
-    blob = BLOB_RANDOM_VALID1
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(spec.CELLS_PER_EXT_BLOB // 2, spec.CELLS_PER_EXT_BLOB))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    recovered_cells = spec.recover_all_cells(cell_ids, partial_cells)
+    cells, proofs = VALID_CELLS_AND_PROOFS[3]
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2, spec.CELLS_PER_EXT_BLOB))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    recovered_cells, recovered_proofs = spec.recover_cells_and_kzg_proofs(cell_indices, partial_cells, partial_proofs)
     assert recovered_cells == cells
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_valid_half_missing_second_half_{identifier}', {
+    assert recovered_proofs == proofs
+    identifier = make_id(cell_indices, partial_cells)
+    yield f'recover_cells_and_kzg_proofs_case_valid_half_missing_second_half_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
-        'output': encode_hex_list(recovered_cells)
+        'output': (encode_hex_list(recovered_cells), encode_hex_list(recovered_proofs))
     }

     # Edge case: All cells are missing
-    cell_ids, partial_cells = [], []
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_all_cells_are_missing_{identifier}', {
+    cell_indices, partial_cells = [], []
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells)
+    identifier = make_id(cell_indices, partial_cells)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_all_cells_are_missing_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
         'output': None
     }

     # Edge case: More than half missing
-    blob = BLOB_RANDOM_VALID2
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(spec.CELLS_PER_EXT_BLOB // 2 - 1))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_more_than_half_missing_{identifier}', {
+    cells, proofs = VALID_CELLS_AND_PROOFS[4]
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2 - 1))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_more_than_half_missing_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
         'output': None
     }

-    # Edge case: Invalid cell_id
-    blob = BLOB_RANDOM_VALID1
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(spec.CELLS_PER_EXT_BLOB // 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    # Replace first cell_id with an invalid value
-    cell_ids[0] = spec.CELLS_PER_EXT_BLOB
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_cell_id_{identifier}', {
+    # Edge case: More cells provided than CELLS_PER_EXT_BLOB
+    cells, proofs = VALID_CELLS_AND_PROOFS[5]
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB)) + [0]
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_more_cells_than_cells_per_ext_blob_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
+        },
+        'output': None
+    }
+
+    # Edge case: Invalid cell_index
+    cells, proofs = VALID_CELLS_AND_PROOFS[6]
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    # Replace first cell_index with an invalid value
+    cell_indices[0] = spec.CELLS_PER_EXT_BLOB
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_cell_index_{identifier}', {
+        'input': {
+            'cell_indices': cell_indices,
+            'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
         'output': None
     }

     # Edge case: Invalid cell
-    blob = BLOB_RANDOM_VALID2
     for cell in INVALID_INDIVIDUAL_CELL_BYTES:
-        cells = spec.compute_cells(blob)
-        cell_ids = list(range(spec.CELLS_PER_EXT_BLOB // 2))
-        partial_cells = [cells[cell_id] for cell_id in cell_ids]
+        cells, proofs = VALID_CELLS_AND_PROOFS[6]
+        cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
+        partial_cells = [cells[cell_index] for cell_index in cell_indices]
+        partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
         # Replace first cell with an invalid value
         partial_cells[0] = cell
-        expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-        identifier = make_id(cell_ids, partial_cells)
-        yield f'recover_all_cells_case_invalid_cell_{identifier}', {
+        expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+        identifier = make_id(cell_indices, partial_cells, partial_proofs)
+        yield f'recover_cells_and_kzg_proofs_case_invalid_cell_{identifier}', {
             'input': {
-                'cell_ids': cell_ids,
+                'cell_indices': cell_indices,
                 'cells': encode_hex_list(partial_cells),
+                'proofs': encode_hex_list(partial_proofs),
             },
             'output': None
         }

-    # Edge case: More cell_ids than cells
-    blob = BLOB_RANDOM_VALID3
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    # Add another cell_id
-    cell_ids.append(spec.CELLS_PER_EXT_BLOB - 1)
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_more_cell_ids_than_cells_{identifier}', {
-        'input': {
-            'cell_ids': cell_ids,
-            'cells': encode_hex_list(partial_cells),
-        },
-        'output': None
-    }
+    # Edge case: Invalid proof
+    for proof in INVALID_G1_POINTS:
+        cells, proofs = VALID_CELLS_AND_PROOFS[0]
+        cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2))
+        partial_cells = [cells[cell_index] for cell_index in cell_indices]
+        partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+        # Replace first proof with an invalid value
+        partial_proofs[0] = proof
+        expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+        identifier = make_id(cell_indices, partial_cells, partial_proofs)
+        yield f'recover_cells_and_kzg_proofs_case_invalid_proof_{identifier}', {
+            'input': {
+                'cell_indices': cell_indices,
+                'cells': encode_hex_list(partial_cells),
+                'proofs': encode_hex_list(partial_proofs),
+            },
+            'output': None
+        }

-    # Edge case: More cells than cell_ids
-    blob = BLOB_RANDOM_VALID1
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
+    # Edge case: More cell_indices than cells
+    cells, proofs = VALID_CELLS_AND_PROOFS[0]
+    cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    # Add another cell_index
+    cell_indices.append(spec.CELLS_PER_EXT_BLOB - 1)
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_more_cell_indices_than_cells_{identifier}', {
+        'input': {
+            'cell_indices': cell_indices,
+            'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
+        },
+        'output': None
+    }
+
+    # Edge case: More cells than cell_indices
+    cells, proofs = VALID_CELLS_AND_PROOFS[1]
+    cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
     # Add another cell
     partial_cells.append(CELL_RANDOM_VALID1)
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_more_cells_than_cell_ids_{identifier}', {
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_more_cells_than_cell_indices_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
         'output': None
     }

-    # Edge case: Duplicate cell_id
-    blob = BLOB_RANDOM_VALID2
-    cells = spec.compute_cells(blob)
-    cell_ids = list(range(spec.CELLS_PER_EXT_BLOB // 2))
-    partial_cells = [cells[cell_id] for cell_id in cell_ids]
-    # Replace first cell_id with the second cell_id
-    cell_ids[0] = cell_ids[1]
-    expect_exception(spec.recover_all_cells, cell_ids, partial_cells)
-    identifier = make_id(cell_ids, partial_cells)
-    yield f'recover_all_cells_case_invalid_duplicate_cell_id_{identifier}', {
+    # Edge case: More proofs than cell_indices
+    cells, proofs = VALID_CELLS_AND_PROOFS[1]
+    cell_indices = list(range(0, spec.CELLS_PER_EXT_BLOB, 2))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    # Add another proof
+    partial_proofs.append(G1)
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_more_proofs_than_cell_indices_{identifier}', {
         'input': {
-            'cell_ids': cell_ids,
+            'cell_indices': cell_indices,
             'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
+        },
+        'output': None
+    }
+
+    # Edge case: Duplicate cell_index
+    cells, proofs = VALID_CELLS_AND_PROOFS[2]
+    # There will be 65 cells, where 64 are unique and 1 is a duplicate.
+    # Depending on the implementation, 63 & 1 might not fail for the right
+    # reason. For example, if the implementation assigns cells in an array
+    # via index, this would result in 63 cells and the test would fail due
+    # to insufficient cell count, not because of a duplicate cell.
+    cell_indices = list(range(spec.CELLS_PER_EXT_BLOB // 2 + 1))
+    partial_cells = [cells[cell_index] for cell_index in cell_indices]
+    partial_proofs = [proofs[cell_index] for cell_index in cell_indices]
+    # Replace first cell_index with the second cell_index
+    cell_indices[0] = cell_indices[1]
+    expect_exception(spec.recover_cells_and_kzg_proofs, cell_indices, partial_cells, partial_proofs)
+    identifier = make_id(cell_indices, partial_cells, partial_proofs)
+    yield f'recover_cells_and_kzg_proofs_case_invalid_duplicate_cell_index_{identifier}', {
+        'input': {
+            'cell_indices': cell_indices,
+            'cells': encode_hex_list(partial_cells),
+            'proofs': encode_hex_list(partial_proofs),
         },
         'output': None
     }
@@ -829,9 +871,8 @@ if __name__ == "__main__":
     bls.use_arkworks()
     gen_runner.run_generator("kzg_7594", [
         # EIP-7594
-        create_provider(EIP7594, 'compute_cells', case01_compute_cells),
-        create_provider(EIP7594, 'compute_cells_and_kzg_proofs', case02_compute_cells_and_kzg_proofs),
-        create_provider(EIP7594, 'verify_cell_kzg_proof', case03_verify_cell_kzg_proof),
-        create_provider(EIP7594, 'verify_cell_kzg_proof_batch', case04_verify_cell_kzg_proof_batch),
-        create_provider(EIP7594, 'recover_all_cells', case05_recover_all_cells),
+        create_provider(EIP7594, 'compute_cells_and_kzg_proofs', case_compute_cells_and_kzg_proofs),
+        create_provider(EIP7594, 'verify_cell_kzg_proof', case_verify_cell_kzg_proof),
+        create_provider(EIP7594, 'verify_cell_kzg_proof_batch', case_verify_cell_kzg_proof_batch),
+        create_provider(EIP7594, 'recover_cells_and_kzg_proofs', case_recover_cells_and_kzg_proofs),
     ])

@@ -45,10 +45,10 @@ if __name__ == "__main__":

     _new_electra_mods = {key: 'eth2spec.test.electra.block_processing.test_process_' + key for key in [
         'attestation',
-        'consolidation',
-        'deposit_receipt',
-        'execution_layer_withdrawal_request',
-        'voluntary_exit'
+        'consolidation_request',
+        'deposit_request',
+        'voluntary_exit',
+        'withdrawal_request',
     ]}
     electra_mods = combine_mods(_new_electra_mods, deneb_mods)

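A minimal sketch of how a downstream test runner might consume one vector emitted by case_verify_cell_kzg_proof above. The data.yaml filename, the per-case directory layout, and the raw-bytes handling are assumptions made for illustration; only the 'input'/'output' shape and the spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof) call mirror the generator itself.

# Illustrative consumer for a single verify_cell_kzg_proof vector (sketch only).
# Assumes the case directory holds a data.yaml shaped like the dicts yielded above,
# and that `spec` is the same EIP-7594 pyspec module the generator imports.
import yaml


def run_verify_cell_kzg_proof_case(spec, case_dir):
    with open(f"{case_dir}/data.yaml") as f:
        data = yaml.safe_load(f)

    inp = data["input"]
    commitment = bytes.fromhex(inp["commitment"][2:])
    cell = bytes.fromhex(inp["cell"][2:])
    proof = bytes.fromhex(inp["proof"][2:])
    cell_index = inp["cell_index"]  # renamed from 'cell_id' by this change

    try:
        # A real harness may need to wrap these bytes in the spec's SSZ types first.
        result = spec.verify_cell_kzg_proof(commitment, cell_index, cell, proof)
    except Exception:
        result = None  # invalid inputs are expected to raise, matching 'output': None

    assert result == data["output"]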