Merge branch 'dev' into pr2649-tests
commit c185f91e00

Makefile (18 lines changed)
@@ -41,6 +41,8 @@ CURRENT_DIR = ${CURDIR}
LINTER_CONFIG_FILE = $(CURRENT_DIR)/linter.ini
GENERATOR_ERROR_LOG_FILE = $(CURRENT_DIR)/$(TEST_VECTOR_DIR)/testgen_error_log.txt

SCRIPTS_DIR = ${CURRENT_DIR}/scripts

export DAPP_SKIP_BUILD:=1
export DAPP_SRC:=$(SOLIDITY_DEPOSIT_CONTRACT_DIR)
export DAPP_LIB:=$(SOLIDITY_DEPOSIT_CONTRACT_DIR)/lib

@@ -103,12 +105,12 @@ install_test:
# Testing against `minimal` config by default
test: pyspec
	. venv/bin/activate; cd $(PY_SPEC_DIR); \
	python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
	python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

# Testing against `minimal` config by default
find_test: pyspec
	. venv/bin/activate; cd $(PY_SPEC_DIR); \
	python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
	python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.minimal --cov=eth2spec.altair.minimal --cov=eth2spec.bellatrix.minimal --cov=eth2spec.capella.minimal --cov=eth2spec.eip4844.minimal --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

citest: pyspec
	mkdir -p $(TEST_REPORT_DIR);

@@ -140,8 +142,8 @@ codespell:
lint: pyspec
	. venv/bin/activate; cd $(PY_SPEC_DIR); \
	flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
	&& pylint --disable=all --enable unused-argument ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella \
	&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella
	&& pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/eip4844 \
	&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.eip4844

lint_generators: pyspec
	. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \

@@ -195,6 +197,14 @@ $(TEST_VECTOR_DIR):
$(TEST_VECTOR_DIR)/:
	$(info ignoring duplicate tests dir)

gen_kzg_setups:
	cd $(SCRIPTS_DIR); \
	if ! test -d venv; then python3 -m venv venv; fi; \
	. venv/bin/activate; \
	pip3 install -r requirements.txt; \
	python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4 --g2-length=65 --output-dir ${CURRENT_DIR}/presets/minimal/trusted_setups; \
	python3 ./gen_kzg_trusted_setups.py --secret=1337 --g1-length=4096 --g2-length=65 --output-dir ${CURRENT_DIR}/presets/mainnet/trusted_setups

# For any generator, build it using the run_generator function.
# (creation of output dir is a dependency)
gen_%: $(TEST_VECTOR_DIR)

@@ -2,7 +2,7 @@

[![Join the chat at https://discord.gg/qGpsxSA](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/qGpsxSA) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

To learn more about proof-of-stake and sharding, see the [PoS FAQ](https://eth.wiki/en/concepts/proof-of-stake-faqs), [sharding FAQ](https://eth.wiki/sharding/Sharding-FAQs) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).
To learn more about proof-of-stake and sharding, see the [PoS documentation](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/), [sharding documentation](https://ethereum.org/en/upgrades/sharding/) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm).

This repository hosts the current Ethereum proof-of-stake specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests.

@@ -24,7 +24,7 @@ Features are researched and developed in parallel, and then consolidated into se
### In-development Specifications
| Code Name or Topic | Specs | Notes |
| - | - | - |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Validator additions](specs/capella/validator.md)</li></ul></ul> |
| Capella (tentative) | <ul><li>Core</li><ul><li>[Beacon chain changes](specs/capella/beacon-chain.md)</li><li>[Capella fork](specs/capella/fork.md)</li></ul><li>Additions</li><ul><li>[Validator additions](specs/capella/validator.md)</li><li>[P2P networking](specs/capella/p2p-interface.md)</li></ul></ul> |
| EIP4844 (tentative) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/eip4844/beacon-chain.md)</li><li>[EIP-4844 fork](specs/eip4844/fork.md)</li><li>[Polynomial commitments](specs/eip4844/polynomial-commitments.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/eip4844/validator.md)</li><li>[P2P networking](specs/eip4844/p2p-interface.md)</li></ul></ul> |
| Sharding (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/sharding/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[P2P networking](specs/sharding/p2p-interface.md)</li></ul></ul> |
| Custody Game (outdated) | <ul><li>Core</li><ul><li>[Beacon Chain changes](specs/custody_game/beacon-chain.md)</li></ul><li>Additions</li><ul><li>[Honest validator guide changes](specs/custody_game/validator.md)</li></ul></ul> | Dependent on sharding |

@@ -11,3 +11,8 @@ warn_unused_configs = True
warn_redundant_casts = True

ignore_missing_imports = True

# pylint
[MESSAGES CONTROL]
disable = all
enable = unused-argument
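The new `[MESSAGES CONTROL]` block turns all pylint checks off and re-enables only `unused-argument`, which is what the Makefile's `lint` target now points at via `--rcfile $(LINTER_CONFIG_FILE)`. For illustration only (a hypothetical snippet, not part of this commit), this is the kind of code that single check flags, and why the `setup.py` change below adds a `# pylint: disable=unused-argument` comment to the no-op execution engine:

```python
def get_payload(self, payload_id):
    # `payload_id` is accepted for interface compatibility but never used,
    # so pylint's `unused-argument` check would report it here.
    raise NotImplementedError("no default block production")
```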
@@ -1,24 +1,17 @@
# Mainnet preset - Capella

# Misc
# ---------------------------------------------------------------
# 2**8 (= 256) withdrawals
MAX_PARTIAL_WITHDRAWALS_PER_EPOCH: 256


# State list lengths
# ---------------------------------------------------------------
# 2**40 (= 1,099,511,627,776) withdrawals
WITHDRAWAL_QUEUE_LIMIT: 1099511627776


# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)
MAX_BLS_TO_EXECUTION_CHANGES: 16


# Execution
# ---------------------------------------------------------------
# 2**4 (= 16) withdrawals
MAX_WITHDRAWALS_PER_PAYLOAD: 16

# Withdrawals processing
# ---------------------------------------------------------------
# 2**14 (= 16384) validators
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16384

@@ -4,5 +4,5 @@
# ---------------------------------------------------------------
# `uint64(4096)`
FIELD_ELEMENTS_PER_BLOB: 4096
# `uint64(2**4)` (= 16)
MAX_BLOBS_PER_BLOCK: 16
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4

File diff suppressed because one or more lines are too long

@@ -1,17 +1,5 @@
# Minimal preset - Capella

# Misc
# ---------------------------------------------------------------
# [customized] 16 for more interesting tests at low validator count
MAX_PARTIAL_WITHDRAWALS_PER_EPOCH: 16


# State list lengths
# ---------------------------------------------------------------
# 2**40 (= 1,099,511,627,776) withdrawals
WITHDRAWAL_QUEUE_LIMIT: 1099511627776


# Max operations per block
# ---------------------------------------------------------------
# 2**4 (= 16)

@@ -20,5 +8,10 @@ MAX_BLS_TO_EXECUTION_CHANGES: 16

# Execution
# ---------------------------------------------------------------
# [customized] Lower than MAX_PARTIAL_WITHDRAWALS_PER_EPOCH so not all processed in one block
MAX_WITHDRAWALS_PER_PAYLOAD: 8
# [customized] 2**2 (= 4)
MAX_WITHDRAWALS_PER_PAYLOAD: 4

# Withdrawals processing
# ---------------------------------------------------------------
# [customized] 2**4 (= 16) validators
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP: 16

@@ -4,5 +4,5 @@
# ---------------------------------------------------------------
# [customized]
FIELD_ELEMENTS_PER_BLOB: 4
# `uint64(2**4)` (= 16)
MAX_BLOBS_PER_BLOCK: 16
# `uint64(2**2)` (= 4)
MAX_BLOBS_PER_BLOCK: 4

File diff suppressed because one or more lines are too long

@@ -0,0 +1,43 @@
import os
from pathlib import Path

from eth2spec.utils.kzg import (
    dump_kzg_trusted_setup_files,
)


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--secret",
        dest="secret",
        type=int,
        required=True,
        help='the secret of trusted setup',
    )
    parser.add_argument(
        "--g1-length",
        dest="g1_length",
        type=int,
        required=True,
        help='the length of G1 trusted setup',
    )
    parser.add_argument(
        "--g2-length",
        dest="g2_length",
        type=int,
        required=True,
        help='the length of G2 trusted setup',
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        dest="output_dir",
        required=True,
        help='the output directory',
    )
    args = parser.parse_args()

    dump_kzg_trusted_setup_files(args.secret, args.g1_length, args.g2_length, args.output_dir)
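The `gen_kzg_setups` Makefile target above runs this script from a virtualenv inside `scripts/`. For reference, the same files could be produced directly from Python. This is only a sketch: it assumes `eth2spec` is importable and that `dump_kzg_trusted_setup_files` keeps the positional argument order used by the script (secret, G1 length, G2 length, output directory):

```python
from eth2spec.utils.kzg import dump_kzg_trusted_setup_files

# Equivalent to the `minimal` invocation in the Makefile: a tiny, insecure
# testing setup (secret 1337, 4 G1 points, 65 G2 points).
dump_kzg_trusted_setup_files(1337, 4, 65, "presets/minimal/trusted_setups")
```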
@@ -0,0 +1 @@
../[generator]
setup.py (118 lines changed)
@@ -7,13 +7,14 @@ import os
import re
import string
import textwrap
from typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar
from typing import Dict, NamedTuple, List, Sequence, Optional, TypeVar, Tuple
from abc import ABC, abstractmethod
import ast
import subprocess
import sys
import copy
from collections import OrderedDict
import json


# NOTE: have to programmatically include third-party dependencies in `setup.py`.
@@ -121,7 +122,7 @@ def _get_self_type_from_source(source: str) -> Optional[str]:
    return args[0].annotation.id


def _get_class_info_from_source(source: str) -> (str, Optional[str]):
def _get_class_info_from_source(source: str) -> Tuple[str, Optional[str]]:
    class_def = ast.parse(source).body[0]
    base = class_def.bases[0]
    if isinstance(base, ast.Name):
@@ -140,6 +141,28 @@ def _is_constant_id(name: str) -> bool:
    return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))


def _load_kzg_trusted_setups(preset_name):
    """
    [TODO] it's not the final mainnet trusted setup.
    We will update it after the KZG ceremony.
    """
    file_path = str(Path(__file__).parent) + '/presets/' + preset_name + '/trusted_setups/testing_trusted_setups.json'

    with open(file_path, 'r') as f:
        json_data = json.load(f)

    trusted_setup_G1 = json_data['setup_G1']
    trusted_setup_G2 = json_data['setup_G2']
    trusted_setup_G1_lagrange = json_data['setup_G1_lagrange']
    roots_of_unity = json_data['roots_of_unity']

    return trusted_setup_G1, trusted_setup_G2, trusted_setup_G1_lagrange, roots_of_unity


ALL_KZG_SETUPS = {
    'minimal': _load_kzg_trusted_setups('minimal'),
    'mainnet': _load_kzg_trusted_setups('mainnet')
}

ETH2_SPEC_COMMENT_PREFIX = "eth2spec:"


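`_load_kzg_trusted_setups` expects each preset directory to ship a `testing_trusted_setups.json` produced by `scripts/gen_kzg_trusted_setups.py` above. Judging only from the keys read here, the file presumably has a shape like the following (illustrative placeholders, not real setup data):

```python
# Sketch of the assumed JSON structure; actual files contain hex-encoded
# compressed points and field elements of full length.
testing_trusted_setups = {
    "setup_G1": ["0x...", "..."],           # G1 points, one per field element of a blob
    "setup_G2": ["0x...", "..."],           # G2 points
    "setup_G1_lagrange": ["0x...", "..."],  # the G1 setup in Lagrange (evaluation) form
    "roots_of_unity": ["...", "..."],       # roots of unity for the evaluation domain
}
```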
@@ -167,7 +190,16 @@ def _parse_value(name: str, typed_value: str, type_hint: Optional[str]=None) ->
    return VariableDefinition(type_name=type_name, value=typed_value[i+1:-1], comment=comment, type_hint=type_hint)


def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) -> SpecObject:
def _update_constant_vars_with_kzg_setups(constant_vars, preset_name):
    comment = "noqa: E501"
    kzg_setups = ALL_KZG_SETUPS[preset_name]
    constant_vars['KZG_SETUP_G1'] = VariableDefinition(constant_vars['KZG_SETUP_G1'].value, str(kzg_setups[0]), comment, None)
    constant_vars['KZG_SETUP_G2'] = VariableDefinition(constant_vars['KZG_SETUP_G2'].value, str(kzg_setups[1]), comment, None)
    constant_vars['KZG_SETUP_LAGRANGE'] = VariableDefinition(constant_vars['KZG_SETUP_LAGRANGE'].value, str(kzg_setups[2]), comment, None)
    constant_vars['ROOTS_OF_UNITY'] = VariableDefinition(constant_vars['ROOTS_OF_UNITY'].value, str(kzg_setups[3]), comment, None)


def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str], preset_name=str) -> SpecObject:
    functions: Dict[str, str] = {}
    protocols: Dict[str, ProtocolDefinition] = {}
    constant_vars: Dict[str, VariableDefinition] = {}
@@ -232,7 +264,7 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) ->

    if not _is_constant_id(name):
        # Check for short type declarations
        if value.startswith(("uint", "Bytes", "ByteList", "Union", "Vector", "List")):
        if value.startswith(("uint", "Bytes", "ByteList", "Union", "Vector", "List", "ByteVector")):
            custom_types[name] = value
            continue

@@ -256,6 +288,10 @@ def get_spec(file_name: Path, preset: Dict[str, str], config: Dict[str, str]) ->
        if comment == "skip":
            should_skip = True

    # Load KZG trusted setup from files
    if any('KZG_SETUP' in name for name in constant_vars):
        _update_constant_vars_with_kzg_setups(constant_vars, preset_name)

    return SpecObject(
        functions=functions,
        protocols=protocols,
@@ -552,6 +588,7 @@ class NoopExecutionEngine(ExecutionEngine):
        pass

    def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload:
        # pylint: disable=unused-argument
        raise NotImplementedError("no default block production")


@@ -582,15 +619,13 @@ from eth2spec.bellatrix import {preset_name} as bellatrix
#
# EIP4844SpecBuilder
#
class EIP4844SpecBuilder(BellatrixSpecBuilder):
class EIP4844SpecBuilder(CapellaSpecBuilder):
    fork: str = EIP4844

    @classmethod
    def imports(cls, preset_name: str):
        return super().imports(preset_name) + f'''
from eth2spec.utils import kzg
from eth2spec.bellatrix import {preset_name} as bellatrix
from eth2spec.utils.ssz.ssz_impl import serialize as ssz_serialize
from eth2spec.capella import {preset_name} as capella
'''


@ -603,41 +638,56 @@ T = TypeVar('T') # For generic function
|
|||
@classmethod
|
||||
def sundry_functions(cls) -> str:
|
||||
return super().sundry_functions() + '\n\n' + '''
|
||||
# TODO: for mainnet, load pre-generated trusted setup file to reduce building time.
|
||||
# TESTING_FIELD_ELEMENTS_PER_BLOB is hardcoded copy from minimal presets
|
||||
TESTING_FIELD_ELEMENTS_PER_BLOB = 4
|
||||
TESTING_SECRET = 1337
|
||||
TESTING_KZG_SETUP_G1 = kzg.generate_setup(bls.G1, TESTING_SECRET, TESTING_FIELD_ELEMENTS_PER_BLOB)
|
||||
TESTING_KZG_SETUP_G2 = kzg.generate_setup(bls.G2, TESTING_SECRET, TESTING_FIELD_ELEMENTS_PER_BLOB)
|
||||
TESTING_KZG_SETUP_LAGRANGE = kzg.get_lagrange(TESTING_KZG_SETUP_G1)
|
||||
|
||||
KZG_SETUP_G1 = [bls.G1_to_bytes48(p) for p in TESTING_KZG_SETUP_G1]
|
||||
KZG_SETUP_G2 = [bls.G2_to_bytes96(p) for p in TESTING_KZG_SETUP_G2]
|
||||
KZG_SETUP_LAGRANGE = TESTING_KZG_SETUP_LAGRANGE
|
||||
ROOTS_OF_UNITY = kzg.compute_roots_of_unity(TESTING_FIELD_ELEMENTS_PER_BLOB)
|
||||
#
|
||||
# Temporarily disable Withdrawals functions for EIP4844 testnets
|
||||
#
|
||||
|
||||
|
||||
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> BlobsSidecar:
|
||||
pass'''
|
||||
def no_op(fn): # type: ignore
|
||||
# pylint: disable=unused-argument
|
||||
def wrapper(*args, **kw): # type: ignore
|
||||
return None
|
||||
return wrapper
|
||||
|
||||
|
||||
def get_empty_list_result(fn): # type: ignore
|
||||
# pylint: disable=unused-argument
|
||||
def wrapper(*args, **kw): # type: ignore
|
||||
return []
|
||||
return wrapper
|
||||
|
||||
|
||||
process_withdrawals = no_op(process_withdrawals)
|
||||
process_bls_to_execution_change = no_op(process_bls_to_execution_change)
|
||||
get_expected_withdrawals = get_empty_list_result(get_expected_withdrawals)
|
||||
|
||||
|
||||
#
|
||||
# End
|
||||
#
|
||||
|
||||
def retrieve_blobs_sidecar(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]:
|
||||
# pylint: disable=unused-argument
|
||||
return "TEST"'''
|
||||
|
||||
@classmethod
|
||||
def hardcoded_custom_type_dep_constants(cls, spec_object) -> str:
|
||||
constants = {
|
||||
'BYTES_PER_FIELD_ELEMENT': spec_object.constant_vars['BYTES_PER_FIELD_ELEMENT'].value,
|
||||
'FIELD_ELEMENTS_PER_BLOB': spec_object.preset_vars['FIELD_ELEMENTS_PER_BLOB'].value,
|
||||
'MAX_BLOBS_PER_BLOCK': spec_object.preset_vars['MAX_BLOBS_PER_BLOCK'].value,
|
||||
}
|
||||
return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants}
|
||||
|
||||
|
||||
|
||||
spec_builders = {
|
||||
builder.fork: builder
|
||||
for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, EIP4844SpecBuilder)
|
||||
}
|
||||
|
||||
|
||||
def is_spec_defined_type(value: str) -> bool:
|
||||
return value.startswith(('ByteList', 'Union', 'Vector', 'List'))
|
||||
def is_byte_vector(value: str) -> bool:
|
||||
return value.startswith(('ByteVector'))
|
||||
|
||||
|
||||
def objects_to_spec(preset_name: str,
|
||||
|
@@ -650,17 +700,8 @@ def objects_to_spec(preset_name: str,
    new_type_definitions = (
        '\n\n'.join(
            [
                f"class {key}({value}):\n    pass\n"
                f"class {key}({value}):\n    pass\n" if not is_byte_vector(value) else f"class {key}({value}):  # type: ignore\n    pass\n"
                for key, value in spec_object.custom_types.items()
                if not is_spec_defined_type(value)
            ]
        )
        + ('\n\n' if len([key for key, value in spec_object.custom_types.items() if is_spec_defined_type(value)]) > 0 else '')
        + '\n\n'.join(
            [
                f"{key} = {value}\n"
                for key, value in spec_object.custom_types.items()
                if is_spec_defined_type(value)
            ]
        )
    )
@@ -880,7 +921,7 @@ def _build_spec(preset_name: str, fork: str,
                source_files: Sequence[Path], preset_files: Sequence[Path], config_file: Path) -> str:
    preset = load_preset(preset_files)
    config = load_config(config_file)
    all_specs = [get_spec(spec, preset, config) for spec in source_files]
    all_specs = [get_spec(spec, preset, config, preset_name) for spec in source_files]

    spec_object = all_specs[0]
    for value in all_specs[1:]:
@@ -967,7 +1008,7 @@ class PySpecCommand(Command):
            specs/bellatrix/p2p-interface.md
            sync/optimistic.md
        """
        if self.spec_fork == CAPELLA:
        if self.spec_fork in (CAPELLA, EIP4844):
            self.md_doc_paths += """
                specs/capella/beacon-chain.md
                specs/capella/fork.md
@@ -1131,7 +1172,8 @@ setup(
        "pycryptodome==3.15.0",
        "py_ecc==6.0.0",
        "milagro_bls_binding==1.9.0",
        "remerkleable==0.1.24",
        "remerkleable==0.1.25",
        "trie==2.0.2",
        RUAMEL_YAML_VERSION,
        "lru-dict==1.1.8",
        MARKO_VERSION,

@@ -523,6 +523,7 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    if bls.Verify(pubkey, signing_root, deposit.data.signature):
        state.validators.append(get_validator_from_deposit(deposit))
        state.balances.append(amount)
        # [New in Altair]
        state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
        state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
        state.inactivity_scores.append(uint64(0))

@@ -40,10 +40,19 @@ Full nodes are expected to derive light client data from historic blocks and sta

### `create_light_client_bootstrap`

To form a `LightClientBootstrap`, the following objects are needed:
- `state`: the post state of any post-Altair block
- `block`: the corresponding block

```python
def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
def create_light_client_bootstrap(state: BeaconState,
                                  block: SignedBeaconBlock) -> LightClientBootstrap:
    assert compute_epoch_at_slot(state.slot) >= ALTAIR_FORK_EPOCH

    assert state.slot == state.latest_block_header.slot
    header = state.latest_block_header.copy()
    header.state_root = hash_tree_root(state)
    assert hash_tree_root(header) == hash_tree_root(block.message)

    return LightClientBootstrap(
        header=BeaconBlockHeader(
@@ -54,7 +63,7 @@ def create_light_client_bootstrap(state: BeaconState) -> LightClientBootstrap:
            body_root=state.latest_block_header.body_root,
        ),
        current_sync_committee=state.current_sync_committee,
        current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX)
        current_sync_committee_branch=compute_merkle_proof_for_state(state, CURRENT_SYNC_COMMITTEE_INDEX),
    )
```

@@ -69,13 +78,15 @@ Blocks are considered to be epoch boundary blocks if their block root can occur
To form a `LightClientUpdate`, the following historical states and blocks are needed:
- `state`: the post state of any block with a post-Altair parent block
- `block`: the corresponding block
- `attested_state`: the post state of the block referred to by `block.parent_root`
- `attested_state`: the post state of `attested_block`
- `attested_block`: the block referred to by `block.parent_root`
- `finalized_block`: the block referred to by `attested_state.finalized_checkpoint.root`, if locally available (may be unavailable, e.g., when using checkpoint sync, or if it was pruned locally)

```python
def create_light_client_update(state: BeaconState,
                               block: SignedBeaconBlock,
                               attested_state: BeaconState,
                               attested_block: SignedBeaconBlock,
                               finalized_block: Optional[SignedBeaconBlock]) -> LightClientUpdate:
    assert compute_epoch_at_slot(attested_state.slot) >= ALTAIR_FORK_EPOCH
    assert sum(block.message.body.sync_aggregate.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS
@@ -84,13 +95,13 @@ def create_light_client_update(state: BeaconState,
    header = state.latest_block_header.copy()
    header.state_root = hash_tree_root(state)
    assert hash_tree_root(header) == hash_tree_root(block.message)
    update_signature_period = compute_sync_committee_period(compute_epoch_at_slot(block.message.slot))
    update_signature_period = compute_sync_committee_period_at_slot(block.message.slot)

    assert attested_state.slot == attested_state.latest_block_header.slot
    attested_header = attested_state.latest_block_header.copy()
    attested_header.state_root = hash_tree_root(attested_state)
    assert hash_tree_root(attested_header) == block.message.parent_root
    update_attested_period = compute_sync_committee_period(compute_epoch_at_slot(attested_header.slot))
    assert hash_tree_root(attested_header) == hash_tree_root(attested_block.message) == block.message.parent_root
    update_attested_period = compute_sync_committee_period_at_slot(attested_block.message.slot)

    # `next_sync_committee` is only useful if the message is signed by the current sync committee
    if update_attested_period == update_signature_period:
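This hunk swaps `compute_sync_committee_period(compute_epoch_at_slot(...))` for a new `compute_sync_committee_period_at_slot` helper. The helper's definition is not part of this diff; presumably it is just the composition of the two calls it replaces, along the lines of:

```python
def compute_sync_committee_period_at_slot(slot: Slot) -> uint64:
    # Convenience wrapper: slot -> epoch -> sync committee period.
    return compute_sync_committee_period(compute_epoch_at_slot(slot))
```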
@@ -133,7 +144,7 @@ def create_light_client_update(state: BeaconState,
Full nodes SHOULD provide the best derivable `LightClientUpdate` (according to `is_better_update`) for each sync committee period covering any epochs in range `[max(ALTAIR_FORK_EPOCH, current_epoch - MIN_EPOCHS_FOR_BLOCK_REQUESTS), current_epoch]` where `current_epoch` is defined by the current wall-clock time. Full nodes MAY also provide `LightClientUpdate` for other sync committee periods.

- `LightClientUpdate` are assigned to sync committee periods based on their `attested_header.slot`
- `LightClientUpdate` are only considered if `compute_sync_committee_period(compute_epoch_at_slot(update.attested_header.slot)) == compute_sync_committee_period(compute_epoch_at_slot(update.signature_slot))`
- `LightClientUpdate` are only considered if `compute_sync_committee_period_at_slot(update.attested_header.slot) == compute_sync_committee_period_at_slot(update.signature_slot)`
- Only `LightClientUpdate` with `next_sync_committee` as selected by fork choice are provided, regardless of ranking by `is_better_update`. To uniquely identify a non-finalized sync committee fork, all of `period`, `current_sync_committee` and `next_sync_committee` need to be incorporated, as sync committees may reappear over time.

### `create_light_client_finality_update`

@@ -59,7 +59,7 @@ New global topics are added to provide light clients with the latest updates.
This topic is used to propagate the latest `LightClientFinalityUpdate` to light clients, allowing them to keep track of the latest `finalized_header`.

The following validations MUST pass before forwarding the `finality_update` on the network.
- _[IGNORE]_ No other `finality_update` with a lower or equal `finalized_header.slot` was already forwarded on the network
- _[IGNORE]_ The `finalized_header.slot` is greater than that of all previously forwarded `finality_update`s
- _[IGNORE]_ The `finality_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `finality_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)

For full nodes, the following validations MUST additionally pass before forwarding the `finality_update` on the network.
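The one-third-of-a-slot timing rule in the bullets above can be made concrete with a small sketch. It is not normative and assumes the usual `genesis_time`, `SECONDS_PER_SLOT` and `INTERVALS_PER_SLOT` values are in scope; the `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance is noted but not applied:

```python
def earliest_forward_time_seconds(signature_slot: Slot) -> int:
    # An update may be forwarded once one interval (a third of a slot) of
    # `signature_slot` has transpired, subject additionally to the
    # MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance on the receiver's clock.
    slot_start = genesis_time + signature_slot * SECONDS_PER_SLOT
    return slot_start + SECONDS_PER_SLOT // INTERVALS_PER_SLOT
```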
@@ -71,12 +71,23 @@ For light clients, the following validations MUST additionally pass before forwa

Light clients SHOULD call `process_light_client_finality_update` even if the message is ignored.

The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(finality_update.attested_header.slot))`.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Message SSZ type |
| ------------------------------- | ------------------------------------ |
| `GENESIS_FORK_VERSION` | n/a |
| `ALTAIR_FORK_VERSION` and later | `altair.LightClientFinalityUpdate` |

###### `light_client_optimistic_update`

This topic is used to propagate the latest `LightClientOptimisticUpdate` to light clients, allowing them to keep track of the latest `optimistic_header`.

The following validations MUST pass before forwarding the `optimistic_update` on the network.
- _[IGNORE]_ No other `optimistic_update` with a lower or equal `attested_header.slot` was already forwarded on the network
- _[IGNORE]_ The `attested_header.slot` is greater than that of all previously forwarded `optimistic_update`s
- _[IGNORE]_ The `optimistic_update` is received after the block at `signature_slot` was given enough time to propagate through the network -- i.e. validate that one-third of `optimistic_update.signature_slot` has transpired (`SECONDS_PER_SLOT / INTERVALS_PER_SLOT` seconds after the start of the slot, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)

For full nodes, the following validations MUST additionally pass before forwarding the `optimistic_update` on the network.
@@ -88,6 +99,17 @@ For light clients, the following validations MUST additionally pass before forwa

Light clients SHOULD call `process_light_client_optimistic_update` even if the message is ignored.

The gossip `ForkDigest`-context is determined based on `compute_fork_version(compute_epoch_at_slot(optimistic_update.attested_header.slot))`.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Message SSZ type |
| ------------------------------- | ------------------------------------ |
| `GENESIS_FORK_VERSION` | n/a |
| `ALTAIR_FORK_VERSION` and later | `altair.LightClientOptimisticUpdate` |

### The Req/Resp domain

#### Messages
@@ -116,7 +138,7 @@ Requests the `LightClientBootstrap` structure corresponding to a given post-Alta

The request MUST be encoded as an SSZ-field.

Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block's post state needs to be known.
Peers SHOULD provide results as defined in [`create_light_client_bootstrap`](./full-node.md#create_light_client_bootstrap). To fulfill a request, the requested block and its post state need to be known.

When a `LightClientBootstrap` instance cannot be produced for a given block root, peers SHOULD respond with error code `3: ResourceUnavailable`.

@@ -77,9 +77,9 @@

```python
class LightClientBootstrap(Container):
    # The requested beacon block header
    # Header matching the requested beacon block root
    header: BeaconBlockHeader
    # Current sync committee corresponding to `header`
    # Current sync committee corresponding to `header.state_root`
    current_sync_committee: SyncCommittee
    current_sync_committee_branch: Vector[Bytes32, floorlog2(CURRENT_SYNC_COMMITTEE_INDEX)]
```

@@ -88,12 +88,12 @@ class LightClientBootstrap(Container):

```python
class LightClientUpdate(Container):
    # The beacon block header that is attested to by the sync committee
    # Header attested to by the sync committee
    attested_header: BeaconBlockHeader
    # Next sync committee corresponding to `attested_header`
    # Next sync committee corresponding to `attested_header.state_root`
    next_sync_committee: SyncCommittee
    next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
    # The finalized beacon block header attested to by Merkle branch
    # Finalized header corresponding to `attested_header.state_root`
    finalized_header: BeaconBlockHeader
    finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
    # Sync committee aggregate signature

@@ -106,9 +106,9 @@ class LightClientUpdate(Container):

```python
class LightClientFinalityUpdate(Container):
    # The beacon block header that is attested to by the sync committee
    # Header attested to by the sync committee
    attested_header: BeaconBlockHeader
    # The finalized beacon block header attested to by Merkle branch
    # Finalized header corresponding to `attested_header.state_root`
    finalized_header: BeaconBlockHeader
    finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
    # Sync committee aggregate signature

@@ -121,7 +121,7 @@ class LightClientFinalityUpdate(Container):

```python
class LightClientOptimisticUpdate(Container):
    # The beacon block header that is attested to by the sync committee
    # Header attested to by the sync committee
    attested_header: BeaconBlockHeader
    # Sync committee aggregate signature
    sync_aggregate: SyncAggregate

@@ -134,9 +134,9 @@ class LightClientOptimisticUpdate(Container):
```python
@dataclass
class LightClientStore(object):
    # Beacon block header that is finalized
    # Header that is finalized
    finalized_header: BeaconBlockHeader
    # Sync committees corresponding to the header
    # Sync committees corresponding to the finalized header
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
    # Best available header to switch finalized head to if we see nothing else

@@ -110,7 +110,7 @@ The following gossip validation from prior specifications MUST NOT be applied if
### Transitioning the gossip

See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
details on how to handle transitioning gossip topics for EIP-4844.
details on how to handle transitioning gossip topics.

## The Req/Resp domain

@ -8,14 +8,11 @@
|
|||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Domain types](#domain-types)
|
||||
- [Preset](#preset)
|
||||
- [Misc](#misc)
|
||||
- [State list lengths](#state-list-lengths)
|
||||
- [Max operations per block](#max-operations-per-block)
|
||||
- [Execution](#execution)
|
||||
- [Configuration](#configuration)
|
||||
- [Withdrawals processing](#withdrawals-processing)
|
||||
- [Containers](#containers)
|
||||
- [New containers](#new-containers)
|
||||
- [`Withdrawal`](#withdrawal)
|
||||
|
@ -28,18 +25,13 @@
|
|||
- [`BeaconBlockBody`](#beaconblockbody)
|
||||
- [`BeaconState`](#beaconstate)
|
||||
- [Helpers](#helpers)
|
||||
- [Beacon state mutators](#beacon-state-mutators)
|
||||
- [`withdraw_balance`](#withdraw_balance)
|
||||
- [Predicates](#predicates)
|
||||
- [`has_eth1_withdrawal_credential`](#has_eth1_withdrawal_credential)
|
||||
- [`is_fully_withdrawable_validator`](#is_fully_withdrawable_validator)
|
||||
- [`is_partially_withdrawable_validator`](#is_partially_withdrawable_validator)
|
||||
- [Beacon chain state transition function](#beacon-chain-state-transition-function)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Full withdrawals](#full-withdrawals)
|
||||
- [Partial withdrawals](#partial-withdrawals)
|
||||
- [Historical batches updates](#historical-batches-updates)
|
||||
- [Block processing](#block-processing)
|
||||
- [New `get_expected_withdrawals`](#new-get_expected_withdrawals)
|
||||
- [New `process_withdrawals`](#new-process_withdrawals)
|
||||
- [Modified `process_execution_payload`](#modified-process_execution_payload)
|
||||
- [Modified `process_operations`](#modified-process_operations)
|
||||
|
@ -67,8 +59,6 @@ We define the following Python custom types for type hinting and readability:
|
|||
| - | - | - |
|
||||
| `WithdrawalIndex` | `uint64` | an index of a `Withdrawal` |
|
||||
|
||||
## Constants
|
||||
|
||||
### Domain types
|
||||
|
||||
| Name | Value |
|
||||
|
@ -77,18 +67,6 @@ We define the following Python custom types for type hinting and readability:
|
|||
|
||||
## Preset
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MAX_PARTIAL_WITHDRAWALS_PER_EPOCH` | `uint64(2**8)` (= 256) |
|
||||
|
||||
### State list lengths
|
||||
|
||||
| Name | Value | Unit |
|
||||
| - | - | :-: |
|
||||
| `WITHDRAWAL_QUEUE_LIMIT` | `uint64(2**40)` (= 1,099,511,627,776) | withdrawals enqueued in state |
|
||||
|
||||
### Max operations per block
|
||||
|
||||
| Name | Value |
|
||||
|
@ -101,7 +79,11 @@ We define the following Python custom types for type hinting and readability:
|
|||
| - | - | - |
|
||||
| `MAX_WITHDRAWALS_PER_PAYLOAD` | `uint64(2**4)` (= 16) | Maximum amount of withdrawals allowed in each payload |
|
||||
|
||||
## Configuration
|
||||
### Withdrawals processing
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP` | `16384` (= 2**14 ) |
|
||||
|
||||
## Containers
|
||||
|
||||
|
@ -255,34 +237,14 @@ class BeaconState(Container):
|
|||
# Execution
|
||||
latest_execution_payload_header: ExecutionPayloadHeader
|
||||
# Withdrawals
|
||||
withdrawal_queue: List[Withdrawal, WITHDRAWAL_QUEUE_LIMIT] # [New in Capella]
|
||||
next_withdrawal_index: WithdrawalIndex # [New in Capella]
|
||||
next_partial_withdrawal_validator_index: ValidatorIndex # [New in Capella]
|
||||
next_withdrawal_validator_index: ValidatorIndex # [New in Capella]
|
||||
# Deep history
|
||||
historical_batches: List[HistoricalBatchSummary, HISTORICAL_ROOTS_LIMIT] # Valid from Merge onwards
|
||||
```
|
||||
|
||||
## Helpers
|
||||
|
||||
### Beacon state mutators
|
||||
|
||||
#### `withdraw_balance`
|
||||
|
||||
```python
|
||||
def withdraw_balance(state: BeaconState, validator_index: ValidatorIndex, amount: Gwei) -> None:
|
||||
# Decrease the validator's balance
|
||||
decrease_balance(state, validator_index, amount)
|
||||
# Create a corresponding withdrawal receipt
|
||||
withdrawal = Withdrawal(
|
||||
index=state.next_withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(state.validators[validator_index].withdrawal_credentials[12:]),
|
||||
amount=amount,
|
||||
)
|
||||
state.next_withdrawal_index = WithdrawalIndex(state.next_withdrawal_index + 1)
|
||||
state.withdrawal_queue.append(withdrawal)
|
||||
```
|
||||
|
||||
### Predicates
|
||||
|
||||
#### `has_eth1_withdrawal_credential`
|
||||
|
@ -323,81 +285,6 @@ def is_partially_withdrawable_validator(validator: Validator, balance: Gwei) ->
|
|||
|
||||
## Beacon chain state transition function
|
||||
|
||||
### Epoch processing
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_inactivity_updates(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_batches_update(state)
|
||||
process_participation_flag_updates(state)
|
||||
process_sync_committee_updates(state)
|
||||
process_full_withdrawals(state) # [New in Capella]
|
||||
process_partial_withdrawals(state) # [New in Capella]
|
||||
|
||||
```
|
||||
|
||||
#### Full withdrawals
|
||||
|
||||
*Note*: The function `process_full_withdrawals` is new.
|
||||
|
||||
```python
|
||||
def process_full_withdrawals(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
for index in range(len(state.validators)):
|
||||
balance = state.balances[index]
|
||||
validator = state.validators[index]
|
||||
if is_fully_withdrawable_validator(validator, balance, current_epoch):
|
||||
withdraw_balance(state, ValidatorIndex(index), balance)
|
||||
```
|
||||
|
||||
#### Partial withdrawals
|
||||
|
||||
*Note*: The function `process_partial_withdrawals` is new.
|
||||
|
||||
```python
|
||||
def process_partial_withdrawals(state: BeaconState) -> None:
|
||||
partial_withdrawals_count = 0
|
||||
# Begin where we left off last time
|
||||
validator_index = state.next_partial_withdrawal_validator_index
|
||||
for _ in range(len(state.validators)):
|
||||
balance = state.balances[validator_index]
|
||||
validator = state.validators[validator_index]
|
||||
if is_partially_withdrawable_validator(validator, balance):
|
||||
withdraw_balance(state, validator_index, balance - MAX_EFFECTIVE_BALANCE)
|
||||
partial_withdrawals_count += 1
|
||||
|
||||
# Iterate to next validator to check for partial withdrawal
|
||||
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
|
||||
# Exit if performed maximum allowable withdrawals
|
||||
if partial_withdrawals_count == MAX_PARTIAL_WITHDRAWALS_PER_EPOCH:
|
||||
break
|
||||
|
||||
state.next_partial_withdrawal_validator_index = validator_index
|
||||
```
|
||||
|
||||
#### Historical batches updates
|
||||
|
||||
*Note*: The function `process_historical_batches_update` replaces `process_historical_roots_update` in phase0.
|
||||
|
||||
```python
|
||||
def process_historical_batches_update(state: BeaconState) -> None:
|
||||
# Set historical block root accumulator
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
|
||||
historical_batch = HistoricalBatchSummary(
|
||||
block_batch_root=hash_tree_root(state.block_roots),
|
||||
state_batch_root=hash_tree_root(state.state_roots))
|
||||
state.historical_batches.append(historical_batch)
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
|
@ -408,23 +295,70 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
|||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in Capella]
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body)
|
||||
process_operations(state, block.body) # [Modified in Capella]
|
||||
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||
```
|
||||
|
||||
#### New `get_expected_withdrawals`
|
||||
|
||||
```python
|
||||
def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
|
||||
epoch = get_current_epoch(state)
|
||||
withdrawal_index = state.next_withdrawal_index
|
||||
validator_index = state.next_withdrawal_validator_index
|
||||
withdrawals: List[Withdrawal] = []
|
||||
bound = min(len(state.validators), MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
|
||||
for _ in range(bound):
|
||||
validator = state.validators[validator_index]
|
||||
balance = state.balances[validator_index]
|
||||
if is_fully_withdrawable_validator(validator, balance, epoch):
|
||||
withdrawals.append(Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance,
|
||||
))
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
elif is_partially_withdrawable_validator(validator, balance):
|
||||
withdrawals.append(Withdrawal(
|
||||
index=withdrawal_index,
|
||||
validator_index=validator_index,
|
||||
address=ExecutionAddress(validator.withdrawal_credentials[12:]),
|
||||
amount=balance - MAX_EFFECTIVE_BALANCE,
|
||||
))
|
||||
withdrawal_index += WithdrawalIndex(1)
|
||||
if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
break
|
||||
validator_index = ValidatorIndex((validator_index + 1) % len(state.validators))
|
||||
return withdrawals
|
||||
```
|
||||
|
||||
#### New `process_withdrawals`
|
||||
|
||||
```python
|
||||
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
|
||||
num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
|
||||
dequeued_withdrawals = state.withdrawal_queue[:num_withdrawals]
|
||||
expected_withdrawals = get_expected_withdrawals(state)
|
||||
assert len(payload.withdrawals) == len(expected_withdrawals)
|
||||
|
||||
assert len(dequeued_withdrawals) == len(payload.withdrawals)
|
||||
for dequeued_withdrawal, withdrawal in zip(dequeued_withdrawals, payload.withdrawals):
|
||||
assert dequeued_withdrawal == withdrawal
|
||||
for expected_withdrawal, withdrawal in zip(expected_withdrawals, payload.withdrawals):
|
||||
assert withdrawal == expected_withdrawal
|
||||
decrease_balance(state, withdrawal.validator_index, withdrawal.amount)
|
||||
|
||||
# Remove dequeued withdrawals from state
|
||||
state.withdrawal_queue = state.withdrawal_queue[num_withdrawals:]
|
||||
# Update the next withdrawal index if this block contained withdrawals
|
||||
if len(expected_withdrawals) != 0:
|
||||
latest_withdrawal = expected_withdrawals[-1]
|
||||
state.next_withdrawal_index = WithdrawalIndex(latest_withdrawal.index + 1)
|
||||
|
||||
# Update the next validator index to start the next withdrawal sweep
|
||||
if len(expected_withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
|
||||
# Next sweep starts after the latest withdrawal's validator index
|
||||
next_validator_index = ValidatorIndex((expected_withdrawals[-1].validator_index + 1) % len(state.validators))
|
||||
state.next_withdrawal_validator_index = next_validator_index
|
||||
else:
|
||||
# Advance sweep by the max length of the sweep if there was not a full set of withdrawals
|
||||
next_index = state.next_withdrawal_validator_index + MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
next_validator_index = ValidatorIndex(next_index % len(state.validators))
|
||||
state.next_withdrawal_validator_index = next_validator_index
|
||||
```
|
||||
|
||||
#### Modified `process_execution_payload`
|
||||
|
|
|
@@ -129,9 +129,8 @@ def upgrade_to_capella(pre: bellatrix.BeaconState) -> BeaconState:
        # Execution-layer
        latest_execution_payload_header=latest_execution_payload_header,
        # Withdrawals
        withdrawal_queue=[],
        next_withdrawal_index=WithdrawalIndex(0),
        next_partial_withdrawal_validator_index=ValidatorIndex(0),
        next_withdrawal_validator_index=ValidatorIndex(0),
    )

    return post

@ -0,0 +1,110 @@
|
|||
# Capella -- Networking
|
||||
|
||||
This document contains the networking specification for Capella.
|
||||
|
||||
The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Modifications in Capella](#modifications-in-capella)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [`bls_to_execution_change`](#bls_to_execution_change)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
|
||||
# Modifications in Capella
|
||||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
A new topic is added to support the gossip of withdrawal credential change messages. And an existing topic is upgraded for updated types in Capella.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type.
|
||||
|
||||
The new topics along with the type of the `data` field of a gossipsub message are given in this table:
|
||||
|
||||
| Name | Message Type |
|
||||
| - | - |
|
||||
| `beacon_block` | `SignedBeaconBlock` (modified) |
|
||||
| `bls_to_execution_change` | `SignedBLSToExecutionChange` |
|
||||
|
||||
Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
|
||||
|
||||
#### Global topics
|
||||
|
||||
Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks.
|
||||
|
||||
##### `beacon_block`
|
||||
|
||||
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella.
|
||||
Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`.
|
||||
See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details.
|
||||
|
||||
##### `bls_to_execution_change`
|
||||
|
||||
This topic is used to propagate signed bls to execution change messages to be included in future blocks.
|
||||
|
||||
The following validations MUST pass before forwarding the `signed_bls_to_execution_change` on the network:
|
||||
|
||||
- _[IGNORE]_ The `signed_bls_to_execution_change` is the first valid signed bls to execution change received
|
||||
for the validator with index `signed_bls_to_execution_change.message.validator_index`.
|
||||
- _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation.
|
||||
|
||||
### Transitioning the gossip
|
||||
|
||||
See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for
|
||||
details on how to handle transitioning gossip topics for Capella.
|
||||
|
||||
## The Req/Resp domain
|
||||
|
||||
### Messages
|
||||
|
||||
#### BeaconBlocksByRange v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`
|
||||
|
||||
The Capella fork-digest is introduced to the `context` enum to specify Capella block type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[0]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
| ------------------------ | -------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
The Capella fork-digest is introduced to the `context` enum to specify Capella block type.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
| ------------------------ | -------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
|
|
@@ -18,6 +18,9 @@
- [Block proposal](#block-proposal)
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
- [ExecutionPayload](#executionpayload)
- [BLS to execution changes](#bls-to-execution-changes)
- [Enabling validator withdrawals](#enabling-validator-withdrawals)
- [Changing from BLS to execution withdrawal credentials](#changing-from-bls-to-execution-withdrawal-credentials)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->
@@ -58,12 +61,8 @@ All validator responsibilities remain unchanged other than those noted below.
expected withdrawals for the slot must be gathered from the `state` (utilizing the
helper `get_expected_withdrawals`) and passed into the `ExecutionEngine` within `prepare_execution_payload`.


```python
def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
    num_withdrawals = min(MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
    return state.withdrawal_queue[:num_withdrawals]
```
*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied.
That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`.

*Note*: The only change made to `prepare_execution_payload` is to call
`get_expected_withdrawals()` to set the new `withdrawals` field of `PayloadAttributes`.
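As a rough illustration of that note (not part of the diff; the surrounding variable names are assumed from the Bellatrix `prepare_execution_payload` definition), the modified function would populate the new field along these lines:

```python
payload_attributes = PayloadAttributes(
    timestamp=timestamp,
    prev_randao=random,
    suggested_fee_recipient=suggested_fee_recipient,
    withdrawals=get_expected_withdrawals(state),  # [New in Capella]
)
```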
@ -106,3 +105,43 @@ def prepare_execution_payload(state: BeaconState,
|
|||
payload_attributes=payload_attributes,
|
||||
)
|
||||
```
|
||||
|
||||
##### BLS to execution changes
|
||||
|
||||
Up to `MAX_BLS_TO_EXECUTION_CHANGES`, [`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) objects can be included in the `block`. The BLS to execution changes must satisfy the verification conditions found in [BLS to execution change processing](./beacon-chain.md#new-process_bls_to_execution_change).
|
||||
|
||||
## Enabling validator withdrawals
|
||||
|
||||
Validator balances are withdrawn periodically via an automatic process. For exited validators, the full balance is withdrawn. For active validators, the balance in excess of `MAX_EFFECTIVE_BALANCE` is withdrawn.
|
||||
|
||||
There is one prerequisite for this automated process:
|
||||
the validator's withdrawal credentials pointing to an execution layer address, i.e. having an `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
|
||||
|
||||
If a validator has a `BLS_WITHDRAWAL_PREFIX` withdrawal credential prefix, to participate in withdrawals the validator must
|
||||
create a one-time message to change their withdrawal credential from the version authenticated with a BLS key to the
|
||||
version compatible with the execution layer. This message -- a `BLSToExecutionChange` -- is available starting in Capella
|
||||
|
||||
Validators who wish to enable withdrawals **MUST** assemble, sign, and broadcast this message so that it is accepted
|
||||
on the beacon chain. Validators who do not want to enable withdrawals and have the `BLS_WITHDRAWAL_PREFIX` version of
|
||||
withdrawal credentials can delay creating this message until they are ready to enable withdrawals.
|
||||
|
||||
### Changing from BLS to execution withdrawal credentials
|
||||
|
||||
First, the validator must construct a valid [`BLSToExecutionChange`](./beacon-chain.md#blstoexecutionchange) `message`.
|
||||
This `message` contains the `validator_index` for the validator who wishes to change their credentials, the `from_bls_pubkey` -- the BLS public key corresponding to the **withdrawal BLS secret key** used to form the `BLS_WITHDRAWAL_PREFIX` withdrawal credential, and the `to_execution_address` specifying the execution layer address to which the validator's balances will be withdrawn.
|
||||
|
||||
*Note*: The withdrawal key pair used to construct the `BLS_WITHDRAWAL_PREFIX` withdrawal credential should be distinct from the signing key pair used to operate the validator under typical circumstances. Consult your validator deposit tooling documentation for further details if you are not aware of the difference.
|
||||
|
||||
*Warning*: This message can only be included on-chain once and is
|
||||
irreversible so ensure the correctness and accessibility to `to_execution_address`.
|
||||
|
||||
Next, the validator signs the assembled `message: BLSToExecutionChange` with the **withdrawal BLS secret key** and this
|
||||
`signature` is placed into a `SignedBLSToExecutionChange` message along with the inner `BLSToExecutionChange` `message`.
|
||||
Note that the `SignedBLSToExecutionChange` message should pass all of the validations in [`process_bls_to_execution_change`](./beacon-chain.md#new-process_bls_to_execution_change).
|
||||
|
||||
The `SignedBLSToExecutionChange` message should then be submitted to the consensus layer network. Once included on-chain,
|
||||
the withdrawal credential change takes effect. No further action is required for a validator to enter into the automated
|
||||
withdrawal process.
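A minimal sketch of the assembly and signing steps in pyspec style (assumes the standard `compute_domain`/`compute_signing_root` helpers; the `fork_version` argument must match what `process_bls_to_execution_change` expects on-chain, and `withdrawal_sk` is the withdrawal BLS secret key):

```python
def create_signed_bls_to_execution_change(validator_index: ValidatorIndex,
                                          withdrawal_sk: int,
                                          to_execution_address: ExecutionAddress,
                                          fork_version: Version,
                                          genesis_validators_root: Root) -> SignedBLSToExecutionChange:
    message = BLSToExecutionChange(
        validator_index=validator_index,
        # The BLS pubkey corresponding to the withdrawal secret key, not the signing key
        from_bls_pubkey=bls.SkToPk(withdrawal_sk),
        to_execution_address=to_execution_address,
    )
    domain = compute_domain(DOMAIN_BLS_TO_EXECUTION_CHANGE, fork_version, genesis_validators_root)
    signing_root = compute_signing_root(message, domain)
    signature = bls.Sign(withdrawal_sk, signing_root)
    return SignedBLSToExecutionChange(message=message, signature=signature)
```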
|
||||
|
||||
*Note*: A node *should* prioritize locally received `BLSToExecutionChange` operations to ensure these changes make it on-chain
|
||||
through self-published blocks even if the rest of the network censors.
|
||||
|
|
|
@ -122,7 +122,7 @@ This backbone is based on a pure function of the *node* identity and time:
|
|||
peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s),
|
||||
assuming the peerstore already has a large enough variety of peers.
|
||||
- Nodes can be held accountable for contributing to the backbone:
|
||||
peers that particpate in DAS but are not active on the appropriate backbone topics can be scored down.
|
||||
peers that participate in DAS but are not active on the appropriate backbone topics can be scored down.
|
||||
*Note: This is experimental. DAS should be light enough for all participants to run, but scoring needs to undergo testing.*
|
||||
|
||||
A node should anticipate which backbone topics to subscribe to based on its own identity.
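Purely as an illustration of the idea (the concrete backbone function is defined in the DAS networking spec; the topic count and hashing scheme below are hypothetical placeholders):

```python
import hashlib

def anticipated_backbone_topics(node_id: bytes, epoch: int,
                                topics_per_node: int, num_vertical_topics: int) -> set:
    # Derive a deterministic set of topic indices from the node identity and the current epoch,
    # so any peer can both predict and verify which backbone topics this node should be serving.
    topics = set()
    for i in range(topics_per_node):
        digest = hashlib.sha256(node_id + epoch.to_bytes(8, 'little') + i.to_bytes(8, 'little')).digest()
        topics.add(int.from_bytes(digest, 'little') % num_vertical_topics)
    return topics
```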
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Blob](#blob)
|
||||
- [Domain types](#domain-types)
|
||||
- [Preset](#preset)
|
||||
- [Execution](#execution)
|
||||
- [Configuration](#configuration)
|
||||
|
@ -23,6 +22,8 @@
|
|||
- [`ExecutionPayloadHeader`](#executionpayloadheader)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc)
|
||||
- [`validate_blobs_sidecar`](#validate_blobs_sidecar)
|
||||
- [`is_data_available`](#is_data_available)
|
||||
- [`kzg_commitment_to_versioned_hash`](#kzg_commitment_to_versioned_hash)
|
||||
- [`tx_peek_blob_versioned_hashes`](#tx_peek_blob_versioned_hashes)
|
||||
- [`verify_kzg_commitments_against_transactions`](#verify_kzg_commitments_against_transactions)
|
||||
|
@ -32,21 +33,20 @@
|
|||
- [`process_execution_payload`](#process_execution_payload)
|
||||
- [Blob KZG commitments](#blob-kzg-commitments)
|
||||
- [Testing](#testing)
|
||||
- [Disabling Withdrawals](#disabling-withdrawals)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This upgrade adds blobs to the beacon chain as part of EIP-4844.
|
||||
This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an extension of the Capella upgrade.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `Blob` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | |
|
||||
| `VersionedHash` | `Bytes32` | |
|
||||
| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
|
||||
|
||||
## Constants
|
||||
|
||||
|
@ -55,14 +55,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844.
|
|||
| Name | Value |
|
||||
| - | - |
|
||||
| `BLOB_TX_TYPE` | `uint8(0x05)` |
|
||||
| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
|
||||
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1(0x01)` |
|
||||
|
||||
### Domain types
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `DOMAIN_BLOBS_SIDECAR` | `DomainType('0x0a000000')` |
|
||||
| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` |
|
||||
|
||||
## Preset
|
||||
|
||||
|
@ -70,7 +63,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844.
|
|||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**4)` (= 16) |
|
||||
| `MAX_BLOBS_PER_BLOCK` | `uint64(2**2)` (= 4) |
|
||||
|
||||
## Configuration
|
||||
|
||||
|
@ -96,7 +89,8 @@ class BeaconBlockBody(Container):
|
|||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
sync_aggregate: SyncAggregate
|
||||
# Execution
|
||||
execution_payload: ExecutionPayload
|
||||
execution_payload: ExecutionPayload # [Modified in EIP-4844]
|
||||
bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES]
|
||||
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in EIP-4844]
|
||||
```
|
||||
|
||||
|
@ -117,10 +111,11 @@ class ExecutionPayload(Container):
|
|||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
excess_blobs: uint64 # [New in EIP-4844]
|
||||
excess_data_gas: uint256 # [New in EIP-4844]
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD]
|
||||
withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD]
|
||||
```
|
||||
|
||||
#### `ExecutionPayloadHeader`
|
||||
|
@ -140,16 +135,56 @@ class ExecutionPayloadHeader(Container):
|
|||
timestamp: uint64
|
||||
extra_data: ByteList[MAX_EXTRA_DATA_BYTES]
|
||||
base_fee_per_gas: uint256
|
||||
excess_blobs: uint64 # [New in EIP-4844]
|
||||
excess_data_gas: uint256 # [New in EIP-4844]
|
||||
# Extra payload fields
|
||||
block_hash: Hash32 # Hash of execution block
|
||||
transactions_root: Root
|
||||
withdrawals_root: Root
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
|
||||
#### `validate_blobs_sidecar`
|
||||
|
||||
```python
|
||||
def validate_blobs_sidecar(slot: Slot,
|
||||
beacon_block_root: Root,
|
||||
expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
blobs_sidecar: BlobsSidecar) -> None:
|
||||
assert slot == blobs_sidecar.beacon_block_slot
|
||||
assert beacon_block_root == blobs_sidecar.beacon_block_root
|
||||
blobs = blobs_sidecar.blobs
|
||||
kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
|
||||
assert len(expected_kzg_commitments) == len(blobs)
|
||||
|
||||
assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof)
|
||||
```
|
||||
|
||||
#### `is_data_available`
|
||||
|
||||
The implementation of `is_data_available` is meant to change with later sharding upgrades.
|
||||
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
|
||||
and validate the sidecar with `validate_blobs_sidecar`.
|
||||
|
||||
The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.
|
||||
|
||||
```python
|
||||
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
|
||||
# `retrieve_blobs_sidecar` is implementation dependent, raises an exception if not available.
|
||||
sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
|
||||
|
||||
# For testing, `retrieve_blobs_sidecar` returns "TEST".
|
||||
# TODO: Remove it once we have a way to inject `BlobsSidecar` into tests.
|
||||
if isinstance(sidecar, str):
|
||||
return True
|
||||
|
||||
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
|
||||
return True
|
||||
```
|
||||
|
||||
|
||||
#### `kzg_commitment_to_versioned_hash`
|
||||
|
||||
```python
|
||||
|
@ -167,10 +202,10 @@ See [the full details of `blob_versioned_hashes` offset calculation](https://gis
|
|||
def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedHash]:
|
||||
assert opaque_tx[0] == BLOB_TX_TYPE
|
||||
message_offset = 1 + uint32.decode_bytes(opaque_tx[1:5])
|
||||
# field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 = 156
|
||||
# field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188
|
||||
blob_versioned_hashes_offset = (
|
||||
message_offset
|
||||
+ uint32.decode_bytes(opaque_tx[(message_offset + 156):(message_offset + 160)])
|
||||
+ uint32.decode_bytes(opaque_tx[(message_offset + 188):(message_offset + 192)])
|
||||
)
|
||||
return [
|
||||
VersionedHash(opaque_tx[x:(x + 32)])
|
||||
|
@ -183,7 +218,7 @@ def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedH
|
|||
```python
|
||||
def verify_kzg_commitments_against_transactions(transactions: Sequence[Transaction],
|
||||
kzg_commitments: Sequence[KZGCommitment]) -> bool:
|
||||
all_versioned_hashes = []
|
||||
all_versioned_hashes: List[VersionedHash] = []
|
||||
for tx in transactions:
|
||||
if tx[0] == BLOB_TX_TYPE:
|
||||
all_versioned_hashes += tx_peek_blob_versioned_hashes(tx)
|
||||
|
@ -198,12 +233,16 @@ def verify_kzg_commitments_against_transactions(transactions: Sequence[Transacti
|
|||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
|
||||
process_block_header(state, block)
|
||||
if is_execution_enabled(state, block.body):
|
||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE)
|
||||
process_withdrawals(state, block.body.execution_payload)
|
||||
process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-4844]
|
||||
process_randao(state, block.body)
|
||||
process_eth1_data(state, block.body)
|
||||
process_operations(state, block.body)
|
||||
process_sync_aggregate(state, block.body.sync_aggregate)
|
||||
process_blob_kzg_commitments(state, block.body) # [New in EIP-4844]
|
||||
|
||||
# New in EIP-4844
|
||||
assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments)
|
||||
```
|
||||
|
||||
#### Execution payload
|
||||
|
@ -221,6 +260,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
|
|||
assert payload.timestamp == compute_timestamp_at_slot(state, state.slot)
|
||||
# Verify the execution payload is valid
|
||||
assert execution_engine.notify_new_payload(payload)
|
||||
|
||||
# Cache execution payload header
|
||||
state.latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
parent_hash=payload.parent_hash,
|
||||
|
@ -235,16 +275,18 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe
|
|||
timestamp=payload.timestamp,
|
||||
extra_data=payload.extra_data,
|
||||
base_fee_per_gas=payload.base_fee_per_gas,
|
||||
excess_blobs=payload.excess_blobs, # [New in EIP-4844]
|
||||
excess_data_gas=payload.excess_data_gas, # [New in EIP-4844]
|
||||
block_hash=payload.block_hash,
|
||||
transactions_root=hash_tree_root(payload.transactions),
|
||||
withdrawals_root=hash_tree_root(payload.withdrawals),
|
||||
)
|
||||
```
|
||||
|
||||
#### Blob KZG commitments
|
||||
|
||||
```python
|
||||
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody):
|
||||
def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> None:
|
||||
# pylint: disable=unused-argument
|
||||
assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments)
|
||||
```
|
||||
|
||||
|
@ -303,3 +345,11 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32,
|
|||
|
||||
return state
|
||||
```
|
||||
|
||||
### Disabling Withdrawals
|
||||
|
||||
During testing we avoid Capella-specific updates to the state transition. We do this by replacing the following functions with a no-op implementation:
|
||||
- `process_withdrawals`
|
||||
- `process_bls_to_execution_change`
|
||||
|
||||
The `get_expected_withdrawals` function is also modified to return an empty withdrawals list. As such, the `PayloadAttributes` used to update forkchoice does not contain withdrawals.
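A minimal sketch of how a test harness could apply these overrides (illustrative only; the actual pyspec build performs the substitution when the EIP-4844 spec is generated):

```python
def process_withdrawals(state: BeaconState, payload: ExecutionPayload) -> None:
    # [Modified for testing] Capella withdrawal processing is disabled
    return


def process_bls_to_execution_change(state: BeaconState,
                                    signed_address_change: SignedBLSToExecutionChange) -> None:
    # [Modified for testing] BLS to execution changes are not applied
    return


def get_expected_withdrawals(state: BeaconState) -> Sequence[Withdrawal]:
    # [Modified for testing] No withdrawals, so PayloadAttributes carries an empty list
    return []
```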
|
||||
|
|
|
@ -44,6 +44,8 @@ def compute_fork_version(epoch: Epoch) -> Version:
|
|||
"""
|
||||
if epoch >= EIP4844_FORK_EPOCH:
|
||||
return EIP4844_FORK_VERSION
|
||||
if epoch >= CAPELLA_FORK_EPOCH:
|
||||
return CAPELLA_FORK_VERSION
|
||||
if epoch >= BELLATRIX_FORK_EPOCH:
|
||||
return BELLATRIX_FORK_VERSION
|
||||
if epoch >= ALTAIR_FORK_EPOCH:
|
||||
|
@ -56,18 +58,35 @@ def compute_fork_version(epoch: Epoch) -> Version:
|
|||
### Fork trigger
|
||||
|
||||
TBD. This fork is defined for testing purposes; the EIP may be combined with another consensus-layer upgrade.
|
||||
For now we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.
|
||||
For now, we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`.
|
||||
|
||||
Note that for the pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` since it starts with EIP-4844 version logic.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
Since the `eip4844.BeaconState` format is equal to the `bellatrix.BeaconState` format, we only have to update `BeaconState.fork`.
|
||||
Since the `eip4844.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`.
|
||||
|
||||
```python
|
||||
def upgrade_to_eip4844(pre: bellatrix.BeaconState) -> BeaconState:
|
||||
# TODO: if Capella gets scheduled, add sync it with Capella.BeaconState
|
||||
epoch = bellatrix.get_current_epoch(pre)
|
||||
def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState:
|
||||
epoch = capella.get_current_epoch(pre)
|
||||
latest_execution_payload_header = ExecutionPayloadHeader(
|
||||
parent_hash=pre.latest_execution_payload_header.parent_hash,
|
||||
fee_recipient=pre.latest_execution_payload_header.fee_recipient,
|
||||
state_root=pre.latest_execution_payload_header.state_root,
|
||||
receipts_root=pre.latest_execution_payload_header.receipts_root,
|
||||
logs_bloom=pre.latest_execution_payload_header.logs_bloom,
|
||||
prev_randao=pre.latest_execution_payload_header.prev_randao,
|
||||
block_number=pre.latest_execution_payload_header.block_number,
|
||||
gas_limit=pre.latest_execution_payload_header.gas_limit,
|
||||
gas_used=pre.latest_execution_payload_header.gas_used,
|
||||
timestamp=pre.latest_execution_payload_header.timestamp,
|
||||
extra_data=pre.latest_execution_payload_header.extra_data,
|
||||
base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas,
|
||||
excess_data_gas=uint256(0), # [New in EIP-4844]
|
||||
block_hash=pre.latest_execution_payload_header.block_hash,
|
||||
transactions_root=pre.latest_execution_payload_header.transactions_root,
|
||||
withdrawals_root=pre.latest_execution_payload_header.withdrawals_root,
|
||||
)
|
||||
post = BeaconState(
|
||||
# Versioning
|
||||
genesis_time=pre.genesis_time,
|
||||
|
@ -108,7 +127,10 @@ def upgrade_to_eip4844(pre: bellatrix.BeaconState) -> BeaconState:
|
|||
current_sync_committee=pre.current_sync_committee,
|
||||
next_sync_committee=pre.next_sync_committee,
|
||||
# Execution-layer
|
||||
latest_execution_payload_header=pre.latest_execution_payload_header,
|
||||
latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP4844]
|
||||
# Withdrawals
|
||||
next_withdrawal_index=pre.next_withdrawal_index,
|
||||
next_withdrawal_validator_index=pre.next_withdrawal_validator_index,
|
||||
)
|
||||
|
||||
return post
|
||||
|
|
|
@ -13,17 +13,18 @@ The specification of these changes continues in the same format as the network s
|
|||
- [Configuration](#configuration)
|
||||
- [Containers](#containers)
|
||||
- [`BlobsSidecar`](#blobssidecar)
|
||||
- [`SignedBlobsSidecar`](#signedblobssidecar)
|
||||
- [`SignedBeaconBlockAndBlobsSidecar`](#signedbeaconblockandblobssidecar)
|
||||
- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
|
||||
- [Topics and messages](#topics-and-messages)
|
||||
- [Global topics](#global-topics)
|
||||
- [`beacon_block`](#beacon_block)
|
||||
- [`beacon_block_and_blobs_sidecar`](#beacon_block_and_blobs_sidecar)
|
||||
- [Transitioning the gossip](#transitioning-the-gossip)
|
||||
- [The Req/Resp domain](#the-reqresp-domain)
|
||||
- [Messages](#messages)
|
||||
- [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
|
||||
- [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
|
||||
- [BeaconBlockAndBlobsSidecarByRoot v1](#beaconblockandblobssidecarbyroot-v1)
|
||||
- [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1)
|
||||
- [Design decision rationale](#design-decision-rationale)
|
||||
- [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks)
|
||||
|
@ -50,14 +51,6 @@ class BlobsSidecar(Container):
|
|||
kzg_aggregated_proof: KZGProof
|
||||
```
|
||||
|
||||
### `SignedBlobsSidecar`
|
||||
|
||||
```python
|
||||
class SignedBlobsSidecar(Container):
|
||||
message: BlobsSidecar
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `SignedBeaconBlockAndBlobsSidecar`
|
||||
|
||||
```python
|
||||
|
@ -68,14 +61,14 @@ class SignedBeaconBlockAndBlobsSidecar(Container):
|
|||
|
||||
## The gossip domain: gossipsub
|
||||
|
||||
Some gossip meshes are upgraded in the fork of EIP4844 to support upgraded types.
|
||||
Some gossip meshes are upgraded in the fork of EIP-4844 to support upgraded types.
|
||||
|
||||
### Topics and messages
|
||||
|
||||
Topics follow the same specification as in prior upgrades.
|
||||
All topics remain stable except the beacon block topic which is updated with the modified type.
|
||||
The `beacon_block` topic is deprecated and replaced by the `beacon_block_and_blobs_sidecar` topic. All other topics remain stable.
|
||||
|
||||
The specification around the creation, validation, and dissemination of messages has not changed from the Bellatrix document unless explicitly noted here.
|
||||
The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here.
|
||||
|
||||
The derivation of the `message-id` remains stable.
|
||||
|
||||
|
@ -85,28 +78,37 @@ The new topics along with the type of the `data` field of a gossipsub message ar
|
|||
| - | - |
|
||||
| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) |
|
||||
|
||||
|
||||
#### Global topics
|
||||
|
||||
EIP4844 introduces a new global topic for beacon block and blobs-sidecars.
|
||||
EIP-4844 introduces a new global topic for beacon block and blobs-sidecars.
|
||||
|
||||
##### `beacon_block`
|
||||
|
||||
This topic is deprecated and clients **MUST NOT** expose it in their topic set to any peer. Implementers do not need to do
|
||||
anything beyond simply skipping the implementation; it is explicitly called out here because it is a departure from previous versioning
|
||||
of this topic.
|
||||
|
||||
Refer to [the section below](#transitioning-the-gossip) for details on how to transition the gossip.
|
||||
|
||||
##### `beacon_block_and_blobs_sidecar`
|
||||
|
||||
This topic is used to propagate new signed and coupled beacon blocks and blobs sidecars to all nodes on the networks.
|
||||
|
||||
The following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network.
|
||||
In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network.
|
||||
Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`.
|
||||
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 Points.
|
||||
- _[REJECT]_ The KZG commitments of the blobs are all correctly encoded compressed BLS G1 points
|
||||
-- i.e. `all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)`
|
||||
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list.
|
||||
- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list
|
||||
-- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)`
|
||||
|
||||
Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`.
|
||||
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `sidecar.beacon_block_slot == block.slot`.
|
||||
- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
|
||||
-- i.e. `sidecar.beacon_block_slot == block.slot`.
|
||||
- _[REJECT]_ The `sidecar.blobs` are all well formatted, i.e. each `BLSFieldElement` is in the valid range (`x < BLS_MODULUS`).
|
||||
- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 Point -- i.e. `bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)`
|
||||
|
||||
Once the sidecar and beacon block are received together, `validate_blobs_sidecar` can unlock the data-availability fork-choice dependency.
|
||||
- _[REJECT]_ The KZG proof is a correctly encoded compressed BLS G1 point
|
||||
-- i.e. `bls.KeyValidate(blobs_sidecar.kzg_aggregated_proof)`
|
||||
- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar
|
||||
-- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)`
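A non-normative sketch of the checks above in pyspec style (the inherited `beacon_block` validations and the IGNORE-versus-REJECT outcomes are omitted; blobs are treated as vectors of field elements per the beacon chain Custom types table):

```python
def validate_block_and_blobs_sidecar_gossip(signed: SignedBeaconBlockAndBlobsSidecar) -> None:
    block = signed.beacon_block.message
    sidecar = signed.blobs_sidecar

    # [REJECT] KZG commitments are correctly encoded compressed BLS G1 points
    assert all(bls.KeyValidate(commitment) for commitment in block.body.blob_kzg_commitments)
    # [REJECT] KZG commitments correspond to the versioned hashes in the transactions list
    assert verify_kzg_commitments_against_transactions(
        block.body.execution_payload.transactions, block.body.blob_kzg_commitments)
    # [IGNORE] The sidecar is for the block's slot
    assert sidecar.beacon_block_slot == block.slot
    # [REJECT] Every field element of every blob is in the valid range
    assert all(int(x) < BLS_MODULUS for blob in sidecar.blobs for x in blob)
    # [REJECT] The aggregated proof is a correctly encoded compressed BLS G1 point
    assert bls.KeyValidate(sidecar.kzg_aggregated_proof)
    # [REJECT] The commitments in the block are valid against the provided blobs sidecar
    validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)
```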
|
||||
|
||||
### Transitioning the gossip
|
||||
|
||||
|
@ -132,24 +134,62 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
|||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
| `EIP4844_FORK_VERSION` | `eip4844.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlocksByRoot v2
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`
|
||||
|
||||
The EIP-4844 fork-digest is introduced to the `context` enum to specify EIP-4844 beacon block type.
|
||||
After `EIP4844_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`; however,
|
||||
clients MUST support requesting blocks by root for pre-fork-epoch blocks.
|
||||
|
||||
Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:
|
||||
|
||||
[1]: # (eth2spec: skip)
|
||||
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
| ------------------------ | -------------------------- |
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `fork_version` | Chunk SSZ type |
|
||||
|--------------------------|-------------------------------|
|
||||
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
|
||||
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
|
||||
| `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` |
|
||||
| `EIP4844_FORK_VERSION` | `eip4844.SignedBeaconBlock` |
|
||||
| `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` |
|
||||
|
||||
#### BeaconBlockAndBlobsSidecarByRoot v1
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_block_and_blobs_sidecar_by_root/1/`
|
||||
|
||||
Request Content:
|
||||
|
||||
```
|
||||
(
|
||||
List[Root, MAX_REQUEST_BLOCKS]
|
||||
)
|
||||
```
|
||||
|
||||
Response Content:
|
||||
|
||||
```
|
||||
(
|
||||
List[SignedBeaconBlockAndBlobsSidecar, MAX_REQUEST_BLOCKS]
|
||||
)
|
||||
```
|
||||
|
||||
Requests blocks by block root (= `hash_tree_root(SignedBeaconBlockAndBlobsSidecar.beacon_block.message)`).
|
||||
The response is a list of `SignedBeaconBlockAndBlobsSidecar` whose length is less than or equal to the number of requests.
|
||||
It may be less in the case that the responding peer is missing blocks and sidecars.
|
||||
|
||||
No more than `MAX_REQUEST_BLOCKS` may be requested at a time.
|
||||
|
||||
`BeaconBlockAndBlobsSidecarByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown).
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`.
|
||||
Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload.
|
||||
|
||||
Clients MUST support requesting blocks and sidecars since the latest finalized epoch.
|
||||
|
||||
Clients MUST respond with at least one block and sidecar, if they have it.
|
||||
Clients MAY limit the number of blocks and sidecars in the response.
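For intuition, a responding peer's handler might look roughly like this (`get_block_and_sidecar_by_root` is a hypothetical lookup into the node's local store and is not part of this specification):

```python
def serve_block_and_blobs_sidecar_by_root(
        requested_roots: Sequence[Root]) -> Sequence[SignedBeaconBlockAndBlobsSidecar]:
    assert len(requested_roots) <= MAX_REQUEST_BLOCKS
    response = []
    for root in requested_roots:
        # Hypothetical local store lookup; roots without a locally known block and sidecar are skipped
        pair = get_block_and_sidecar_by_root(root)
        if pair is not None:
            response.append(pair)
    return response
```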
|
||||
|
||||
#### BlobsSidecarsByRange v1
|
||||
|
||||
|
@ -191,7 +231,7 @@ Clients MUST keep a record of signed blobs sidecars seen on the epoch range
|
|||
where `current_epoch` is defined by the current wall-clock time,
|
||||
and clients MUST support serving requests of blocks on this range.
|
||||
|
||||
Peers that are unable to reply to block requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS`
|
||||
epoch range SHOULD respond with error code `3: ResourceUnavailable`.
|
||||
Such peers that are unable to successfully reply to this range of requests MAY get descored
|
||||
or disconnected at any time.
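One plausible way a server might implement the retention check before answering (the exact lower bound of the serving window is an assumption here, taken as the larger of the pruning horizon and the fork epoch):

```python
def can_serve_blobs_sidecars(start_epoch: Epoch, current_epoch: Epoch) -> bool:
    # Requests reaching below this epoch may hit pruned data; in that case the peer
    # SHOULD answer with error code 3: ResourceUnavailable rather than an empty response.
    minimum_served_epoch = max(
        int(current_epoch) - int(MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS),
        int(EIP4844_FORK_EPOCH),
    )
    return int(start_epoch) >= minimum_served_epoch
```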
|
||||
|
|
|
@ -10,6 +10,8 @@
|
|||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Preset](#preset)
|
||||
- [Blob](#blob)
|
||||
- [Crypto](#crypto)
|
||||
- [Trusted setup](#trusted-setup)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Bit-reversal permutation](#bit-reversal-permutation)
|
||||
|
@ -17,26 +19,35 @@
|
|||
- [`reverse_bits`](#reverse_bits)
|
||||
- [`bit_reversal_permutation`](#bit_reversal_permutation)
|
||||
- [BLS12-381 helpers](#bls12-381-helpers)
|
||||
- [`hash_to_bls_field`](#hash_to_bls_field)
|
||||
- [`bytes_to_bls_field`](#bytes_to_bls_field)
|
||||
- [`blob_to_polynomial`](#blob_to_polynomial)
|
||||
- [`compute_challenges`](#compute_challenges)
|
||||
- [`bls_modular_inverse`](#bls_modular_inverse)
|
||||
- [`div`](#div)
|
||||
- [`g1_lincomb`](#g1_lincomb)
|
||||
- [`vector_lincomb`](#vector_lincomb)
|
||||
- [`poly_lincomb`](#poly_lincomb)
|
||||
- [`compute_powers`](#compute_powers)
|
||||
- [Polynomials](#polynomials)
|
||||
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
|
||||
- [KZG](#kzg)
|
||||
- [`blob_to_kzg_commitment`](#blob_to_kzg_commitment)
|
||||
- [`verify_kzg_proof`](#verify_kzg_proof)
|
||||
- [`verify_kzg_proof_impl`](#verify_kzg_proof_impl)
|
||||
- [`compute_kzg_proof`](#compute_kzg_proof)
|
||||
- [Polynomials](#polynomials)
|
||||
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
|
||||
- [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment)
|
||||
- [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof)
|
||||
- [`verify_aggregate_kzg_proof`](#verify_aggregate_kzg_proof)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
|
||||
## Introduction
|
||||
|
||||
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the EIP-4844 specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations.
|
||||
|
||||
Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
|
@ -46,16 +57,31 @@ This document specifies basic polynomial operations and KZG polynomial commitmen
|
|||
| `BLSFieldElement` | `uint256` | `x < BLS_MODULUS` |
|
||||
| `KZGCommitment` | `Bytes48` | Same as BLS standard "is valid pubkey" check but also allows `0x00..00` for point-at-infinity |
|
||||
| `KZGProof` | `Bytes48` | Same as for `KZGCommitment` |
|
||||
| `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | a polynomial in evaluation form |
|
||||
| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | a basic blob data |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 |
|
||||
| `ROOTS_OF_UNITY` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | Roots of unity of order FIELD_ELEMENTS_PER_BLOB over the BLS12-381 field |
|
||||
| `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element |
|
||||
|
||||
## Preset
|
||||
|
||||
### Blob
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` |
|
||||
| `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` |
|
||||
|
||||
### Crypto
|
||||
|
||||
| Name | Value | Notes |
|
||||
| - | - | - |
|
||||
| `ROOTS_OF_UNITY` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | Roots of unity of order FIELD_ELEMENTS_PER_BLOB over the BLS12-381 field |
|
||||
|
||||
### Trusted setup
|
||||
|
||||
The trusted setup is part of the preset: during testing a `minimal` insecure variant may be used,
|
||||
|
@ -63,8 +89,9 @@ but reusing the `mainnet` settings in public networks is a critical security req
|
|||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `KZG_SETUP_G2_LENGTH` | `65` |
|
||||
| `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
| `KZG_SETUP_G2` | `Vector[G2Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
| `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD |
|
||||
| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD |
|
||||
|
||||
## Helper functions
|
||||
|
@ -91,7 +118,7 @@ def is_power_of_two(value: int) -> bool:
|
|||
```python
|
||||
def reverse_bits(n: int, order: int) -> int:
|
||||
"""
|
||||
Reverse the bit order of an integer n
|
||||
Reverse the bit order of an integer ``n``.
|
||||
"""
|
||||
assert is_power_of_two(order)
|
||||
# Convert n to binary with the same number of bits as "order" - 1, then reverse its bit order
|
||||
|
@ -112,14 +139,83 @@ def bit_reversal_permutation(sequence: Sequence[T]) -> Sequence[T]:
|
|||
|
||||
### BLS12-381 helpers
|
||||
|
||||
#### `hash_to_bls_field`
|
||||
|
||||
```python
|
||||
def hash_to_bls_field(data: bytes) -> BLSFieldElement:
|
||||
"""
|
||||
Hash ``data`` and convert the output to a BLS scalar field element.
|
||||
The output is not uniform over the BLS field.
|
||||
"""
|
||||
hashed_data = hash(data)
|
||||
return BLSFieldElement(int.from_bytes(hashed_data, ENDIANNESS) % BLS_MODULUS)
|
||||
```
|
||||
|
||||
#### `bytes_to_bls_field`
|
||||
|
||||
```python
|
||||
def bytes_to_bls_field(b: Bytes32) -> BLSFieldElement:
|
||||
"""
|
||||
Convert bytes to a BLS field scalar. The output is not uniform over the BLS field.
|
||||
Convert 32-byte value to a BLS scalar field element.
|
||||
This function does not accept inputs greater than the BLS modulus.
|
||||
"""
|
||||
return int.from_bytes(b, "little") % BLS_MODULUS
|
||||
field_element = int.from_bytes(b, ENDIANNESS)
|
||||
assert field_element < BLS_MODULUS
|
||||
return BLSFieldElement(field_element)
|
||||
```
|
||||
|
||||
#### `blob_to_polynomial`
|
||||
|
||||
```python
|
||||
def blob_to_polynomial(blob: Blob) -> Polynomial:
|
||||
"""
|
||||
Convert a blob to list of BLS field scalars.
|
||||
"""
|
||||
polynomial = Polynomial()
|
||||
for i in range(FIELD_ELEMENTS_PER_BLOB):
|
||||
value = bytes_to_bls_field(blob[i * BYTES_PER_FIELD_ELEMENT: (i + 1) * BYTES_PER_FIELD_ELEMENT])
|
||||
polynomial[i] = value
|
||||
return polynomial
|
||||
```
|
||||
|
||||
#### `compute_challenges`
|
||||
|
||||
```python
|
||||
def compute_challenges(polynomials: Sequence[Polynomial],
|
||||
commitments: Sequence[KZGCommitment]) -> Tuple[Sequence[BLSFieldElement], BLSFieldElement]:
|
||||
"""
|
||||
Return the Fiat-Shamir challenges required by the rest of the protocol.
|
||||
The Fiat-Shamir logic works as per the following pseudocode:
|
||||
|
||||
hashed_data = hash(DOMAIN_SEPARATOR, polynomials, commitments)
|
||||
r = hash(hashed_data, 0)
|
||||
r_powers = [1, r, r**2, r**3, ...]
|
||||
eval_challenge = hash(hashed_data, 1)
|
||||
|
||||
Then return `r_powers` and `eval_challenge` after converting them to BLS field elements.
|
||||
The resulting field elements are not uniform over the BLS field.
|
||||
"""
|
||||
# Append the number of polynomials and the degree of each polynomial as a domain separator
|
||||
num_polynomials = int.to_bytes(len(polynomials), 8, ENDIANNESS)
|
||||
degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS)
|
||||
data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials
|
||||
|
||||
# Append each polynomial which is composed by field elements
|
||||
for poly in polynomials:
|
||||
for field_element in poly:
|
||||
data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS)
|
||||
|
||||
# Append serialized G1 points
|
||||
for commitment in commitments:
|
||||
data += commitment
|
||||
|
||||
# Transcript has been prepared: time to create the challenges
|
||||
hashed_data = hash(data)
|
||||
r = hash_to_bls_field(hashed_data + b'\x00')
|
||||
r_powers = compute_powers(r, len(commitments))
|
||||
eval_challenge = hash_to_bls_field(hashed_data + b'\x01')
|
||||
|
||||
return r_powers, eval_challenge
|
||||
```
|
||||
|
||||
#### `bls_modular_inverse`
|
||||
|
@ -130,15 +226,17 @@ def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement:
|
|||
Compute the modular inverse of x
|
||||
i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0
|
||||
"""
|
||||
return pow(x, -1, BLS_MODULUS) if x != 0 else 0
|
||||
return BLSFieldElement(pow(x, -1, BLS_MODULUS)) if x != 0 else BLSFieldElement(0)
|
||||
```
|
||||
|
||||
#### `div`
|
||||
|
||||
```python
|
||||
def div(x: BLSFieldElement, y: BLSFieldElement) -> BLSFieldElement:
|
||||
"""Divide two field elements: `x` by `y`"""
|
||||
return (int(x) * int(bls_modular_inverse(y))) % BLS_MODULUS
|
||||
"""
|
||||
Divide two field elements: ``x`` by ``y``.
|
||||
"""
|
||||
return BLSFieldElement((int(x) * int(bls_modular_inverse(y))) % BLS_MODULUS)
|
||||
```
|
||||
|
||||
#### `g1_lincomb`
|
||||
|
@ -155,20 +253,67 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen
|
|||
return KZGCommitment(bls.G1_to_bytes48(result))
|
||||
```
|
||||
|
||||
#### `vector_lincomb`
|
||||
#### `poly_lincomb`
|
||||
|
||||
```python
|
||||
def vector_lincomb(vectors: Sequence[Sequence[BLSFieldElement]],
|
||||
scalars: Sequence[BLSFieldElement]) -> Sequence[BLSFieldElement]:
|
||||
def poly_lincomb(polys: Sequence[Polynomial],
|
||||
scalars: Sequence[BLSFieldElement]) -> Polynomial:
|
||||
"""
|
||||
Given a list of ``vectors``, interpret it as a 2D matrix and compute the linear combination
|
||||
of each column with `scalars`: return the resulting vector.
|
||||
Given a list of polynomials ``polys``, interpret it as a 2D matrix and compute the linear combination
|
||||
of each column with ``scalars``: return the resulting polynomial.
|
||||
"""
|
||||
result = [0] * len(vectors[0])
|
||||
for v, s in zip(vectors, scalars):
|
||||
assert len(polys) == len(scalars)
|
||||
result = [0] * FIELD_ELEMENTS_PER_BLOB
|
||||
for v, s in zip(polys, scalars):
|
||||
for i, x in enumerate(v):
|
||||
result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS
|
||||
return [BLSFieldElement(x) for x in result]
|
||||
return Polynomial([BLSFieldElement(x) for x in result])
|
||||
```
|
||||
|
||||
#### `compute_powers`
|
||||
|
||||
```python
|
||||
def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
|
||||
"""
|
||||
Return ``x`` to power of [0, n-1], if n > 0. When n==0, an empty array is returned.
|
||||
"""
|
||||
current_power = 1
|
||||
powers = []
|
||||
for _ in range(n):
|
||||
powers.append(BLSFieldElement(current_power))
|
||||
current_power = current_power * int(x) % BLS_MODULUS
|
||||
return powers
|
||||
```
|
||||
|
||||
|
||||
### Polynomials
|
||||
|
||||
#### `evaluate_polynomial_in_evaluation_form`
|
||||
|
||||
```python
|
||||
def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial,
|
||||
z: BLSFieldElement) -> BLSFieldElement:
|
||||
"""
|
||||
Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain.
|
||||
Uses the barycentric formula:
|
||||
f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
|
||||
"""
|
||||
width = len(polynomial)
|
||||
assert width == FIELD_ELEMENTS_PER_BLOB
|
||||
inverse_width = bls_modular_inverse(BLSFieldElement(width))
|
||||
|
||||
# Make sure we won't divide by zero during division
|
||||
assert z not in ROOTS_OF_UNITY
|
||||
|
||||
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
|
||||
|
||||
result = 0
|
||||
for i in range(width):
|
||||
a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS)
|
||||
b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS)
|
||||
result += int(div(a, b) % BLS_MODULUS)
|
||||
result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width)
|
||||
return BLSFieldElement(result % BLS_MODULUS)
|
||||
```
|
||||
|
||||
### KZG
|
||||
|
@ -179,18 +324,37 @@ KZG core functions. These are also defined in EIP-4844 execution specs.
|
|||
|
||||
```python
|
||||
def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment:
|
||||
return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob)
|
||||
"""
|
||||
Public method.
|
||||
"""
|
||||
return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob))
|
||||
```
|
||||
|
||||
#### `verify_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof(polynomial_kzg: KZGCommitment,
|
||||
z: BLSFieldElement,
|
||||
y: BLSFieldElement,
|
||||
z: Bytes32,
|
||||
y: Bytes32,
|
||||
kzg_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
|
||||
Receives inputs as bytes.
|
||||
Public method.
|
||||
"""
|
||||
return verify_kzg_proof_impl(polynomial_kzg, bytes_to_bls_field(z), bytes_to_bls_field(y), kzg_proof)
|
||||
```
|
||||
|
||||
|
||||
#### `verify_kzg_proof_impl`
|
||||
|
||||
```python
|
||||
def verify_kzg_proof_impl(polynomial_kzg: KZGCommitment,
|
||||
z: BLSFieldElement,
|
||||
y: BLSFieldElement,
|
||||
kzg_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``.
|
||||
"""
|
||||
# Verify: P - y = Q * (X - z)
|
||||
X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z))
|
||||
|
@ -204,53 +368,87 @@ def verify_kzg_proof(polynomial_kzg: KZGCommitment,
|
|||
#### `compute_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_kzg_proof(polynomial: Sequence[BLSFieldElement], z: BLSFieldElement) -> KZGProof:
|
||||
def compute_kzg_proof(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof:
|
||||
"""
|
||||
Compute KZG proof at point `z` with `polynomial` being in evaluation form
|
||||
Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z)
|
||||
"""
|
||||
|
||||
# To avoid SSZ overflow/underflow, convert element into int
|
||||
polynomial = [int(i) for i in polynomial]
|
||||
z = int(z)
|
||||
|
||||
# Shift our polynomial first (in evaluation form we can't handle the division remainder)
|
||||
y = evaluate_polynomial_in_evaluation_form(polynomial, z)
|
||||
polynomial_shifted = [(p - int(y)) % BLS_MODULUS for p in polynomial]
|
||||
polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial]
|
||||
|
||||
# Make sure we won't divide by zero during division
|
||||
assert z not in ROOTS_OF_UNITY
|
||||
denominator_poly = [(x - z) % BLS_MODULUS for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
|
||||
denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS)
|
||||
for x in bit_reversal_permutation(ROOTS_OF_UNITY)]
|
||||
|
||||
# Calculate quotient polynomial by doing point-by-point division
|
||||
quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)]
|
||||
return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial))
|
||||
```
|
||||
|
||||
### Polynomials
|
||||
|
||||
#### `evaluate_polynomial_in_evaluation_form`
|
||||
#### `compute_aggregated_poly_and_commitment`
|
||||
|
||||
```python
|
||||
def evaluate_polynomial_in_evaluation_form(polynomial: Sequence[BLSFieldElement],
|
||||
z: BLSFieldElement) -> BLSFieldElement:
|
||||
def compute_aggregated_poly_and_commitment(
|
||||
blobs: Sequence[Blob],
|
||||
kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment, BLSFieldElement]:
|
||||
"""
|
||||
Evaluate a polynomial (in evaluation form) at an arbitrary point `z`
|
||||
Uses the barycentric formula:
|
||||
f(z) = (1 - z**WIDTH) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i])
|
||||
Return (1) the aggregated polynomial, (2) the aggregated KZG commitment,
|
||||
and (3) the polynomial evaluation random challenge.
|
||||
This function should also work with blobs == [] and kzg_commitments == []
|
||||
"""
|
||||
width = len(polynomial)
|
||||
assert width == FIELD_ELEMENTS_PER_BLOB
|
||||
inverse_width = bls_modular_inverse(width)
|
||||
assert len(blobs) == len(kzg_commitments)
|
||||
|
||||
# Make sure we won't divide by zero during division
|
||||
assert z not in ROOTS_OF_UNITY
|
||||
# Convert blobs to polynomials
|
||||
polynomials = [blob_to_polynomial(blob) for blob in blobs]
|
||||
|
||||
roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY)
|
||||
# Generate random linear combination and evaluation challenges
|
||||
r_powers, evaluation_challenge = compute_challenges(polynomials, kzg_commitments)
|
||||
|
||||
result = 0
|
||||
for i in range(width):
|
||||
result += div(int(polynomial[i]) * int(roots_of_unity_brp[i]), (z - roots_of_unity_brp[i]))
|
||||
result = result * (pow(z, width, BLS_MODULUS) - 1) * inverse_width % BLS_MODULUS
|
||||
return result
|
||||
# Create aggregated polynomial in evaluation form
|
||||
aggregated_poly = poly_lincomb(polynomials, r_powers)
|
||||
|
||||
# Compute commitment to aggregated polynomial
|
||||
aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers))
|
||||
|
||||
return aggregated_poly, aggregated_poly_commitment, evaluation_challenge
|
||||
```
|
||||
|
||||
#### `compute_aggregate_kzg_proof`
|
||||
|
||||
```python
|
||||
def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof:
|
||||
"""
|
||||
Given a list of blobs, return the aggregated KZG proof that is used to verify them against their commitments.
|
||||
Public method.
|
||||
"""
|
||||
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
|
||||
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
|
||||
blobs,
|
||||
commitments
|
||||
)
|
||||
return compute_kzg_proof(aggregated_poly, evaluation_challenge)
|
||||
```
|
||||
|
||||
#### `verify_aggregate_kzg_proof`
|
||||
|
||||
```python
|
||||
def verify_aggregate_kzg_proof(blobs: Sequence[Blob],
|
||||
expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
kzg_aggregated_proof: KZGProof) -> bool:
|
||||
"""
|
||||
Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments.
|
||||
|
||||
Public method.
|
||||
"""
|
||||
aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment(
|
||||
blobs,
|
||||
expected_kzg_commitments,
|
||||
)
|
||||
|
||||
# Evaluate aggregated polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero)
|
||||
y = evaluate_polynomial_in_evaluation_form(aggregated_poly, evaluation_challenge)
|
||||
|
||||
# Verify aggregated proof
|
||||
return verify_kzg_proof_impl(aggregated_poly_commitment, evaluation_challenge, y, kzg_aggregated_proof)
|
||||
```
|
||||
|
|
|
@ -10,17 +10,7 @@
|
|||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Custom types](#custom-types)
|
||||
- [Containers](#containers)
|
||||
- [`BlobsAndCommitments`](#blobsandcommitments)
|
||||
- [`PolynomialAndCommitment`](#polynomialandcommitment)
|
||||
- [Helpers](#helpers)
|
||||
- [`is_data_available`](#is_data_available)
|
||||
- [`hash_to_bls_field`](#hash_to_bls_field)
|
||||
- [`compute_powers`](#compute_powers)
|
||||
- [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment)
|
||||
- [`validate_blobs_sidecar`](#validate_blobs_sidecar)
|
||||
- [`compute_proof_from_blobs`](#compute_proof_from_blobs)
|
||||
- [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments)
|
||||
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
|
||||
- [Block and sidecar proposal](#block-and-sidecar-proposal)
|
||||
|
@ -39,146 +29,14 @@ This document represents the changes to be made in the code of an "honest valida
|
|||
|
||||
## Prerequisites
|
||||
|
||||
This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide.
|
||||
This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide.
|
||||
All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP4844](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP-4844](./beacon-chain.md) are requisite for this document and used throughout.
|
||||
Please see related Beacon Chain doc before continuing and use them as a reference throughout.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `Polynomial` | `List[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | a polynomial in evaluation form |
|
||||
|
||||
## Containers
|
||||
|
||||
### `BlobsAndCommitments`
|
||||
|
||||
```python
|
||||
class BlobsAndCommitments(Container):
|
||||
blobs: List[Blob, MAX_BLOBS_PER_BLOCK]
|
||||
kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK]
|
||||
```
|
||||
|
||||
### `PolynomialAndCommitment`
|
||||
|
||||
```python
|
||||
class PolynomialAndCommitment(Container):
|
||||
polynomial: Polynomial
|
||||
kzg_commitment: KZGCommitment
|
||||
```
|
||||
|
||||
|
||||
## Helpers
|
||||
|
||||
### `is_data_available`
|
||||
|
||||
The implementation of `is_data_available` is meant to change with later sharding upgrades.
|
||||
Initially, it requires every verifying actor to retrieve the matching `BlobsSidecar`,
|
||||
and validate the sidecar with `validate_blobs_sidecar`.
|
||||
|
||||
Without the sidecar the block may be processed further optimistically,
|
||||
but MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded.
|
||||
|
||||
```python
|
||||
def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool:
|
||||
# `retrieve_blobs_sidecar` is implementation dependent, raises an exception if not available.
|
||||
sidecar = retrieve_blobs_sidecar(slot, beacon_block_root)
|
||||
validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar)
|
||||
|
||||
return True
|
||||
```
|
||||
|
||||
### `hash_to_bls_field`
|
||||
|
||||
```python
|
||||
def hash_to_bls_field(x: Container) -> BLSFieldElement:
|
||||
"""
|
||||
Compute 32-byte hash of serialized container and convert it to BLS field.
|
||||
The output is not uniform over the BLS field.
|
||||
"""
|
||||
return bytes_to_bls_field(hash(ssz_serialize(x)))
|
||||
```
|
||||
|
||||
### `compute_powers`
|
||||
```python
|
||||
def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]:
|
||||
"""
|
||||
Return ``x`` to power of [0, n-1].
|
||||
"""
|
||||
current_power = 1
|
||||
powers = []
|
||||
for _ in range(n):
|
||||
powers.append(BLSFieldElement(current_power))
|
||||
current_power = current_power * int(x) % BLS_MODULUS
|
||||
return powers
|
||||
```
|
||||
|
||||
### `compute_aggregated_poly_and_commitment`
|
||||
|
||||
```python
|
||||
def compute_aggregated_poly_and_commitment(
|
||||
blobs: Sequence[Blob],
|
||||
kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment]:
|
||||
"""
|
||||
Return the aggregated polynomial and aggregated KZG commitment.
|
||||
"""
|
||||
# Generate random linear combination challenges
|
||||
r = hash_to_bls_field(BlobsAndCommitments(blobs=blobs, kzg_commitments=kzg_commitments))
|
||||
r_powers = compute_powers(r, len(kzg_commitments))
|
||||
|
||||
# Create aggregated polynomial in evaluation form
|
||||
aggregated_poly = Polynomial(vector_lincomb(blobs, r_powers))
|
||||
|
||||
# Compute commitment to aggregated polynomial
|
||||
aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers))
|
||||
|
||||
return aggregated_poly, aggregated_poly_commitment
|
||||
```
|
||||
|
||||
### `validate_blobs_sidecar`
|
||||
|
||||
```python
|
||||
def validate_blobs_sidecar(slot: Slot,
|
||||
beacon_block_root: Root,
|
||||
expected_kzg_commitments: Sequence[KZGCommitment],
|
||||
blobs_sidecar: BlobsSidecar) -> None:
|
||||
assert slot == blobs_sidecar.beacon_block_slot
|
||||
assert beacon_block_root == blobs_sidecar.beacon_block_root
|
||||
blobs = blobs_sidecar.blobs
|
||||
kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof
|
||||
assert len(expected_kzg_commitments) == len(blobs)
|
||||
|
||||
aggregated_poly, aggregated_poly_commitment = compute_aggregated_poly_and_commitment(
|
||||
blobs,
|
||||
expected_kzg_commitments,
|
||||
)
|
||||
|
||||
# Generate challenge `x` and evaluate the aggregated polynomial at `x`
|
||||
x = hash_to_bls_field(
|
||||
PolynomialAndCommitment(polynomial=aggregated_poly, kzg_commitment=aggregated_poly_commitment)
|
||||
)
|
||||
# Evaluate aggregated polynomial at `x` (evaluation function checks for div-by-zero)
|
||||
y = evaluate_polynomial_in_evaluation_form(aggregated_poly, x)
|
||||
|
||||
# Verify aggregated proof
|
||||
assert verify_kzg_proof(aggregated_poly_commitment, x, y, kzg_aggregated_proof)
|
||||
```
|
||||
|
||||
### `compute_proof_from_blobs`
|
||||
|
||||
```python
|
||||
def compute_proof_from_blobs(blobs: Sequence[Blob]) -> KZGProof:
|
||||
commitments = [blob_to_kzg_commitment(blob) for blob in blobs]
|
||||
aggregated_poly, aggregated_poly_commitment = compute_aggregated_poly_and_commitment(blobs, commitments)
|
||||
x = hash_to_bls_field(PolynomialAndCommitment(
|
||||
polynomial=aggregated_poly,
|
||||
kzg_commitment=aggregated_poly_commitment,
|
||||
))
|
||||
return compute_kzg_proof(aggregated_poly, x)
|
||||
```
|
||||
|
||||
### `get_blobs_and_kzg_commitments`
|
||||
|
||||
The interface to retrieve blobs and corresponding kzg commitments.
|
||||
|
@ -188,6 +46,7 @@ Implementers may also retrieve blobs individually per transaction.
|
|||
|
||||
```python
|
||||
def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]:
|
||||
# pylint: disable=unused-argument
|
||||
...
|
||||
```
|
||||
|
||||
|
@ -202,7 +61,7 @@ Namely, the blob handling and the addition of `SignedBeaconBlockAndBlobsSidecar`
|
|||
|
||||
##### Blob KZG commitments
|
||||
|
||||
1. After retrieving the execution payload from the execution engine as specified in Bellatrix,
|
||||
1. After retrieving the execution payload from the execution engine as specified in Capella,
|
||||
use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`.
|
||||
2. Validate `blobs` and `blob_kzg_commitments`:
|
||||
|
||||
|
@ -236,7 +95,7 @@ def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar
|
|||
beacon_block_root=hash_tree_root(block),
|
||||
beacon_block_slot=block.slot,
|
||||
blobs=blobs,
|
||||
kzg_aggregated_proof=compute_proof_from_blobs(blobs),
|
||||
kzg_aggregated_proof=compute_aggregate_kzg_proof(blobs),
|
||||
)
|
||||
```
|
||||
|
||||
|
|
|
@ -745,6 +745,8 @@ For example, if slot 4 were empty in the previous example, the returned array wo
|
|||
|
||||
`step` is deprecated and must be set to 1. Clients may respond with a single block if a larger step is returned during the deprecation transition period.
|
||||
|
||||
`/eth2/beacon_chain/req/beacon_blocks_by_range/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
|
||||
|
||||
`BeaconBlocksByRange` is primarily used to sync historical blocks.
|
||||
|
||||
The request MUST be encoded as an SSZ-container.
|
||||
|
@ -831,6 +833,8 @@ Clients MUST support requesting blocks since the latest finalized epoch.
|
|||
Clients MUST respond with at least one block, if they have it.
|
||||
Clients MAY limit the number of blocks in the response.
|
||||
|
||||
`/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period.
|
||||
|
||||
#### Ping
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/ping/1/`
|
||||
|
|
|
@ -32,7 +32,7 @@
|
|||
- [`interpolate_polynomial`](#interpolate_polynomial)
|
||||
- [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form)
|
||||
- [KZG Operations](#kzg-operations)
|
||||
- [Elliptic curve helper functoins](#elliptic-curve-helper-functoins)
|
||||
- [Elliptic curve helper functions](#elliptic-curve-helper-functions)
|
||||
- [`elliptic_curve_lincomb`](#elliptic_curve_lincomb)
|
||||
- [Hash to field](#hash-to-field)
|
||||
- [`hash_to_bls_field`](#hash_to_bls_field)
|
||||
|
@ -47,7 +47,7 @@
|
|||
|
||||
## Introduction
|
||||
|
||||
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the sharding specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations, and hints what the best known algorithms for these implementations are are included below.
|
||||
This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the sharding specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations, and hints what the best known algorithms for these implementations are included below.
|
||||
|
||||
## Constants
|
||||
|
||||
|
@ -313,7 +313,7 @@ def evaluate_polynomial_in_evaluation_form(poly: BLSPolynomialByEvaluations, x:
|
|||
|
||||
We are using the KZG10 polynomial commitment scheme (Kate, Zaverucha and Goldberg, 2010: https://www.iacr.org/archive/asiacrypt2010/6477178/6477178.pdf).
|
||||
|
||||
### Elliptic curve helper functoins
|
||||
### Elliptic curve helper functions
|
||||
|
||||
#### `elliptic_curve_lincomb`
|
||||
|
||||
|
|
|
@ -143,7 +143,7 @@ This ensures that blocks are only optimistically imported if one or more of the
|
|||
following are true:
|
||||
|
||||
1. The parent of the block has execution enabled.
|
||||
1. The current slot (as per the system clock) is at least
|
||||
2. The current slot (as per the system clock) is at least
|
||||
`SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY` ahead of the slot of the block being
|
||||
imported.
|
||||
|
||||
|
|
|
@ -10,12 +10,12 @@ Use an OS that has Python 3.8 or above. For example, Debian 11 (bullseye)
|
|||
```sh
|
||||
sudo apt install -y make git wget python3-venv gcc python3-dev
|
||||
```
|
||||
1. Download the latest [consensus specs](https://github.com/ethereum/consensus-specs)
|
||||
2. Download the latest [consensus specs](https://github.com/ethereum/consensus-specs)
|
||||
```sh
|
||||
git clone https://github.com/ethereum/consensus-specs.git
|
||||
cd consensus-specs
|
||||
```
|
||||
1. Create the specifications and tests:
|
||||
3. Create the specifications and tests:
|
||||
```sh
|
||||
make install_test
|
||||
make pyspec
|
||||
|
@ -31,12 +31,12 @@ To read more about creating the environment, [see here](core/pyspec/README.md).
|
|||
cd ~/consensus-specs
|
||||
. venv/bin/activate
|
||||
```
|
||||
1. Run a sanity check test against Altair fork:
|
||||
2. Run a sanity check test against Altair fork:
|
||||
```sh
|
||||
cd tests/core/pyspec
|
||||
python -m pytest -k test_empty_block_transition --fork altair eth2spec
|
||||
```
|
||||
1. The output should be similar to:
|
||||
3. The output should be similar to:
|
||||
```
|
||||
============================= test session starts ==============================
|
||||
platform linux -- Python 3.9.2, pytest-6.2.5, py-1.10.0, pluggy-1.0.0
|
||||
|
@ -114,7 +114,7 @@ In Python `yield` is used by [generators](https://wiki.python.org/moin/Generator
we can treat it as a partial return statement that doesn't stop the function's processing, only adds to a list
of return values. Here we add two values, the string `'pre'` and the initial state, to the list of return values.
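For readers unfamiliar with the pattern, here is a minimal, self-contained illustration (unrelated to any real spec test) of how a generator accumulates named output parts:

```python
def tiny_test():
    yield 'pre', {'slot': 0}    # first named part
    yield 'post', {'slot': 1}   # second named part


parts = list(tiny_test())
assert parts == [('pre', {'slot': 0}), ('post', {'slot': 1})]
```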
[You can read more about test generators and how the are used here](generators).
[You can read more about test generators and how they are used here](generators).

```python
block = build_empty_block_for_next_slot(spec, state)
@ -417,7 +417,7 @@ In the last line you can see two conditions being asserted:
1. `data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot` which verifies that the attestation doesn't
arrive too early.
1. `state.slot <= data.slot + SLOTS_PER_EPOCH` which verifies that the attestation doesn't
2. `state.slot <= data.slot + SLOTS_PER_EPOCH` which verifies that the attestation doesn't
arrive too late.
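Taken together, the two assertions bound the slots at which an attestation may be included. A standalone sketch of the same window check, using the `minimal` preset values for the two constants, looks like this:

```python
MIN_ATTESTATION_INCLUSION_DELAY = 1  # minimal preset
SLOTS_PER_EPOCH = 8                  # minimal preset


def in_inclusion_window(attestation_slot: int, state_slot: int) -> bool:
    not_too_early = attestation_slot + MIN_ATTESTATION_INCLUSION_DELAY <= state_slot
    not_too_late = state_slot <= attestation_slot + SLOTS_PER_EPOCH
    return not_too_early and not_too_late


assert in_inclusion_window(attestation_slot=10, state_slot=11)
assert not in_inclusion_window(attestation_slot=10, state_slot=19)
```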
This is how the consensus layer tests deal with edge cases, by asserting the conditions required for the
@ -431,7 +431,7 @@ Now we'll write a similar test that verifies that being `SLOTS_PER_EPOCH` away i
`test_after_epoch_slots` function. We need two changes:

1. Call `transition_to_slot_via_block` with one less slot to advance
1. Don't tell `run_attestation_processing` to return an empty post state.
2. Don't tell `run_attestation_processing` to return an empty post state.

The modified function is:
@ -1 +1 @@
1.3.0-alpha.0
1.3.0-alpha.2
@ -4,7 +4,6 @@ import time
|
|||
import shutil
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from filelock import FileLock
|
||||
import sys
|
||||
import json
|
||||
from typing import Iterable, AnyStr, Any, Callable
|
||||
|
@ -13,6 +12,7 @@ from ruamel.yaml import (
|
|||
YAML,
|
||||
)
|
||||
|
||||
from filelock import FileLock
|
||||
from snappy import compress
|
||||
|
||||
from eth2spec.test import context
|
||||
|
@ -141,6 +141,10 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
|||
tprov.prepare()
|
||||
|
||||
for test_case in tprov.make_cases():
|
||||
# If preset list is assigned, filter by presets.
|
||||
if len(presets) != 0 and test_case.preset_name not in presets:
|
||||
continue
|
||||
|
||||
case_dir = (
|
||||
Path(output_dir) / Path(test_case.preset_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
|
@ -179,7 +183,16 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
|||
try:
|
||||
fn(case_dir)
|
||||
except IOError as e:
|
||||
sys.exit(f'Error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}')
|
||||
error_message = (
|
||||
f'[Error] error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}'
|
||||
)
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
|
||||
sys.exit(error_message)
|
||||
|
||||
meta = dict()
|
||||
|
||||
|
@ -210,13 +223,13 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
|||
if not written_part:
|
||||
print(f"test case {case_dir} did not produce any test case parts")
|
||||
except Exception as e:
|
||||
print(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
|
||||
traceback.print_exc()
|
||||
# Write to log file
|
||||
error_message = f"[ERROR] failed to generate vector(s) for test {case_dir}: {e}"
|
||||
# Write to error log file
|
||||
with log_file.open("a+") as f:
|
||||
f.write(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
|
||||
f.write(error_message)
|
||||
traceback.print_exc(file=f)
|
||||
f.write('\n')
|
||||
traceback.print_exc()
|
||||
else:
|
||||
# If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
|
||||
if not written_part:
|
||||
|
|
|
@ -49,7 +49,7 @@ def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
|||
preset_name=preset_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
suite_name=getattr(tfn, 'suite_name', 'pyspec_tests'),
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=phase, preset=preset_name, bls_active=bls_active)
|
||||
|
|
|
@ -136,18 +136,22 @@ def test_invalid_signature_extra_participant(spec, state):
|
|||
yield from run_sync_committee_processing(spec, state, block, expect_exception=True)
|
||||
|
||||
|
||||
def is_duplicate_sync_committee(committee_indices):
|
||||
dup = {v for v in committee_indices if committee_indices.count(v) > 1}
|
||||
return len(dup) > 0
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_presets([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee_indices = compute_committee_indices(state)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count > spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee_indices))
|
||||
assert not is_duplicate_sync_committee(committee_indices)
|
||||
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
@ -157,13 +161,12 @@ def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
|||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state):
|
||||
committee_indices = compute_committee_indices(state)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [False] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee_indices))
|
||||
assert is_duplicate_sync_committee(committee_indices)
|
||||
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [False] * committee_size
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
@ -173,14 +176,13 @@ def test_sync_committee_rewards_duplicate_committee_no_participation(spec, state
|
|||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_half_participation(spec, state):
|
||||
committee_indices = compute_committee_indices(state)
|
||||
|
||||
# Preconditions of this test case
|
||||
assert is_duplicate_sync_committee(committee_indices)
|
||||
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * (committee_size // 2) + [False] * (committee_size // 2)
|
||||
assert len(committee_bits) == committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee_indices))
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
@ -190,17 +192,115 @@ def test_sync_committee_rewards_duplicate_committee_half_participation(spec, sta
|
|||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_full_participation(spec, state):
|
||||
committee_indices = compute_committee_indices(state)
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee_indices))
|
||||
assert is_duplicate_sync_committee(committee_indices)
|
||||
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [True] * committee_size
|
||||
|
||||
yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
|
||||
|
||||
|
||||
def _run_sync_committee_selected_twice(
|
||||
spec, state,
|
||||
pre_balance, participate_first_position, participate_second_position,
|
||||
skip_reward_validation=False):
|
||||
committee_indices = compute_committee_indices(state)
|
||||
|
||||
# Preconditions of this test case
|
||||
assert is_duplicate_sync_committee(committee_indices)
|
||||
|
||||
committee_size = len(committee_indices)
|
||||
committee_bits = [False] * committee_size
|
||||
|
||||
# Find duplicate indices that get selected twice
|
||||
dup = {v for v in committee_indices if committee_indices.count(v) == 2}
|
||||
assert len(dup) > 0
|
||||
validator_index = dup.pop()
|
||||
positions = [i for i, v in enumerate(committee_indices) if v == validator_index]
|
||||
committee_bits[positions[0]] = participate_first_position
|
||||
committee_bits[positions[1]] = participate_second_position
|
||||
|
||||
# Set validator's balance
|
||||
state.balances[validator_index] = pre_balance
|
||||
state.validators[validator_index].effective_balance = min(
|
||||
pre_balance - pre_balance % spec.EFFECTIVE_BALANCE_INCREMENT,
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
)
|
||||
|
||||
yield from run_successful_sync_committee_test(
|
||||
spec, state, committee_indices, committee_bits,
|
||||
skip_reward_validation=skip_reward_validation)
|
||||
|
||||
return validator_index
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_zero_balance_only_participate_first_one(spec, state):
|
||||
validator_index = yield from _run_sync_committee_selected_twice(
|
||||
spec,
|
||||
state,
|
||||
pre_balance=0,
|
||||
participate_first_position=True,
|
||||
participate_second_position=False,
|
||||
)
|
||||
|
||||
# The validator gets reward first (balance > 0) and then gets the same amount of penalty (balance == 0)
|
||||
assert state.balances[validator_index] == 0
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_zero_balance_only_participate_second_one(spec, state):
|
||||
# Skip `validate_sync_committee_rewards` because it doesn't handle the balance computation order
|
||||
# inside the for loop
|
||||
validator_index = yield from _run_sync_committee_selected_twice(
|
||||
spec,
|
||||
state,
|
||||
pre_balance=0,
|
||||
participate_first_position=False,
|
||||
participate_second_position=True,
|
||||
skip_reward_validation=True,
|
||||
)
|
||||
|
||||
# The validator gets penalty first (balance is still 0) and then gets reward (balance > 0)
|
||||
assert state.balances[validator_index] > 0
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_max_effective_balance_only_participate_first_one(spec, state):
|
||||
validator_index = yield from _run_sync_committee_selected_twice(
|
||||
spec,
|
||||
state,
|
||||
pre_balance=spec.MAX_EFFECTIVE_BALANCE,
|
||||
participate_first_position=True,
|
||||
participate_second_position=False,
|
||||
)
|
||||
|
||||
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@with_presets([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee_max_effective_balance_only_participate_second_one(spec, state):
|
||||
validator_index = yield from _run_sync_committee_selected_twice(
|
||||
spec,
|
||||
state,
|
||||
pre_balance=spec.MAX_EFFECTIVE_BALANCE,
|
||||
participate_first_position=False,
|
||||
participate_second_position=True,
|
||||
)
|
||||
|
||||
assert state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
|
|
|
@ -0,0 +1,41 @@
|
|||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
with_phases,
|
||||
with_altair_and_later,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import (
|
||||
ALTAIR,
|
||||
)
|
||||
|
||||
|
||||
from eth2spec.test.helpers.deposits import (
|
||||
run_deposit_processing_with_specific_fork_version,
|
||||
)
|
||||
|
||||
|
||||
@with_phases([ALTAIR])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_effective_deposit_with_previous_fork_version(spec, state):
|
||||
assert state.fork.previous_version != state.fork.current_version
|
||||
|
||||
# It's only effective in Altair because the default `fork_version` of `compute_domain` is `GENESIS_FORK_VERSION`.
|
||||
# Therefore it's just a normal `DepositMessage`.
|
||||
yield from run_deposit_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version=state.fork.previous_version,
|
||||
)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_ineffective_deposit_with_current_fork_version(spec, state):
|
||||
yield from run_deposit_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version=state.fork.current_version,
|
||||
effective=False,
|
||||
)
|
|
@ -212,7 +212,7 @@ def slash_some_validators_for_inactivity_scores_test(spec, state, rng=Random(404
|
|||
next_epoch_via_block(spec, future_state)
|
||||
|
||||
proposer_index = spec.get_beacon_proposer_index(future_state)
|
||||
# Slash ~1/4 of validaors
|
||||
# Slash ~1/4 of validators
|
||||
for validator_index in range(len(state.validators)):
|
||||
if rng.choice(range(4)) == 0 and validator_index != proposer_index:
|
||||
spec.slash_validator(state, validator_index)
|
||||
|
|
|
@ -110,7 +110,7 @@ def test_sync_committees_progress_misc_balances_not_genesis(spec, state):
|
|||
@spec_state_test
|
||||
@always_bls
|
||||
@with_presets([MINIMAL], reason="too slow")
|
||||
def test_sync_committees_no_progress_not_boundary(spec, state):
|
||||
def test_sync_committees_no_progress_not_at_period_boundary(spec, state):
|
||||
assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
|
||||
slot_not_at_period_boundary = state.slot + spec.SLOTS_PER_EPOCH
|
||||
transition_to(spec, state, slot_not_at_period_boundary)
|
||||
|
|
|
@ -1,14 +1,17 @@
|
|||
from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
with_altair_and_later,
|
||||
with_test_suite_name,
|
||||
)
|
||||
|
||||
|
||||
@with_test_suite_name("BeaconState")
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_current_sync_committee_merkle_proof(spec, state):
|
||||
yield "state", state
|
||||
current_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
|
||||
yield "object", state
|
||||
current_sync_committee_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.CURRENT_SYNC_COMMITTEE_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.current_sync_committee.hash_tree_root().hex(),
|
||||
"leaf_index": spec.CURRENT_SYNC_COMMITTEE_INDEX,
|
||||
|
@ -23,11 +26,13 @@ def test_current_sync_committee_merkle_proof(spec, state):
|
|||
)
|
||||
|
||||
|
||||
@with_test_suite_name("BeaconState")
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_next_sync_committee_merkle_proof(spec, state):
|
||||
yield "state", state
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
yield "object", state
|
||||
next_sync_committee_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.next_sync_committee.hash_tree_root().hex(),
|
||||
"leaf_index": spec.NEXT_SYNC_COMMITTEE_INDEX,
|
||||
|
@ -42,11 +47,13 @@ def test_next_sync_committee_merkle_proof(spec, state):
|
|||
)
|
||||
|
||||
|
||||
@with_test_suite_name("BeaconState")
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_finality_root_merkle_proof(spec, state):
|
||||
yield "state", state
|
||||
finality_branch = spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
|
||||
yield "object", state
|
||||
finality_branch = \
|
||||
spec.compute_merkle_proof_for_state(state, spec.FINALIZED_ROOT_INDEX)
|
||||
yield "proof", {
|
||||
"leaf": "0x" + state.finalized_checkpoint.root.hex(),
|
||||
"leaf_index": spec.FINALIZED_ROOT_INDEX,
|
||||
|
|
|
@ -35,7 +35,7 @@ def setup_test(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
|
||||
trusted_block = state_transition_with_full_block(spec, state, True, True)
|
||||
trusted_block_root = trusted_block.message.hash_tree_root()
|
||||
bootstrap = spec.create_light_client_bootstrap(state)
|
||||
bootstrap = spec.create_light_client_bootstrap(state, trusted_block)
|
||||
yield "trusted_block_root", "meta", "0x" + trusted_block_root.hex()
|
||||
yield "bootstrap", bootstrap
|
||||
test.store = spec.initialize_light_client_store(trusted_block_root, bootstrap)
|
||||
|
@ -63,11 +63,11 @@ def get_checks(store):
|
|||
return {
|
||||
"finalized_header": {
|
||||
'slot': int(store.finalized_header.slot),
|
||||
'root': encode_hex(store.finalized_header.hash_tree_root()),
|
||||
'beacon_root': encode_hex(store.finalized_header.hash_tree_root()),
|
||||
},
|
||||
"optimistic_header": {
|
||||
'slot': int(store.optimistic_header.slot),
|
||||
'root': encode_hex(store.optimistic_header.hash_tree_root()),
|
||||
'beacon_root': encode_hex(store.optimistic_header.hash_tree_root()),
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -85,9 +85,9 @@ def emit_force_update(test, spec, state):
|
|||
})
|
||||
|
||||
|
||||
def emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=True):
|
||||
update = spec.create_light_client_update(state, block, attested_state, finalized_block)
|
||||
if not with_next_sync_committee:
|
||||
def emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=True):
|
||||
update = spec.create_light_client_update(state, block, attested_state, attested_block, finalized_block)
|
||||
if not with_next:
|
||||
update.next_sync_committee = spec.SyncCommittee()
|
||||
update.next_sync_committee_branch = \
|
||||
[spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
|
@ -110,7 +110,7 @@ def compute_start_slot_at_sync_committee_period(spec, sync_committee_period):
|
|||
|
||||
|
||||
def compute_start_slot_at_next_sync_committee_period(spec, state):
|
||||
sync_committee_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
|
||||
sync_committee_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
return compute_start_slot_at_sync_committee_period(spec, sync_committee_period + 1)
|
||||
|
||||
|
||||
|
@ -135,11 +135,12 @@ def test_light_client_sync(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -160,11 +161,12 @@ def test_light_client_sync(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -184,12 +186,13 @@ def test_light_client_sync(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 2)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
transition_to(spec, state, compute_start_slot_at_next_sync_committee_period(spec, state))
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -206,10 +209,11 @@ def test_light_client_sync(spec, state):
|
|||
# sync committee
|
||||
# period boundary
|
||||
# ```
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
|
@ -226,11 +230,12 @@ def test_light_client_sync(spec, state):
|
|||
# sync committee
|
||||
# period boundary
|
||||
# ```
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
store_state = attested_state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
|
@ -247,6 +252,7 @@ def test_light_client_sync(spec, state):
|
|||
# sync committee `--- store.finalized_header
|
||||
# period boundary
|
||||
# ```
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT - 1)
|
||||
yield from emit_force_update(test, spec, state)
|
||||
|
@ -268,7 +274,7 @@ def test_light_client_sync(spec, state):
|
|||
# ```
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block=None)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block=None)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
|
@ -285,10 +291,11 @@ def test_light_client_sync(spec, state):
|
|||
# sync committee sync committee
|
||||
# period boundary period boundary
|
||||
# ```
|
||||
attested_block = block.copy()
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == store_state.slot
|
||||
assert test.store.next_sync_committee == store_state.next_sync_committee
|
||||
assert test.store.best_valid_update == update
|
||||
|
@ -314,11 +321,12 @@ def test_light_client_sync(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -336,7 +344,8 @@ def test_supply_sync_committee_from_past_update(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2 - 1)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
|
@ -347,7 +356,7 @@ def test_supply_sync_committee_from_past_update(spec, state):
|
|||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
|
||||
# Apply `LightClientUpdate` from the past, populating `store.next_sync_committee`
|
||||
yield from emit_update(test, spec, past_state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, past_state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -368,11 +377,12 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
next_slots(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
finalized_block = state_transition_with_full_block(spec, state, True, True)
|
||||
finalized_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, 2 * spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -386,11 +396,12 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
justified_block = state_transition_with_full_block(spec, state, True, True)
|
||||
justified_state = state.copy()
|
||||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
|
@ -400,27 +411,28 @@ def test_advance_finality_without_sync_committee(spec, state):
|
|||
past_state = finalized_state
|
||||
finalized_block = justified_block
|
||||
finalized_state = justified_state
|
||||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 1, True, True)
|
||||
_, _, state = next_slots_with_attestations(spec, state, spec.SLOTS_PER_EPOCH - 2, True, True)
|
||||
attested_block = state_transition_with_full_block(spec, state, True, True)
|
||||
attested_state = state.copy()
|
||||
sync_aggregate, _ = get_sync_aggregate(spec, state)
|
||||
block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate)
|
||||
|
||||
# Apply `LightClientUpdate` without `finalized_header` nor `next_sync_committee`
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, None, with_next_sync_committee=False)
|
||||
update = yield from emit_update(test, spec, state, block, attested_state, attested_block, None, with_next=False)
|
||||
assert test.store.finalized_header.slot == past_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update == update
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
|
||||
# Apply `LightClientUpdate` with `finalized_header` but no `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block, with_next_sync_committee=False)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, with_next=False)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert not spec.is_next_sync_committee_known(test.store)
|
||||
assert test.store.best_valid_update is None
|
||||
assert test.store.optimistic_header.slot == attested_state.slot
|
||||
|
||||
# Apply full `LightClientUpdate`, supplying `next_sync_committee`
|
||||
yield from emit_update(test, spec, state, block, attested_state, finalized_block)
|
||||
yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block)
|
||||
assert test.store.finalized_header.slot == finalized_state.slot
|
||||
assert test.store.next_sync_committee == finalized_state.next_sync_committee
|
||||
assert test.store.best_valid_update is None
|
||||
|
|
|
@ -18,13 +18,13 @@ from eth2spec.test.helpers.state import (
|
|||
from math import floor
|
||||
|
||||
|
||||
def create_update(spec, test, with_next_sync_committee, with_finality, participation_rate):
|
||||
def create_update(spec, test, with_next, with_finality, participation_rate):
|
||||
attested_state, attested_block, finalized_block = test
|
||||
num_participants = floor(spec.SYNC_COMMITTEE_SIZE * participation_rate)
|
||||
|
||||
attested_header = signed_block_to_header(spec, attested_block)
|
||||
|
||||
if with_next_sync_committee:
|
||||
if with_next:
|
||||
next_sync_committee = attested_state.next_sync_committee
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(attested_state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
else:
|
||||
|
@ -84,76 +84,76 @@ def test_update_ranking(spec, state):
|
|||
# Create updates (in descending order of quality)
|
||||
updates = [
|
||||
# Updates with sync committee finality
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without sync committee finality
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without indication of any finality
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.8),
|
||||
|
||||
# Updates with sync committee finality but no `next_sync_committee`
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without sync committee finality and also no `next_sync_committee`
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.8),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.8),
|
||||
|
||||
# Updates without indication of any finality nor `next_sync_committee`
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=1.0),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.8),
|
||||
|
||||
# Updates with low sync committee participation
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.4),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.4),
|
||||
|
||||
# Updates with very low sync committee participation
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next_sync_committee=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, att, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next_sync_committee=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=1, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=1, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=0, with_finality=1, participation_rate=0.2),
|
||||
create_update(spec, sig, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, att, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, fin, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
create_update(spec, lat, with_next=0, with_finality=0, participation_rate=0.2),
|
||||
]
|
||||
yield "updates", updates
|
||||
|
||||
|
|
|
@ -51,40 +51,40 @@ def run_sync_committee_sanity_test(spec, state, fraction_full=1.0, rng=Random(45
|
|||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee(spec, state):
|
||||
def test_sync_committee_committee__full(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee(spec, state):
|
||||
def test_sync_committee_committee__half(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(1212))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee(spec, state):
|
||||
def test_sync_committee_committee__empty(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee_genesis(spec, state):
|
||||
def test_sync_committee_committee_genesis__full(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee_genesis(spec, state):
|
||||
def test_sync_committee_committee_genesis__half(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5, rng=Random(2323))
|
||||
|
||||
|
||||
@with_altair_and_later
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee_genesis(spec, state):
|
||||
def test_sync_committee_committee_genesis__empty(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
||||
|
||||
|
||||
|
@ -136,6 +136,6 @@ def test_inactivity_scores_full_participation_leaking(spec, state):
|
|||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
# Full particiaption during a leak so all scores should decrease by 1
|
||||
# Full participation during a leak so all scores should decrease by 1
|
||||
for pre, post in zip(previous_inactivity_scores, state.inactivity_scores):
|
||||
assert post == pre - 1
|
||||
|
|
|
@ -37,14 +37,14 @@ def test_process_light_client_update_not_timeout(spec, state):
|
|||
# Ensure that finality checkpoint is genesis
|
||||
assert state.finalized_checkpoint.epoch == 0
|
||||
# Finality is unchanged
|
||||
finality_header = spec.BeaconBlockHeader()
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finality_header,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
|
@ -68,8 +68,8 @@ def test_process_light_client_update_at_period_boundary(spec, state):
|
|||
|
||||
# Forward to slot before next sync committee period so that next block is final one in period
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT - 2)
|
||||
store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
|
||||
update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period == update_period
|
||||
|
||||
attested_block = state_transition_with_full_block(spec, state, False, False)
|
||||
|
@ -81,14 +81,14 @@ def test_process_light_client_update_at_period_boundary(spec, state):
|
|||
next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
|
||||
|
||||
# Finality is unchanged
|
||||
finality_header = spec.BeaconBlockHeader()
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finality_header,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
|
@ -112,8 +112,8 @@ def test_process_light_client_update_timeout(spec, state):
|
|||
|
||||
# Forward to next sync committee period
|
||||
next_slots(spec, state, spec.UPDATE_TIMEOUT)
|
||||
store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
|
||||
update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period + 1 == update_period
|
||||
|
||||
attested_block = state_transition_with_full_block(spec, state, False, False)
|
||||
|
@ -126,14 +126,14 @@ def test_process_light_client_update_timeout(spec, state):
|
|||
next_sync_committee = state.next_sync_committee
|
||||
next_sync_committee_branch = spec.compute_merkle_proof_for_state(state, spec.NEXT_SYNC_COMMITTEE_INDEX)
|
||||
# Finality is unchanged
|
||||
finality_header = spec.BeaconBlockHeader()
|
||||
finalized_header = spec.BeaconBlockHeader()
|
||||
finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]
|
||||
|
||||
update = spec.LightClientUpdate(
|
||||
attested_header=attested_header,
|
||||
next_sync_committee=next_sync_committee,
|
||||
next_sync_committee_branch=next_sync_committee_branch,
|
||||
finalized_header=finality_header,
|
||||
finalized_header=finalized_header,
|
||||
finality_branch=finality_branch,
|
||||
sync_aggregate=sync_aggregate,
|
||||
signature_slot=signature_slot,
|
||||
|
@ -164,8 +164,8 @@ def test_process_light_client_update_finality_updated(spec, state):
|
|||
# Ensure that finality checkpoint has changed
|
||||
assert state.finalized_checkpoint.epoch == 3
|
||||
# Ensure that it's same period
|
||||
store_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(store.optimistic_header.slot))
|
||||
update_period = spec.compute_sync_committee_period(spec.compute_epoch_at_slot(state.slot))
|
||||
store_period = spec.compute_sync_committee_period_at_slot(store.optimistic_header.slot)
|
||||
update_period = spec.compute_sync_committee_period_at_slot(state.slot)
|
||||
assert store_period == update_period
|
||||
|
||||
attested_block = blocks[-1]
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
from eth2spec.test.context import (
|
||||
is_post_capella,
|
||||
is_post_eip4844,
|
||||
spec_configured_state_test,
|
||||
spec_state_test_with_matching_config,
|
||||
with_all_phases,
|
||||
with_phases,
|
||||
)
|
||||
from eth2spec.test.helpers.constants import ALTAIR
|
||||
from eth2spec.test.helpers.constants import (
|
||||
PHASE0, ALTAIR,
|
||||
ALL_PHASES,
|
||||
)
|
||||
from eth2spec.test.helpers.forks import is_post_fork
|
||||
|
||||
|
||||
@with_phases([ALTAIR])
|
||||
|
@ -29,29 +31,28 @@ def test_config_override(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test_with_matching_config
|
||||
def test_override_config_fork_epoch(spec, state):
|
||||
if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
|
||||
return
|
||||
# Fork schedule must be consistent with state fork
|
||||
epoch = spec.get_current_epoch(state)
|
||||
if is_post_fork(spec.fork, ALTAIR):
|
||||
assert state.fork.current_version == spec.compute_fork_version(epoch)
|
||||
else:
|
||||
assert state.fork.current_version == spec.config.GENESIS_FORK_VERSION
|
||||
|
||||
assert spec.config.ALTAIR_FORK_EPOCH == spec.GENESIS_EPOCH
|
||||
if state.fork.current_version == spec.config.ALTAIR_FORK_VERSION:
|
||||
return
|
||||
# Identify state fork
|
||||
state_fork = None
|
||||
for fork in [fork for fork in ALL_PHASES if is_post_fork(spec.fork, fork)]:
|
||||
if fork == PHASE0:
|
||||
fork_version_field = 'GENESIS_FORK_VERSION'
|
||||
else:
|
||||
fork_version_field = fork.upper() + '_FORK_VERSION'
|
||||
if state.fork.current_version == getattr(spec.config, fork_version_field):
|
||||
state_fork = fork
|
||||
break
|
||||
assert state_fork is not None
|
||||
|
||||
assert spec.config.BELLATRIX_FORK_EPOCH == spec.GENESIS_EPOCH
|
||||
if state.fork.current_version == spec.config.BELLATRIX_FORK_VERSION:
|
||||
return
|
||||
|
||||
if is_post_capella(spec):
|
||||
assert spec.config.CAPELLA_FORK_EPOCH == spec.GENESIS_EPOCH
|
||||
if state.fork.current_version == spec.config.CAPELLA_FORK_VERSION:
|
||||
return
|
||||
|
||||
if is_post_eip4844(spec):
|
||||
assert spec.config.EIP4844_FORK_EPOCH == spec.GENESIS_EPOCH
|
||||
if state.fork.current_version == spec.config.EIP4844_FORK_VERSION:
|
||||
return
|
||||
|
||||
assert spec.config.SHARDING_FORK_EPOCH == spec.GENESIS_EPOCH
|
||||
if state.fork.current_version == spec.config.SHARDING_FORK_VERSION:
|
||||
return
|
||||
|
||||
assert False # Fork is missing
|
||||
# Check that all prior forks have already been triggered
|
||||
for fork in [fork for fork in ALL_PHASES if is_post_fork(state_fork, fork)]:
|
||||
if fork == PHASE0:
|
||||
continue
|
||||
fork_epoch_field = fork.upper() + '_FORK_EPOCH'
|
||||
assert getattr(spec.config, fork_epoch_field) <= epoch
|
||||
|
|
|
@ -55,7 +55,7 @@ def test_is_assigned_to_sync_committee(spec, state):
|
|||
disqualified_pubkeys = set(
|
||||
filter(lambda key: key not in sync_committee_pubkeys, active_pubkeys)
|
||||
)
|
||||
# NOTE: only check `disqualified_pubkeys` if SYNC_COMMITEE_SIZE < validator count
|
||||
# NOTE: only check `disqualified_pubkeys` if SYNC_COMMITTEE_SIZE < validator count
|
||||
if disqualified_pubkeys:
|
||||
sample_size = 3
|
||||
assert validator_count >= sample_size
|
||||
|
|
|
@@ -4,55 +4,23 @@ from eth2spec.test.context import (
    with_bellatrix_and_later,
)
from eth2spec.test.helpers.deposits import (
    deposit_from_context,
    run_deposit_processing,
    run_deposit_processing_with_specific_fork_version,
)
from eth2spec.test.helpers.keys import (
    privkeys,
    pubkeys,
)
from eth2spec.utils import bls


def _run_deposit_processing_with_specific_fork_version(
        spec,
        state,
        fork_version,
        valid,
        effective):
    validator_index = len(state.validators)
    amount = spec.MAX_EFFECTIVE_BALANCE

    pubkey = pubkeys[validator_index]
    privkey = privkeys[validator_index]
    withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]

    deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
    domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
    deposit_data = spec.DepositData(
        pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
        signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
    )
    deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)

    state.eth1_deposit_index = 0
    state.eth1_data.deposit_root = root
    state.eth1_data.deposit_count = 1

    yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)


@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_previous_fork_version__valid_ineffective(spec, state):
def test_ineffective_deposit_with_previous_fork_version(spec, state):
    # Since deposits are valid across forks, the domain is always set with `GENESIS_FORK_VERSION`.
    # It's an ineffective deposit because it fails at BLS sig verification.
    # NOTE: it was effective in Altair.
    assert state.fork.previous_version != state.fork.current_version

    yield from _run_deposit_processing_with_specific_fork_version(
    yield from run_deposit_processing_with_specific_fork_version(
        spec,
        state,
        fork_version=state.fork.previous_version,
        valid=True,
        effective=False,
    )


@@ -60,26 +28,11 @@ def test_deposit_with_previous_fork_version__valid_ineffective(spec, state):
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_genesis_fork_version__valid_effective(spec, state):
def test_effective_deposit_with_genesis_fork_version(spec, state):
    assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

    yield from _run_deposit_processing_with_specific_fork_version(
    yield from run_deposit_processing_with_specific_fork_version(
        spec,
        state,
        fork_version=spec.config.GENESIS_FORK_VERSION,
        valid=True,
        effective=True,
    )


@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_deposit_with_bad_fork_version__valid_ineffective(spec, state):
    yield from _run_deposit_processing_with_specific_fork_version(
        spec,
        state,
        fork_version=spec.Version('0xAaBbCcDd'),
        valid=True,
        effective=False,
    )
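These cases hinge on one rule: the deposit signing domain is always derived from `GENESIS_FORK_VERSION`, so a signature produced under any other fork version fails BLS verification, and `process_deposit` then skips the deposit instead of raising ("valid but ineffective"). A hypothetical extra case in the same style, assuming the repo's test environment and the shared `run_deposit_processing_with_specific_fork_version` helper named in the diff (its keyword arguments are assumed to match the local helper shown above):

```python
# Hypothetical sketch, not part of the diff: same pattern with the *current* fork version.
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_ineffective_deposit_with_current_fork_version(spec, state):
    # Signing with the state's current fork version yields a different domain than the
    # genesis-derived one used during verification, so the BLS check fails; processing
    # still succeeds, it just does not create or top up a validator.
    yield from run_deposit_processing_with_specific_fork_version(
        spec,
        state,
        fork_version=state.fork.current_version,
        valid=True,
        effective=False,
    )
```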
@@ -3,6 +3,7 @@ from random import Random
from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
    build_randomized_execution_payload,
    compute_el_block_hash,
    get_execution_payload_header,
    build_state_with_incomplete_transition,
    build_state_with_complete_transition,

@@ -104,14 +105,14 @@ def run_bad_execution_test(spec, state):

@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_first_payload(spec, state):
def test_invalid_bad_execution_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_bad_execution_test(spec, state)


@with_bellatrix_and_later
@spec_state_test
def test_bad_execution_regular_payload(spec, state):
def test_invalid_bad_execution_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_bad_execution_test(spec, state)


@@ -124,18 +125,20 @@ def test_bad_parent_hash_first_payload(spec, state):

    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.parent_hash = b'\x55' * 32
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=True)
    yield from run_execution_payload_processing(spec, state, execution_payload)


@with_bellatrix_and_later
@spec_state_test
def test_bad_parent_hash_regular_payload(spec, state):
def test_invalid_bad_parent_hash_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    next_slot(spec, state)

    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.parent_hash = spec.Hash32()
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)


@@ -145,20 +148,21 @@ def run_bad_prev_randao_test(spec, state):

    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.prev_randao = b'\x42' * 32
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)


@with_bellatrix_and_later
@spec_state_test
def test_bad_prev_randao_first_payload(spec, state):
def test_invalid_bad_prev_randao_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_bad_prev_randao_test(spec, state)


@with_bellatrix_and_later
@spec_state_test
def test_bad_pre_randao_regular_payload(spec, state):
def test_invalid_bad_pre_randao_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_bad_prev_randao_test(spec, state)


@@ -170,20 +174,21 @@ def run_bad_everything_test(spec, state):
    execution_payload.parent_hash = spec.Hash32()
    execution_payload.prev_randao = spec.Bytes32()
    execution_payload.timestamp = 0
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)


@with_bellatrix_and_later
@spec_state_test
def test_bad_everything_first_payload(spec, state):
def test_invalid_bad_everything_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_bad_everything_test(spec, state)


@with_bellatrix_and_later
@spec_state_test
def test_bad_everything_regular_payload(spec, state):
def test_invalid_bad_everything_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_bad_everything_test(spec, state)


@@ -198,34 +203,35 @@ def run_bad_timestamp_test(spec, state, is_future):
    else:
        timestamp = execution_payload.timestamp - 1
    execution_payload.timestamp = timestamp
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload, valid=False)


@with_bellatrix_and_later
@spec_state_test
def test_future_timestamp_first_payload(spec, state):
def test_invalid_future_timestamp_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_bad_timestamp_test(spec, state, is_future=True)


@with_bellatrix_and_later
@spec_state_test
def test_future_timestamp_regular_payload(spec, state):
def test_invalid_future_timestamp_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_bad_timestamp_test(spec, state, is_future=True)


@with_bellatrix_and_later
@spec_state_test
def test_past_timestamp_first_payload(spec, state):
def test_invalid_past_timestamp_first_payload(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_bad_timestamp_test(spec, state, is_future=False)


@with_bellatrix_and_later
@spec_state_test
def test_past_timestamp_regular_payload(spec, state):
def test_invalid_past_timestamp_regular_payload(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_bad_timestamp_test(spec, state, is_future=False)


@@ -235,6 +241,7 @@ def run_non_empty_extra_data_test(spec, state):

    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.extra_data = b'\x45' * 12
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload)
    assert state.latest_execution_payload_header.extra_data == execution_payload.extra_data

@@ -263,6 +270,7 @@ def run_non_empty_transactions_test(spec, state):
        spec.Transaction(b'\x99' * 128)
        for _ in range(num_transactions)
    ]
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload)
    assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()

@@ -288,6 +296,7 @@ def run_zero_length_transaction_test(spec, state):
    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.transactions = [spec.Transaction(b'')]
    assert len(execution_payload.transactions[0]) == 0
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_execution_payload_processing(spec, state, execution_payload)
    assert state.latest_execution_payload_header.transactions_root == execution_payload.transactions.hash_tree_root()

@@ -320,27 +329,27 @@ def run_randomized_non_validated_execution_fields_test(spec, state, execution_va

@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_first_payload__valid(spec, state):
def test_randomized_non_validated_execution_fields_first_payload__execution_valid(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_randomized_non_validated_execution_fields_test(spec, state)


@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_regular_payload__valid(spec, state):
def test_randomized_non_validated_execution_fields_regular_payload__execution_valid(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_randomized_non_validated_execution_fields_test(spec, state)


@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_first_payload__invalid(spec, state):
def test_invalid_randomized_non_validated_execution_fields_first_payload__execution_invalid(spec, state):
    state = build_state_with_incomplete_transition(spec, state)
    yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)


@with_bellatrix_and_later
@spec_state_test
def test_randomized_non_validated_execution_fields_regular_payload__invalid(spec, state):
def test_invalid_randomized_non_validated_execution_fields_regular_payload__execution_invalid(spec, state):
    state = build_state_with_complete_transition(spec, state)
    yield from run_randomized_non_validated_execution_fields_test(spec, state, execution_valid=False)
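The recurring pattern in the payload changes above: whenever a test mutates an execution payload field, it now recomputes `block_hash` with `compute_el_block_hash` so the payload is rejected only for the intended reason rather than for a stale hash. A hypothetical convenience wrapper (not part of the diff), assuming the repo's helpers, illustrating the pattern:

```python
# Hypothetical sketch: mutate chosen payload fields, then refresh block_hash so that
# only the field under test can cause the failure.
def build_payload_with(spec, state, **overrides):
    payload = build_empty_execution_payload(spec, state)
    for field, value in overrides.items():
        setattr(payload, field, value)  # e.g. prev_randao=b'\x42' * 32
    payload.block_hash = compute_el_block_hash(spec, payload)
    return payload
```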
@@ -18,7 +18,7 @@ def _run_voluntary_exit_processing_test(
        state,
        fork_version,
        is_before_fork_epoch,
        valid):
        valid=True):
    # create a fork
    next_epoch(spec, state)
    state.fork.epoch = spec.get_current_epoch(state)

@@ -50,7 +50,7 @@ def _run_voluntary_exit_processing_test(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_current_fork_version_is_before_fork_epoch(spec, state):
    yield from _run_voluntary_exit_processing_test(
        spec,
        state,

@@ -63,20 +63,19 @@ def test_voluntary_exit_with_current_fork_version_is_before_fork_epoch__invalid(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch__valid(spec, state):
def test_voluntary_exit_with_current_fork_version_not_is_before_fork_epoch(spec, state):
    yield from _run_voluntary_exit_processing_test(
        spec,
        state,
        fork_version=state.fork.current_version,
        is_before_fork_epoch=False,
        valid=True,
    )


@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch__valid(spec, state):
def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch(spec, state):
    assert state.fork.previous_version != state.fork.current_version

    yield from _run_voluntary_exit_processing_test(

@@ -84,14 +83,13 @@ def test_voluntary_exit_with_previous_fork_version_is_before_fork_epoch__valid(s
        state,
        fork_version=state.fork.previous_version,
        is_before_fork_epoch=True,
        valid=True,
    )


@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch(spec, state):
    assert state.fork.previous_version != state.fork.current_version

    yield from _run_voluntary_exit_processing_test(

@@ -106,7 +104,7 @@ def test_voluntary_exit_with_previous_fork_version_not_is_before_fork_epoch__inv
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch(spec, state):
    assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

    yield from _run_voluntary_exit_processing_test(

@@ -121,7 +119,7 @@ def test_voluntary_exit_with_genesis_fork_version_is_before_fork_epoch__invalid(
@with_bellatrix_and_later
@spec_state_test
@always_bls
def test_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch__invalid(spec, state):
def test_invalid_voluntary_exit_with_genesis_fork_version_not_is_before_fork_epoch(spec, state):
    assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version)

    yield from _run_voluntary_exit_processing_test(
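All of these exit cases turn on which fork version the exit signature must use: the phase0 `get_domain()` helper picks the previous or current fork version by comparing the exit's epoch against `state.fork.epoch`. A standalone sketch of just that selection rule (the real spec then mixes the chosen version into the signing domain):

```python
# Simplified sketch of get_domain()'s fork-version selection for a voluntary exit.
def exit_signing_fork_version(previous_version: bytes, current_version: bytes,
                              fork_epoch: int, exit_epoch: int) -> bytes:
    # Exits for epochs before the fork boundary are signed under the previous version.
    return previous_version if exit_epoch < fork_epoch else current_version


assert exit_signing_fork_version(b'\x01', b'\x02', fork_epoch=10, exit_epoch=9) == b'\x01'
assert exit_signing_fork_version(b'\x01', b'\x02', fork_epoch=10, exit_epoch=10) == b'\x02'
```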
@@ -4,6 +4,9 @@ from eth2spec.test.context import spec_state_test, with_phases, BELLATRIX
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.fork_choice import (
    get_genesis_forkchoice_store_and_block,
    on_tick_and_append_step,

@@ -72,6 +75,7 @@ def test_all_valid(spec, state):
    def run_func():
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.parent_hash = pow_block.block_hash
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        signed_block = state_transition_and_sign_block(spec, state, block)
        yield from tick_and_add_block(spec, store, signed_block, test_steps, merge_block=True)
        # valid

@@ -103,6 +107,7 @@ def test_block_lookup_failed(spec, state):
    def run_func():
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.parent_hash = pow_block.block_hash
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        signed_block = state_transition_and_sign_block(spec, state, block)
        yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True,
                                      block_not_found=True)

@@ -136,6 +141,7 @@ def test_too_early_for_merge(spec, state):
    def run_func():
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.parent_hash = pow_block.block_hash
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        signed_block = state_transition_and_sign_block(spec, state, block)
        yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)


@@ -168,6 +174,7 @@ def test_too_late_for_merge(spec, state):
    def run_func():
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.parent_hash = pow_block.block_hash
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        signed_block = state_transition_and_sign_block(spec, state, block)
        yield from tick_and_add_block(spec, store, signed_block, test_steps, valid=False, merge_block=True)
@@ -8,6 +8,9 @@ from eth2spec.test.helpers.attestations import (
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.fork_choice import (
    get_genesis_forkchoice_store_and_block,
    on_tick_and_append_step,

@@ -33,6 +36,7 @@ def test_from_syncing_to_invalid(spec, state):
    fc_store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
    op_store = get_optimistic_store(spec, state, anchor_block)
    mega_store = MegaStore(spec, fc_store, op_store)
    block_hashes = {}
    yield 'anchor_state', state
    yield 'anchor_block', anchor_block

@@ -46,7 +50,7 @@ def test_from_syncing_to_invalid(spec, state):

    # Block 0
    block_0 = build_empty_block_for_next_slot(spec, state)
    block_0.body.execution_payload.block_hash = spec.hash(bytes('block_0', 'UTF-8'))
    block_hashes['block_0'] = block_0.body.execution_payload.block_hash
    signed_block = state_transition_and_sign_block(spec, state, block_0)
    yield from add_optimistic_block(spec, mega_store, signed_block, test_steps, status=PayloadStatusV1Status.VALID)
    assert spec.get_head(mega_store.fc_store) == mega_store.opt_store.head_block_root

@@ -57,10 +61,11 @@ def test_from_syncing_to_invalid(spec, state):
    signed_blocks_a = []
    for i in range(3):
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.block_hash = spec.hash(bytes(f'chain_a_{i}', 'UTF-8'))
        block.body.execution_payload.parent_hash = (
            spec.hash(bytes(f'chain_a_{i - 1}', 'UTF-8')) if i != 0 else block_0.body.execution_payload.block_hash
            block_hashes[f'chain_a_{i - 1}'] if i != 0 else block_hashes['block_0']
        )
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        block_hashes[f'chain_a_{i}'] = block.body.execution_payload.block_hash

        signed_block = state_transition_and_sign_block(spec, state, block)
        yield from add_optimistic_block(spec, mega_store, signed_block, test_steps, status=PayloadStatusV1Status.VALID)

@@ -72,10 +77,12 @@ def test_from_syncing_to_invalid(spec, state):
    state = state_0.copy()
    for i in range(3):
        block = build_empty_block_for_next_slot(spec, state)
        block.body.execution_payload.block_hash = spec.hash(bytes(f'chain_b_{i}', 'UTF-8'))
        block.body.execution_payload.parent_hash = (
            spec.hash(bytes(f'chain_b_{i - 1}', 'UTF-8')) if i != 0 else block_0.body.execution_payload.block_hash
            block_hashes[f'chain_b_{i - 1}'] if i != 0 else block_hashes['block_0']
        )
        block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
        block_hashes[f'chain_b_{i}'] = block.body.execution_payload.block_hash

        signed_block = state_transition_with_full_block(spec, state, True, True, block=block)
        signed_blocks_b.append(signed_block.copy())
        yield from add_optimistic_block(spec, mega_store, signed_block, test_steps,

@@ -84,8 +91,10 @@ def test_from_syncing_to_invalid(spec, state):

    # Now add block 4 to chain `b` with INVALID
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.block_hash = spec.hash(bytes('chain_b_3', 'UTF-8'))
    block.body.execution_payload.parent_hash = signed_blocks_b[-1].message.body.execution_payload.block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    block_hashes['chain_b_3'] = block.body.execution_payload.block_hash

    signed_block = state_transition_and_sign_block(spec, state, block)
    payload_status = PayloadStatusV1(
        status=PayloadStatusV1Status.INVALID,
@@ -3,6 +3,9 @@ from eth2spec.utils.ssz.ssz_typing import uint256, Bytes32
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.pow_block import (
    prepare_random_pow_chain,
)

@@ -57,6 +60,7 @@ def test_validate_merge_block_success(spec, state):
    pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block)


@@ -77,6 +81,7 @@ def test_validate_merge_block_fail_parent_block_lookup(spec, state):
    pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block, valid=False)


@@ -88,6 +93,7 @@ def test_validate_merge_block_fail_after_terminal(spec, state):
    pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY + uint256(1)
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block, valid=False)


@@ -104,6 +110,7 @@ def test_validate_merge_block_tbh_override_success(spec, state):
    pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block)


@@ -119,6 +126,7 @@ def test_validate_merge_block_fail_parent_hash_is_not_tbh(spec, state):
    pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block, valid=False)


@@ -135,6 +143,7 @@ def test_validate_merge_block_terminal_block_hash_fail_activation_not_reached(sp
    pow_chain.head().block_hash = TERMINAL_BLOCK_HASH
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block, valid=False)


@@ -150,4 +159,5 @@ def test_validate_merge_block_fail_activation_not_reached_parent_hash_is_not_tbh
    pow_chain.head().total_difficulty = spec.config.TERMINAL_TOTAL_DIFFICULTY
    block = build_empty_block_for_next_slot(spec, state)
    block.body.execution_payload.parent_hash = pow_chain.head().block_hash
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    run_validate_merge_block(spec, pow_chain, block, valid=False)
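These `validate_merge_block` cases all exercise the terminal proof-of-work condition: the referenced PoW block must have reached `TERMINAL_TOTAL_DIFFICULTY` while its parent has not. A simplified standalone sketch of that rule, with plain integers standing in for uint256 total difficulties:

```python
# Simplified sketch of the terminal-PoW-block rule exercised above.
def is_valid_terminal_pow_block(block_total_difficulty: int,
                                parent_total_difficulty: int,
                                ttd: int) -> bool:
    return block_total_difficulty >= ttd and parent_total_difficulty < ttd


assert is_valid_terminal_pow_block(100, 99, ttd=100) is True
assert is_valid_terminal_pow_block(101, 100, ttd=100) is False  # parent already reached TTD
```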
@@ -1,7 +1,8 @@
from eth2spec.test.helpers.constants import CAPELLA
from eth2spec.test.helpers.keys import pubkeys
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change

from eth2spec.test.context import spec_state_test, expect_assertion_error, with_capella_and_later, always_bls
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_phases, always_bls


def run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=True):

@@ -37,14 +38,14 @@ def run_bls_to_execution_change_processing(spec, state, signed_address_change, v
    yield 'post', state


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success(spec, state):
    signed_address_change = get_signed_address_change(spec, state)
    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_not_activated(spec, state):
    validator_index = 3

@@ -62,7 +63,7 @@ def test_success_not_activated(spec, state):
    assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_in_activation_queue(spec, state):
    validator_index = 3

@@ -80,7 +81,7 @@ def test_success_in_activation_queue(spec, state):
    assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_in_exit_queue(spec, state):
    validator_index = 3

@@ -93,7 +94,7 @@ def test_success_in_exit_queue(spec, state):
    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_exited(spec, state):
    validator_index = 4

@@ -110,7 +111,7 @@ def test_success_exited(spec, state):
    assert not spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_withdrawable(spec, state):
    validator_index = 4

@@ -128,18 +129,18 @@ def test_success_withdrawable(spec, state):
    assert spec.is_fully_withdrawable_validator(validator, balance, spec.get_current_epoch(state))


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_fail_val_index_out_of_range(spec, state):
def test_invalid_val_index_out_of_range(spec, state):
    # Create for one validator beyond the validator list length
    signed_address_change = get_signed_address_change(spec, state, validator_index=len(state.validators))

    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_fail_already_0x01(spec, state):
def test_invalid_already_0x01(spec, state):
    # Create for one validator beyond the validator list length
    validator_index = len(state.validators) // 2
    validator = state.validators[validator_index]

@@ -149,9 +150,9 @@ def test_fail_already_0x01(spec, state):
    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_fail_incorrect_from_bls_pubkey(spec, state):
def test_invalid_incorrect_from_bls_pubkey(spec, state):
    # Create for one validator beyond the validator list length
    validator_index = 2
    signed_address_change = get_signed_address_change(

@@ -163,10 +164,10 @@ def test_fail_incorrect_from_bls_pubkey(spec, state):
    yield from run_bls_to_execution_change_processing(spec, state, signed_address_change, valid=False)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
@always_bls
def test_fail_bad_signature(spec, state):
def test_invalid_bad_signature(spec, state):
    signed_address_change = get_signed_address_change(spec, state)
    # Mutate signature
    signed_address_change.signature = spec.BLSSignature(b'\x42' * 96)
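Behind the `already_0x01` and `incorrect_from_bls_pubkey` cases is the credential check in `process_bls_to_execution_change`: the validator must still carry BLS-prefixed withdrawal credentials, and those credentials must commit to the change's `from_bls_pubkey`. A simplified standalone sketch, with `hashlib.sha256` standing in for the spec's `hash()`:

```python
import hashlib

BLS_WITHDRAWAL_PREFIX = b'\x00'  # mirrors the spec constant's meaning


def credentials_match_change(withdrawal_credentials: bytes, from_bls_pubkey: bytes) -> bool:
    # Already 0x01-style credentials: the change must be rejected.
    if withdrawal_credentials[:1] != BLS_WITHDRAWAL_PREFIX:
        return False
    # The credential body must equal hash(from_bls_pubkey)[1:].
    return withdrawal_credentials[1:] == hashlib.sha256(from_bls_pubkey).digest()[1:]


pubkey = b'\xaa' * 48
creds = BLS_WITHDRAWAL_PREFIX + hashlib.sha256(pubkey).digest()[1:]
assert credentials_match_change(creds, pubkey)
assert not credentials_match_change(creds, b'\xbb' * 48)
```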
@@ -1,7 +1,8 @@
from eth2spec.test.context import (
    spec_state_test,
    with_capella_and_later,
    with_phases,
)
from eth2spec.test.helpers.constants import CAPELLA
from eth2spec.test.helpers.state import next_epoch_via_block
from eth2spec.test.helpers.deposits import (
    prepare_state_and_deposit,

@@ -10,7 +11,7 @@ from eth2spec.test.helpers.deposits import (
from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_top_up_to_withdrawn_validator(spec, state):
    validator_index = 0
@@ -1,28 +1,60 @@
import random

from eth2spec.test.context import (
    spec_state_test,
    expect_assertion_error,
    with_presets,
    with_phases,
)
from eth2spec.test.helpers.constants import MAINNET, MINIMAL, CAPELLA
from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
    compute_el_block_hash,
)
from eth2spec.test.helpers.random import (
    randomize_state,
)
from eth2spec.test.helpers.state import (
    next_epoch,
    next_slot,
)
from eth2spec.test.helpers.withdrawals import (
    prepare_expected_withdrawals,
    set_eth1_withdrawal_credential_with_balance,
    set_validator_fully_withdrawable,
    set_validator_partially_withdrawable,
)

from eth2spec.test.context import spec_state_test, expect_assertion_error, with_capella_and_later

from eth2spec.test.helpers.state import next_slot


def verify_post_state(state, spec, expected_withdrawals,
                      fully_withdrawable_indices, partial_withdrawals_indices):
    # Consider verifying also the condition when no withdrawals are expected.
    if len(expected_withdrawals) == 0:
        return

    expected_withdrawals_validator_indices = [withdrawal.validator_index for withdrawal in expected_withdrawals]
    assert state.next_withdrawal_index == expected_withdrawals[-1].index + 1

    if len(expected_withdrawals) == spec.MAX_WITHDRAWALS_PER_PAYLOAD:
        # NOTE: ideally we would also check in the case with
        # fewer than maximum withdrawals but that requires the pre-state info
        next_withdrawal_validator_index = (expected_withdrawals_validator_indices[-1] + 1) % len(state.validators)
        assert state.next_withdrawal_validator_index == next_withdrawal_validator_index

    for index in fully_withdrawable_indices:
        if index in expected_withdrawals_validator_indices:
            assert state.balances[index] == 0
        else:
            assert state.balances[index] > 0
    for index in partial_withdrawals_indices:
        if index in expected_withdrawals_validator_indices:
            assert state.balances[index] == spec.MAX_EFFECTIVE_BALANCE
        else:
            assert state.balances[index] > spec.MAX_EFFECTIVE_BALANCE


def prepare_withdrawal_queue(spec, state, num_withdrawals):
    pre_queue_len = len(state.withdrawal_queue)
    validator_len = len(state.validators)
    for i in range(num_withdrawals):
        withdrawal = spec.Withdrawal(
            index=i + 5,
            validator_index=(i + 1000) % validator_len,
            address=b'\x42' * 20,
            amount=200000 + i,
        )
        state.withdrawal_queue.append(withdrawal)

    assert len(state.withdrawal_queue) == num_withdrawals + pre_queue_len


def run_withdrawals_processing(spec, state, execution_payload, valid=True):
def run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=None,
                               fully_withdrawable_indices=None, partial_withdrawals_indices=None, valid=True):
    """
    Run ``process_execution_payload``, yielding:
    - pre-state ('pre')

@@ -30,10 +62,12 @@ def run_withdrawals_processing(spec, state, execution_payload, valid=True):
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    expected_withdrawals = spec.get_expected_withdrawals(state)
    assert len(expected_withdrawals) <= spec.MAX_WITHDRAWALS_PER_PAYLOAD
    if num_expected_withdrawals is not None:
        assert len(expected_withdrawals) == num_expected_withdrawals

    pre_withdrawal_queue = state.withdrawal_queue.copy()
    num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(pre_withdrawal_queue))

    pre_state = state.copy()
    yield 'pre', state
    yield 'execution_payload', execution_payload

@@ -46,18 +80,27 @@ def run_withdrawals_processing(spec, state, execution_payload, valid=True):

    yield 'post', state

    if len(pre_withdrawal_queue) == 0:
        assert len(state.withdrawal_queue) == 0
    elif len(pre_withdrawal_queue) <= num_withdrawals:
        assert len(state.withdrawal_queue) == 0
    else:
        assert state.withdrawal_queue == pre_withdrawal_queue[num_withdrawals:]
    if len(expected_withdrawals) == 0:
        next_withdrawal_validator_index = (
            pre_state.next_withdrawal_validator_index + spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
        )
        assert state.next_withdrawal_validator_index == next_withdrawal_validator_index % len(state.validators)
    elif len(expected_withdrawals) <= spec.MAX_WITHDRAWALS_PER_PAYLOAD:
        bound = min(spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
        assert len(spec.get_expected_withdrawals(state)) <= bound
    elif len(expected_withdrawals) > spec.MAX_WITHDRAWALS_PER_PAYLOAD:
        raise ValueError('len(expected_withdrawals) should not be greater than MAX_WITHDRAWALS_PER_PAYLOAD')

    if fully_withdrawable_indices is not None or partial_withdrawals_indices is not None:
        verify_post_state(state, spec, expected_withdrawals, fully_withdrawable_indices, partial_withdrawals_indices)

    return expected_withdrawals


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_success_empty_queue(spec, state):
    assert len(state.withdrawal_queue) == 0
def test_success_zero_expected_withdrawals(spec, state):
    assert len(spec.get_expected_withdrawals(state)) == 0

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)
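The new post-state checks in `run_withdrawals_processing` rest on how the Capella sweep works: `get_expected_withdrawals` scans at most `MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP` validators starting at `next_withdrawal_validator_index`, emits at most `MAX_WITHDRAWALS_PER_PAYLOAD` withdrawals, and when nothing is withdrawable the pointer still advances by the full sweep bound. A simplified sketch with toy constants (the real function also checks effective balance and withdrawal credentials):

```python
# Simplified sketch of the Capella withdrawal sweep, with toy constants.
MAX_WITHDRAWALS_PER_PAYLOAD = 4
MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP = 16
MAX_EFFECTIVE_BALANCE = 32


def expected_withdrawals(balances, fully_withdrawable, next_index):
    n = len(balances)
    withdrawals = []
    for offset in range(min(n, MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)):
        i = (next_index + offset) % n
        if fully_withdrawable[i] and balances[i] > 0:
            withdrawals.append((i, balances[i]))  # full withdrawal: entire balance
        elif balances[i] > MAX_EFFECTIVE_BALANCE:
            withdrawals.append((i, balances[i] - MAX_EFFECTIVE_BALANCE))  # partial: excess only
        if len(withdrawals) == MAX_WITHDRAWALS_PER_PAYLOAD:
            break
    return withdrawals


# Nothing withdrawable: empty list, and the caller still advances the sweep pointer.
assert expected_withdrawals([32] * 8, [False] * 8, next_index=0) == []
```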
@ -65,48 +108,144 @@ def test_success_empty_queue(spec, state):
|
|||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_success_one_in_queue(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_success_one_full_withdrawal(spec, state):
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_full_withdrawals=1)
|
||||
assert len(fully_withdrawable_indices) == 1
|
||||
assert len(partial_withdrawals_indices) == 0
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_success_max_per_slot_in_queue(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
def test_success_one_partial_withdrawal(spec, state):
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_partial_withdrawals=1)
|
||||
assert len(fully_withdrawable_indices) == 0
|
||||
assert len(partial_withdrawals_indices) == 1
|
||||
for index in partial_withdrawals_indices:
|
||||
assert state.balances[index] > spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices
|
||||
)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_success_a_lot_in_queue(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
def test_success_max_per_slot(spec, state):
|
||||
num_full_withdrawals = spec.MAX_WITHDRAWALS_PER_PAYLOAD // 2
|
||||
num_partial_withdrawals = spec.MAX_WITHDRAWALS_PER_PAYLOAD - num_full_withdrawals
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state,
|
||||
num_full_withdrawals=num_full_withdrawals, num_partial_withdrawals=num_partial_withdrawals)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload)
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_presets([MAINNET], reason="too few validators with minimal config")
|
||||
@spec_state_test
|
||||
def test_success_all_fully_withdrawable_in_one_sweep(spec, state):
|
||||
assert len(state.validators) <= spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
|
||||
withdrawal_count = len(state.validators)
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_full_withdrawals=withdrawal_count)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_success_all_fully_withdrawable(spec, state):
|
||||
assert len(state.validators) > spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
|
||||
withdrawal_count = spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_full_withdrawals=withdrawal_count)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_presets([MAINNET], reason="too few validators with minimal config")
|
||||
@spec_state_test
|
||||
def test_success_all_partially_withdrawable_in_one_sweep(spec, state):
|
||||
assert len(state.validators) <= spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
|
||||
withdrawal_count = len(state.validators)
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_partial_withdrawals=withdrawal_count)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@with_presets([MINIMAL], reason="too many validators with mainnet config")
|
||||
@spec_state_test
|
||||
def test_success_all_partially_withdrawable(spec, state):
|
||||
assert len(state.validators) > spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
|
||||
withdrawal_count = spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP
|
||||
fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
|
||||
spec, state, num_partial_withdrawals=withdrawal_count)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
|
||||
yield from run_withdrawals_processing(
|
||||
spec, state, execution_payload,
|
||||
fully_withdrawable_indices=fully_withdrawable_indices,
|
||||
partial_withdrawals_indices=partial_withdrawals_indices)
|
||||
|
||||
|
||||
#
|
||||
# Failure cases in which the number of withdrawals in the execution_payload is incorrect
|
||||
#
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_empty_queue_non_empty_withdrawals(spec, state):
|
||||
assert len(state.withdrawal_queue) == 0
|
||||
|
||||
def test_invalid_non_withdrawable_non_empty_withdrawals(spec, state):
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
withdrawal = spec.Withdrawal(
|
||||
|
@ -116,54 +255,125 @@ def test_fail_empty_queue_non_empty_withdrawals(spec, state):
|
|||
amount=420,
|
||||
)
|
||||
execution_payload.withdrawals.append(withdrawal)
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_one_in_queue_none_in_withdrawals(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_invalid_one_expected_full_withdrawal_and_none_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = []
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_one_in_queue_two_in_withdrawals(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_invalid_one_expected_partial_withdrawal_and_none_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = []
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_invalid_one_expected_full_withdrawal_and_duplicate_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=2)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_max_per_slot_in_queue_one_less_in_withdrawals(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
def test_invalid_two_expected_partial_withdrawal_and_duplicate_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=2)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.withdrawals.append(execution_payload.withdrawals[0].copy())
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_a_lot_in_queue_too_few_in_withdrawals(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
def test_invalid_max_per_slot_full_withdrawals_and_one_less_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_invalid_max_per_slot_partial_withdrawals_and_one_less_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_fully_withdrawable_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_partially_withdrawable_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_invalid_a_lot_mixed_withdrawable_in_queue_too_few_in_withdrawals(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD,
|
||||
num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals = execution_payload.withdrawals[:-1]
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
@ -172,46 +382,75 @@ def test_fail_a_lot_in_queue_too_few_in_withdrawals(spec, state):
|
|||
# Failure cases in which the withdrawals in the execution_payload are incorrect
|
||||
#
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_incorrect_dequeue_index(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_invalid_incorrect_withdrawal_index(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals[0].index += 1
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_incorrect_dequeue_address(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_invalid_incorrect_address_full(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals[0].address = b'\xff' * 20
|
||||
execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)
|
||||
|
||||
yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)
|
||||
|
||||
|
||||
@with_capella_and_later
|
||||
@with_phases([CAPELLA])
|
||||
@spec_state_test
|
||||
def test_fail_incorrect_dequeue_amount(spec, state):
|
||||
prepare_withdrawal_queue(spec, state, 1)
|
||||
def test_invalid_incorrect_address_partial(spec, state):
|
||||
prepare_expected_withdrawals(spec, state, num_partial_withdrawals=1)
|
||||
|
||||
next_slot(spec, state)
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.withdrawals[0].address = b'\xff' * 20
|
||||
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_incorrect_amount_full(spec, state):
    prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.withdrawals[0].amount += 1
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_fail_one_of_many_dequeued_incorrectly(spec, state):
    prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
def test_invalid_incorrect_amount_partial(spec, state):
    prepare_expected_withdrawals(spec, state, num_full_withdrawals=1)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.withdrawals[0].amount += 1
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_one_of_many_incorrectly_full(spec, state):
    prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)

@@ -222,14 +461,34 @@ def test_fail_one_of_many_dequeued_incorrectly(spec, state):
    withdrawal.index += 1
    withdrawal.address = b'\x99' * 20
    withdrawal.amount += 4000000
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_fail_many_dequeued_incorrectly(spec, state):
    prepare_withdrawal_queue(spec, state, spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)
def test_invalid_one_of_many_incorrectly_partial(spec, state):
    prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)
    num_withdrawals = len(execution_payload.withdrawals)

    # Pick withdrawal in middle of list and mutate
    withdrawal = execution_payload.withdrawals[num_withdrawals // 2]
    withdrawal.index += 1
    withdrawal.address = b'\x99' * 20
    withdrawal.amount += 4000000
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_many_incorrectly_full(spec, state):
    prepare_expected_withdrawals(spec, state, num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)

@@ -240,5 +499,371 @@ def test_fail_many_dequeued_incorrectly(spec, state):
            withdrawal.address = i.to_bytes(20, 'big')
        else:
            withdrawal.amount += 1
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_many_incorrectly_partial(spec, state):
    prepare_expected_withdrawals(spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 4)

    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)
    for i, withdrawal in enumerate(execution_payload.withdrawals):
        if i % 3 == 0:
            withdrawal.index += 1
        elif i % 3 == 1:
            withdrawal.address = i.to_bytes(20, 'big')
        else:
            withdrawal.amount += 1
    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    yield from run_withdrawals_processing(spec, state, execution_payload, valid=False)


#
# More full withdrawal cases
#

@with_phases([CAPELLA])
@spec_state_test
def test_withdrawable_epoch_but_0_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 10000000000
    state.balances[0] = 0

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 0
    state.balances[0] = 0

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 0
    state.balances[0] = 100000000

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_no_withdrawals_but_some_next_epoch(spec, state):
    current_epoch = spec.get_current_epoch(state)

    # Make a few validators withdrawable at the *next* epoch
    for index in range(3):
        set_validator_fully_withdrawable(spec, state, index, current_epoch + 1)

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_all_withdrawal(spec, state):
    # Make all validators withdrawable
    for index in range(len(state.validators)):
        set_validator_fully_withdrawable(spec, state, index)

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(
        spec, state, execution_payload,
        num_expected_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)


def run_random_full_withdrawals_test(spec, state, rng):
    randomize_state(spec, state, rng)
    for index in range(len(state.validators)):
        # 50% withdrawable
        if rng.choice([True, False]):
            set_validator_fully_withdrawable(spec, state, index)
            validator = state.validators[index]
            # 12.5% unset credentials
            if rng.randint(0, 7) == 0:
                validator.withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
            # 12.5% not enough balance
            if rng.randint(0, 7) == 0:
                state.balances[index] = 0
            # 12.5% not close enough epoch
            if rng.randint(0, 7) == 0:
                validator.withdrawable_epoch += 1

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload)


@with_phases([CAPELLA])
@spec_state_test
def test_random_full_withdrawals_0(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, random.Random(444))


@with_phases([CAPELLA])
@spec_state_test
def test_random_full_withdrawals_1(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, random.Random(420))


@with_phases([CAPELLA])
@spec_state_test
def test_random_full_withdrawals_2(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, random.Random(200))


@with_phases([CAPELLA])
@spec_state_test
def test_random_full_withdrawals_3(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, random.Random(2000000))


#
# More partial withdrawal cases
#

@with_phases([CAPELLA])
@spec_state_test
def test_success_no_max_effective_balance(spec, state):
    validator_index = len(state.validators) // 2
    # To be partially withdrawable, the validator's effective balance must be maxed out
    set_eth1_withdrawal_credential_with_balance(spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE - 1)
    validator = state.validators[validator_index]

    assert validator.effective_balance < spec.MAX_EFFECTIVE_BALANCE
    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_success_no_excess_balance(spec, state):
    validator_index = len(state.validators) // 2
    # To be partially withdrawable, the validator needs an excess balance
    set_eth1_withdrawal_credential_with_balance(spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE)
    validator = state.validators[validator_index]

    assert validator.effective_balance == spec.MAX_EFFECTIVE_BALANCE
    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_success_excess_balance_but_no_max_effective_balance(spec, state):
    validator_index = len(state.validators) // 2
    set_validator_partially_withdrawable(spec, state, validator_index)
    validator = state.validators[validator_index]

    # To be partially withdrawable, the validator needs both a maxed out effective balance and an excess balance
    validator.effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1

    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=0)


@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_not_yet_active(spec, state):
    validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
    state.validators[validator_index].activation_epoch += 4
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
    validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state) + 1
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state) + 1)

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_exited(spec, state):
    validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
    validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
    state.validators[validator_index].slashed = True
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
    validator_index = min(len(state.validators) // 2, spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP - 1)
    state.validators[validator_index].slashed = True
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=1)


@with_phases([CAPELLA])
@spec_state_test
def test_success_two_partial_withdrawable(spec, state):
    set_validator_partially_withdrawable(spec, state, 0)
    set_validator_partially_withdrawable(spec, state, 1)

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload, num_expected_withdrawals=2)


@with_phases([CAPELLA])
@spec_state_test
def test_success_max_partial_withdrawable(spec, state):
    # Sanity check that this test works for this state
    assert len(state.validators) >= spec.MAX_WITHDRAWALS_PER_PAYLOAD

    for i in range(spec.MAX_WITHDRAWALS_PER_PAYLOAD):
        set_validator_partially_withdrawable(spec, state, i)

    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(
        spec, state, execution_payload, num_expected_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)


@with_phases([CAPELLA])
@with_presets([MINIMAL], reason="not enough validators with mainnet config")
@spec_state_test
def test_success_max_plus_one_withdrawable(spec, state):
    # Sanity check that this test works for this state
    assert len(state.validators) >= spec.MAX_WITHDRAWALS_PER_PAYLOAD + 1

    # More than MAX_WITHDRAWALS_PER_PAYLOAD partially withdrawable
    for i in range(spec.MAX_WITHDRAWALS_PER_PAYLOAD + 1):
        set_validator_partially_withdrawable(spec, state, i)

    execution_payload = build_empty_execution_payload(spec, state)

    # Should only have MAX_WITHDRAWALS_PER_PAYLOAD withdrawals created
    yield from run_withdrawals_processing(
        spec, state, execution_payload, num_expected_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD)


def run_random_partial_withdrawals_test(spec, state, rng):
    for _ in range(rng.randint(0, 2)):
        next_epoch(spec, state)
    randomize_state(spec, state, rng)

    num_validators = len(state.validators)
    state.next_withdrawal_validator_index = rng.randint(0, num_validators - 1)

    num_partially_withdrawable = rng.randint(0, num_validators - 1)
    partially_withdrawable_indices = rng.sample(range(num_validators), num_partially_withdrawable)
    for index in partially_withdrawable_indices:
        set_validator_partially_withdrawable(spec, state, index, excess_balance=rng.randint(1, 1000000000))

    execution_payload = build_empty_execution_payload(spec, state)

    # Note: due to the randomness and other block processing, some of these set as "partially withdrawable"
    # may not be partially withdrawable once we get to ``process_withdrawals``,
    # thus *not* using the optional third param in this call
    yield from run_withdrawals_processing(spec, state, execution_payload)


@with_phases([CAPELLA])
@spec_state_test
def test_random_0(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(0))


@with_phases([CAPELLA])
@spec_state_test
def test_random_partial_withdrawals_1(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(1))


@with_phases([CAPELLA])
@spec_state_test
def test_random_partial_withdrawals_2(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(2))


@with_phases([CAPELLA])
@spec_state_test
def test_random_partial_withdrawals_3(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(3))


@with_phases([CAPELLA])
@spec_state_test
def test_random_partial_withdrawals_4(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(4))


@with_phases([CAPELLA])
@spec_state_test
def test_random_partial_withdrawals_5(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(5))
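Note: every test above drives block-level withdrawal processing through `run_withdrawals_processing`. Its Capella flavor is not shown in this diff (only the EIP-4844 no-op variant appears further down), but from the call sites it plausibly yields the pre-state, the execution payload, and the post-state, and optionally checks how many withdrawals are expected. A rough sketch under those assumptions only; the `_sketch` name and the placement of the count check are guesses, not the repository's actual helper:

    from eth2spec.test.context import expect_assertion_error


    def run_withdrawals_processing_sketch(spec, state, execution_payload,
                                          num_expected_withdrawals=None, valid=True):
        # Optionally assert how many withdrawals the sweep should dequeue for this state.
        if num_expected_withdrawals is not None:
            assert len(spec.get_expected_withdrawals(state)) == num_expected_withdrawals

        yield 'pre', state
        yield 'execution_payload', execution_payload

        if not valid:
            # An invalid payload must make process_withdrawals raise, and there is no post-state.
            expect_assertion_error(lambda: spec.process_withdrawals(state, execution_payload))
            yield 'post', None
            return

        spec.process_withdrawals(state, execution_payload)
        yield 'post', state
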
@@ -1,174 +0,0 @@
from random import Random

from eth2spec.test.context import (
    with_capella_and_later,
    spec_state_test,
)
from eth2spec.test.helpers.random import (
    randomize_state,
)
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_to,
)
from eth2spec.test.helpers.withdrawals import (
    set_validator_fully_withdrawable,
)


def run_process_full_withdrawals(spec, state, num_expected_withdrawals=None):
    run_epoch_processing_to(spec, state, 'process_full_withdrawals')

    pre_next_withdrawal_index = state.next_withdrawal_index
    pre_withdrawal_queue = state.withdrawal_queue.copy()
    to_be_withdrawn_indices = [
        index for index, validator in enumerate(state.validators)
        if spec.is_fully_withdrawable_validator(validator, state.balances[index], spec.get_current_epoch(state))
    ]

    if num_expected_withdrawals is not None:
        assert len(to_be_withdrawn_indices) == num_expected_withdrawals
    else:
        num_expected_withdrawals = len(to_be_withdrawn_indices)

    yield 'pre', state
    spec.process_full_withdrawals(state)
    yield 'post', state

    for index in to_be_withdrawn_indices:
        assert state.balances[index] == 0

    assert len(state.withdrawal_queue) == len(pre_withdrawal_queue) + num_expected_withdrawals
    assert state.next_withdrawal_index == pre_next_withdrawal_index + num_expected_withdrawals


@with_capella_and_later
@spec_state_test
def test_no_withdrawable_validators(spec, state):
    pre_validators = state.validators.copy()
    yield from run_process_full_withdrawals(spec, state, 0)

    assert pre_validators == state.validators


@with_capella_and_later
@spec_state_test
def test_withdrawable_epoch_but_0_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 10000000000
    state.balances[0] = 0

    yield from run_process_full_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_withdrawable_epoch_but_0_effective_balance_0_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 0
    state.balances[0] = 0

    yield from run_process_full_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_withdrawable_epoch_but_0_effective_balance_nonzero_balance(spec, state):
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, 0, current_epoch)

    state.validators[0].effective_balance = 0
    state.balances[0] = 100000000

    yield from run_process_full_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_no_withdrawals_but_some_next_epoch(spec, state):
    current_epoch = spec.get_current_epoch(state)

    # Make a few validators withdrawable at the *next* epoch
    for index in range(3):
        set_validator_fully_withdrawable(spec, state, index, current_epoch + 1)

    yield from run_process_full_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_single_withdrawal(spec, state):
    # Make one validator withdrawable
    set_validator_fully_withdrawable(spec, state, 0)

    assert state.next_withdrawal_index == 0
    yield from run_process_full_withdrawals(spec, state, 1)

    assert state.next_withdrawal_index == 1


@with_capella_and_later
@spec_state_test
def test_multi_withdrawal(spec, state):
    # Make a few validators withdrawable
    for index in range(3):
        set_validator_fully_withdrawable(spec, state, index)

    yield from run_process_full_withdrawals(spec, state, 3)


@with_capella_and_later
@spec_state_test
def test_all_withdrawal(spec, state):
    # Make all validators withdrawable
    for index in range(len(state.validators)):
        set_validator_fully_withdrawable(spec, state, index)

    yield from run_process_full_withdrawals(spec, state, len(state.validators))


def run_random_full_withdrawals_test(spec, state, rng):
    randomize_state(spec, state, rng)
    for index in range(len(state.validators)):
        # 50% withdrawable
        if rng.choice([True, False]):
            set_validator_fully_withdrawable(spec, state, index)
            validator = state.validators[index]
            # 12.5% unset credentials
            if rng.randint(0, 7) == 0:
                validator.withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + validator.withdrawal_credentials[1:]
            # 12.5% not enough balance
            if rng.randint(0, 7) == 0:
                state.balances[index] = 0
            # 12.5% not close enough epoch
            if rng.randint(0, 7) == 0:
                validator.withdrawable_epoch += 1

    yield from run_process_full_withdrawals(spec, state, None)


@with_capella_and_later
@spec_state_test
def test_random_withdrawals_0(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, Random(444))


@with_capella_and_later
@spec_state_test
def test_random_withdrawals_1(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, Random(420))


@with_capella_and_later
@spec_state_test
def test_random_withdrawals_2(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, Random(200))


@with_capella_and_later
@spec_state_test
def test_random_withdrawals_3(spec, state):
    yield from run_random_full_withdrawals_test(spec, state, Random(2000000))
@@ -1,262 +0,0 @@
import random
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.context import (
    with_capella_and_later,
    spec_state_test,
    with_presets,
)
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_to
from eth2spec.test.helpers.state import next_epoch
from eth2spec.test.helpers.random import randomize_state
from eth2spec.test.helpers.withdrawals import (
    set_validator_partially_withdrawable,
    set_eth1_withdrawal_credential_with_balance,
)


def run_process_partial_withdrawals(spec, state, num_expected_withdrawals=None):
    # Run rest of epoch processing before predicting partial withdrawals as
    # balance changes can affect withdrawability
    run_epoch_processing_to(spec, state, 'process_partial_withdrawals')

    pre_next_withdrawal_index = state.next_withdrawal_index
    pre_withdrawal_queue = state.withdrawal_queue.copy()

    partially_withdrawable_indices = [
        index for index, validator in enumerate(state.validators)
        if spec.is_partially_withdrawable_validator(validator, state.balances[index])
    ]
    num_partial_withdrawals = min(len(partially_withdrawable_indices), spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)

    if num_expected_withdrawals is not None:
        assert num_partial_withdrawals == num_expected_withdrawals
    else:
        num_expected_withdrawals = num_partial_withdrawals

    yield 'pre', state
    spec.process_partial_withdrawals(state)
    yield 'post', state

    post_partially_withdrawable_indices = [
        index for index, validator in enumerate(state.validators)
        if spec.is_partially_withdrawable_validator(validator, state.balances[index])
    ]

    assert len(partially_withdrawable_indices) - num_partial_withdrawals == len(post_partially_withdrawable_indices)

    assert len(state.withdrawal_queue) == len(pre_withdrawal_queue) + num_expected_withdrawals
    assert state.next_withdrawal_index == pre_next_withdrawal_index + num_expected_withdrawals


@with_capella_and_later
@spec_state_test
def test_success_no_withdrawable(spec, state):
    pre_validators = state.validators.copy()
    yield from run_process_partial_withdrawals(spec, state, 0)

    assert pre_validators == state.validators


@with_capella_and_later
@spec_state_test
def test_success_no_max_effective_balance(spec, state):
    validator_index = len(state.validators) // 2
    # To be partially withdrawable, the validator's effective balance must be maxed out
    set_eth1_withdrawal_credential_with_balance(spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE - 1)
    validator = state.validators[validator_index]

    assert validator.effective_balance < spec.MAX_EFFECTIVE_BALANCE
    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    yield from run_process_partial_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_success_no_excess_balance(spec, state):
    validator_index = len(state.validators) // 2
    # To be partially withdrawable, the validator needs an excess balance
    set_eth1_withdrawal_credential_with_balance(spec, state, validator_index, spec.MAX_EFFECTIVE_BALANCE)
    validator = state.validators[validator_index]

    assert validator.effective_balance == spec.MAX_EFFECTIVE_BALANCE
    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    yield from run_process_partial_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_success_excess_balance_but_no_max_effective_balance(spec, state):
    validator_index = len(state.validators) // 2
    set_validator_partially_withdrawable(spec, state, validator_index)
    validator = state.validators[validator_index]

    # To be partially withdrawable, the validator needs both a maxed out effective balance and an excess balance
    validator.effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1

    assert not spec.is_partially_withdrawable_validator(validator, state.balances[validator_index])

    yield from run_process_partial_withdrawals(spec, state, 0)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable(spec, state):
    validator_index = len(state.validators) // 2
    set_validator_partially_withdrawable(spec, state, validator_index)

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable_not_yet_active(spec, state):
    validator_index = len(state.validators) // 2
    state.validators[validator_index].activation_epoch += 4
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable_in_exit_queue(spec, state):
    validator_index = len(state.validators) // 2
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state) + 1
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))
    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state) + 1)

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable_exited(spec, state):
    validator_index = len(state.validators) // 2
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable_active_and_slashed(spec, state):
    validator_index = len(state.validators) // 2
    state.validators[validator_index].slashed = True
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_one_partial_withdrawable_exited_and_slashed(spec, state):
    validator_index = len(state.validators) // 2
    state.validators[validator_index].slashed = True
    state.validators[validator_index].exit_epoch = spec.get_current_epoch(state)
    set_validator_partially_withdrawable(spec, state, validator_index)

    assert not spec.is_active_validator(state.validators[validator_index], spec.get_current_epoch(state))

    yield from run_process_partial_withdrawals(spec, state, 1)


@with_capella_and_later
@spec_state_test
def test_success_two_partial_withdrawable(spec, state):
    set_validator_partially_withdrawable(spec, state, 0)
    set_validator_partially_withdrawable(spec, state, 1)

    yield from run_process_partial_withdrawals(spec, state, 2)


@with_capella_and_later
@spec_state_test
def test_success_max_partial_withdrawable(spec, state):
    # Sanity check that this test works for this state
    assert len(state.validators) >= spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH

    for i in range(spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH):
        set_validator_partially_withdrawable(spec, state, i)

    yield from run_process_partial_withdrawals(spec, state, spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)


@with_capella_and_later
@with_presets([MINIMAL], reason="not enough validators with mainnet config")
@spec_state_test
def test_success_max_plus_one_withdrawable(spec, state):
    # Sanity check that this test works for this state
    assert len(state.validators) >= spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH + 1

    # More than MAX_PARTIAL_WITHDRAWALS_PER_EPOCH partially withdrawable
    for i in range(spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH + 1):
        set_validator_partially_withdrawable(spec, state, i)

    # Should only have MAX_PARTIAL_WITHDRAWALS_PER_EPOCH withdrawals created
    yield from run_process_partial_withdrawals(spec, state, spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH)


def run_random_partial_withdrawals_test(spec, state, rng):
    for _ in range(rng.randint(0, 2)):
        next_epoch(spec, state)
    randomize_state(spec, state, rng)

    num_validators = len(state.validators)
    state.next_partial_withdrawal_validator_index = rng.randint(0, num_validators - 1)

    num_partially_withdrawable = rng.randint(0, num_validators - 1)
    partially_withdrawable_indices = rng.sample(range(num_validators), num_partially_withdrawable)
    for index in partially_withdrawable_indices:
        set_validator_partially_withdrawable(spec, state, index, excess_balance=rng.randint(1, 1000000000))

    # Note: due to the randomness and other epoch processing, some of these set as "partially withdrawable"
    # may not be partially withdrawable once we get to ``process_partial_withdrawals``,
    # thus *not* using the optional third param in this call
    yield from run_process_partial_withdrawals(spec, state)


@with_capella_and_later
@spec_state_test
def test_random_0(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(0))


@with_capella_and_later
@spec_state_test
def test_random_1(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(1))


@with_capella_and_later
@spec_state_test
def test_random_2(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(2))


@with_capella_and_later
@spec_state_test
def test_random_3(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(3))


@with_capella_and_later
@spec_state_test
def test_random_4(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(4))


@with_capella_and_later
@spec_state_test
def test_random_5(spec, state):
    yield from run_random_partial_withdrawals_test(spec, state, random.Random(5))
@@ -0,0 +1,84 @@
from random import Random

from eth2spec.test.context import (
    with_phases,
    with_custom_state,
    with_presets,
    spec_test, with_state,
    low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
    BELLATRIX, CAPELLA,
    MINIMAL,
)
from eth2spec.test.helpers.capella.fork import (
    CAPELLA_FORK_TEST_META_TAGS,
    run_fork_test,
)
from eth2spec.test.helpers.random import randomize_state


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_state
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_0(spec, phases, state):
    randomize_state(spec, state, rng=Random(1010))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_state
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_1(spec, phases, state):
    randomize_state(spec, state, rng=Random(2020))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_state
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_2(spec, phases, state):
    randomize_state(spec, state, rng=Random(3030))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_state
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_3(spec, phases, state):
    randomize_state(spec, state, rng=Random(4040))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_low_balances(spec, phases, state):
    randomize_state(spec, state, rng=Random(5050))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@spec_test
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_misc_balances(spec, phases, state):
    randomize_state(spec, state, rng=Random(6060))
    yield from run_fork_test(phases[CAPELLA], state)


@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA])
@with_presets([MINIMAL],
              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@spec_test
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
@with_meta_tags(CAPELLA_FORK_TEST_META_TAGS)
def test_capella_fork_random_large_validator_set(spec, phases, state):
    randomize_state(spec, state, rng=Random(7070))
    yield from run_fork_test(phases[CAPELLA], state)
@@ -1,24 +1,33 @@
from eth2spec.test.context import (
    with_capella_and_later, spec_state_test
    with_phases, spec_state_test
)

from eth2spec.test.helpers.constants import CAPELLA
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot, build_empty_block,
    build_empty_block_for_next_slot,
    build_empty_block,
)
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
from eth2spec.test.helpers.state import (
    next_slot,
)
from eth2spec.test.helpers.withdrawals import (
    set_validator_fully_withdrawable,
    set_validator_partially_withdrawable,
    prepare_expected_withdrawals,
)
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits


@with_capella_and_later
#
# BLSToExecutionChange
#

@with_phases([CAPELLA])
@spec_state_test
def test_successful_bls_change(spec, state):
def test_success_bls_change(spec, state):
    index = 0
    signed_address_change = get_signed_address_change(spec, state, validator_index=index)
    pre_credentials = state.validators[index].withdrawal_credentials

@@ -39,76 +48,9 @@ def test_successful_bls_change(spec, state):
    assert post_credentials[12:] == signed_address_change.message.to_execution_address


@with_capella_and_later
@with_phases([CAPELLA])
@spec_state_test
def test_full_withdrawal_in_epoch_transition(spec, state):
    index = 0
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, index, current_epoch)
    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    assert state.balances[index] == 0


@with_capella_and_later
@spec_state_test
def test_partial_withdrawal_in_epoch_transition(spec, state):
    index = state.next_withdrawal_index
    set_validator_partially_withdrawable(spec, state, index, excess_balance=1000000000000)
    pre_balance = state.balances[index]
    pre_withdrawal_queue_len = len(state.withdrawal_queue)

    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    assert state.balances[index] < pre_balance
    # Potentially less than due to sync committee penalty
    assert state.balances[index] <= spec.MAX_EFFECTIVE_BALANCE
    # Withdrawal is processed within the context of the block so queue empty
    assert len(state.withdrawal_queue) == pre_withdrawal_queue_len


@with_capella_and_later
@spec_state_test
def test_many_partial_withdrawals_in_epoch_transition(spec, state):
    assert len(state.validators) > spec.MAX_WITHDRAWALS_PER_PAYLOAD
    assert spec.MAX_PARTIAL_WITHDRAWALS_PER_EPOCH > spec.MAX_WITHDRAWALS_PER_PAYLOAD

    for i in range(spec.MAX_WITHDRAWALS_PER_PAYLOAD + 1):
        index = (i + state.next_withdrawal_index) % len(state.validators)
        set_validator_partially_withdrawable(spec, state, index, excess_balance=1000000000000)

    pre_withdrawal_queue_len = len(state.withdrawal_queue)

    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    # All new partial withdrawals processed except 1
    assert len(state.withdrawal_queue) == pre_withdrawal_queue_len + 1


@with_capella_and_later
@spec_state_test
def test_exit_and_bls_change(spec, state):
def test_success_exit_and_bls_change(spec, state):
    # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
    state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -133,3 +75,184 @@ def test_exit_and_bls_change(spec, state):
    assert not spec.is_fully_withdrawable_validator(validator, balance, current_epoch)
    assert validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
    assert spec.is_fully_withdrawable_validator(validator, balance, validator.withdrawable_epoch)


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_duplicate_bls_changes_same_block(spec, state):
    index = 0
    signed_address_change = get_signed_address_change(spec, state, validator_index=index)
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)

    # Double BLSToExecutionChange of the same validator
    for _ in range(2):
        block.body.bls_to_execution_changes.append(signed_address_change)

    signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)

    yield 'blocks', [signed_block]
    yield 'post', None


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_two_bls_changes_of_different_addresses_same_validator_same_block(spec, state):
    index = 0

    signed_address_change_1 = get_signed_address_change(spec, state, validator_index=index,
                                                        to_execution_address=b'\x12' * 20)
    signed_address_change_2 = get_signed_address_change(spec, state, validator_index=index,
                                                        to_execution_address=b'\x34' * 20)
    assert signed_address_change_1 != signed_address_change_2

    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)

    block.body.bls_to_execution_changes.append(signed_address_change_1)
    block.body.bls_to_execution_changes.append(signed_address_change_2)

    signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True)

    yield 'blocks', [signed_block]
    yield 'post', None


#
# Withdrawals
#

@with_phases([CAPELLA])
@spec_state_test
def test_full_withdrawal_in_epoch_transition(spec, state):
    index = 0
    current_epoch = spec.get_current_epoch(state)
    set_validator_fully_withdrawable(spec, state, index, current_epoch)
    assert len(spec.get_expected_withdrawals(state)) == 1

    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    assert state.balances[index] == 0
    assert len(spec.get_expected_withdrawals(state)) == 0


@with_phases([CAPELLA])
@spec_state_test
def test_partial_withdrawal_in_epoch_transition(spec, state):
    index = state.next_withdrawal_index
    set_validator_partially_withdrawable(spec, state, index, excess_balance=1000000000000)
    pre_balance = state.balances[index]

    assert len(spec.get_expected_withdrawals(state)) == 1

    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    assert state.balances[index] < pre_balance
    # Potentially less than due to sync committee penalty
    assert state.balances[index] <= spec.MAX_EFFECTIVE_BALANCE
    assert len(spec.get_expected_withdrawals(state)) == 0


@with_phases([CAPELLA])
@spec_state_test
def test_many_partial_withdrawals_in_epoch_transition(spec, state):
    assert len(state.validators) > spec.MAX_WITHDRAWALS_PER_PAYLOAD

    for i in range(spec.MAX_WITHDRAWALS_PER_PAYLOAD + 1):
        index = (i + state.next_withdrawal_index) % len(state.validators)
        set_validator_partially_withdrawable(spec, state, index, excess_balance=1000000000000)

    assert len(spec.get_expected_withdrawals(state)) == spec.MAX_WITHDRAWALS_PER_PAYLOAD

    yield 'pre', state

    # trigger epoch transition
    block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state

    assert len(spec.get_expected_withdrawals(state)) == 1


def _perform_valid_withdrawal(spec, state):
    fully_withdrawable_indices, partial_withdrawals_indices = prepare_expected_withdrawals(
        spec, state, num_partial_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 2,
        num_full_withdrawals=spec.MAX_WITHDRAWALS_PER_PAYLOAD * 2)

    next_slot(spec, state)
    pre_next_withdrawal_index = state.next_withdrawal_index

    expected_withdrawals = spec.get_expected_withdrawals(state)

    pre_state = state.copy()

    # Block 1
    block = build_empty_block_for_next_slot(spec, state)
    signed_block_1 = state_transition_and_sign_block(spec, state, block)

    withdrawn_indices = [withdrawal.validator_index for withdrawal in expected_withdrawals]
    fully_withdrawable_indices = list(set(fully_withdrawable_indices).difference(set(withdrawn_indices)))
    partial_withdrawals_indices = list(set(partial_withdrawals_indices).difference(set(withdrawn_indices)))
    assert state.next_withdrawal_index == pre_next_withdrawal_index + spec.MAX_WITHDRAWALS_PER_PAYLOAD

    withdrawn_indices = [withdrawal.validator_index for withdrawal in expected_withdrawals]
    fully_withdrawable_indices = list(set(fully_withdrawable_indices).difference(set(withdrawn_indices)))
    partial_withdrawals_indices = list(set(partial_withdrawals_indices).difference(set(withdrawn_indices)))
    assert state.next_withdrawal_index == pre_next_withdrawal_index + spec.MAX_WITHDRAWALS_PER_PAYLOAD

    return pre_state, signed_block_1, pre_next_withdrawal_index


@with_phases([CAPELLA])
@spec_state_test
def test_withdrawal_success_two_blocks(spec, state):
    pre_state, signed_block_1, pre_next_withdrawal_index = _perform_valid_withdrawal(spec, state)

    yield 'pre', pre_state

    # Block 2
    block = build_empty_block_for_next_slot(spec, state)
    signed_block_2 = state_transition_and_sign_block(spec, state, block)

    assert state.next_withdrawal_index == pre_next_withdrawal_index + spec.MAX_WITHDRAWALS_PER_PAYLOAD * 2

    yield 'blocks', [signed_block_1, signed_block_2]
    yield 'post', state


@with_phases([CAPELLA])
@spec_state_test
def test_invalid_withdrawal_fail_second_block_payload_isnt_compatible(spec, state):
    _perform_valid_withdrawal(spec, state)

    # Block 2
    block = build_empty_block_for_next_slot(spec, state)

    # Modify state.next_withdrawal_index to incorrect number
    state.next_withdrawal_index += 1

    # Only need to output the state transition of signed_block_2
    yield 'pre', state

    signed_block_2 = state_transition_and_sign_block(spec, state, block, expect_fail=True)

    yield 'blocks', [signed_block_2]
    yield 'post', None
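Note: `_perform_valid_withdrawal` above leans on `prepare_expected_withdrawals` from `eth2spec.test.helpers.withdrawals`, whose body is not part of this diff. From the call sites (keyword counts in, a pair of validator-index lists out) it presumably flags disjoint sets of validators as fully and partially withdrawable and returns their indices. A rough sketch along those lines only; the `_sketch` suffix, the random sampling strategy, and the default seed are assumptions, not the real helper:

    from random import Random

    from eth2spec.test.helpers.withdrawals import (
        set_validator_fully_withdrawable,
        set_validator_partially_withdrawable,
    )


    def prepare_expected_withdrawals_sketch(spec, state,
                                            num_full_withdrawals=0, num_partial_withdrawals=0,
                                            rng=Random(5566)):
        # Sample disjoint validator sets, then flag each set via the helpers imported above.
        indices = rng.sample(range(len(state.validators)), num_full_withdrawals + num_partial_withdrawals)
        fully_withdrawable_indices = indices[:num_full_withdrawals]
        partial_withdrawals_indices = indices[num_full_withdrawals:]

        for index in fully_withdrawable_indices:
            set_validator_fully_withdrawable(spec, state, index)
        for index in partial_withdrawals_indices:
            set_validator_partially_withdrawable(spec, state, index)

        return fully_withdrawable_indices, partial_withdrawals_indices
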
@@ -11,11 +11,12 @@ from eth2spec.utils import bls

from .exceptions import SkippedTest
from .helpers.constants import (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, SHARDING,
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844,
    MINIMAL, MAINNET,
    ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX,
    ALL_PHASES,
    ALL_FORK_UPGRADES,
)
from .helpers.forks import is_post_fork
from .helpers.typing import SpecForkName, PresetBaseName
from .helpers.genesis import create_genesis_state
from .utils import (
@@ -257,6 +258,12 @@ def dump_skipping_message(reason: str) -> None:
    raise SkippedTest(message)


def description(case_description: str):
    def entry(fn):
        return with_meta_tags({'description': case_description})(fn)
    return entry


def spec_test(fn):
    # Bls switch must be wrapped by vector_test,
    # to fully go through the yielded bls switch data, before setting back the BLS setting.

@@ -266,7 +273,7 @@ def spec_test(fn):
    return vector_test()(bls_switch(fn))


# shorthand for decorating @spectest() @with_state @single_phase
# shorthand for decorating @spec_test @with_state @single_phase
def spec_state_test(fn):
    return spec_test(with_state(single_phase(fn)))
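Note: the new `description` decorator above simply attaches a human-readable label through `with_meta_tags`. A hypothetical usage sketch (the test name and description text are made up for illustration and do not appear in this diff):

    @with_phases([CAPELLA])
    @spec_state_test
    @description("hypothetical: sweep stops at MAX_WITHDRAWALS_PER_PAYLOAD")
    def test_example_with_description(spec, state):
        # The meta tag ends up in the generated test vector metadata.
        yield 'pre', state
        yield 'post', state
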
@@ -290,30 +297,16 @@ def _check_current_version(spec, state, version_name):


def config_fork_epoch_overrides(spec, state):
    overrides = {}
    if state.fork.current_version == spec.config.GENESIS_FORK_VERSION:
        pass
    elif _check_current_version(spec, state, ALTAIR):
        overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
    elif _check_current_version(spec, state, BELLATRIX):
        overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
    elif _check_current_version(spec, state, CAPELLA):
        overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
    elif _check_current_version(spec, state, EIP4844):
        overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['EIP4844_FORK_EPOCH'] = spec.GENESIS_EPOCH
    elif _check_current_version(spec, state, SHARDING):
        overrides['ALTAIR_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['BELLATRIX_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['CAPELLA_FORK_EPOCH'] = spec.GENESIS_EPOCH
        overrides['SHARDING_FORK_EPOCH'] = spec.GENESIS_EPOCH
    else:
        assert False  # Fork is missing
    return overrides
        return {}

    for fork in ALL_PHASES:
        if fork != PHASE0 and _check_current_version(spec, state, fork):
            overrides = {}
            for f in ALL_PHASES:
                if f != PHASE0 and is_post_fork(fork, f):
                    overrides[f.upper() + '_FORK_EPOCH'] = spec.GENESIS_EPOCH
            return overrides


def spec_state_test_with_matching_config(fn):
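Note: the rewritten `config_fork_epoch_overrides` derives the overrides generically: whatever fork the state is currently on, every non-phase0 fork up to and including it gets its `*_FORK_EPOCH` pinned to `GENESIS_EPOCH`, so new forks no longer need a hand-written branch. For example, assuming `spec` and `state` come from a Capella test fixture, the loop should reproduce the old Capella branch exactly (an illustrative check, not an additional test in this diff):

    # For a CAPELLA-versioned state, the loop is expected to produce:
    expected_overrides = {
        'ALTAIR_FORK_EPOCH': spec.GENESIS_EPOCH,
        'BELLATRIX_FORK_EPOCH': spec.GENESIS_EPOCH,
        'CAPELLA_FORK_EPOCH': spec.GENESIS_EPOCH,
    }
    assert config_fork_epoch_overrides(spec, state) == expected_overrides
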
@@ -408,6 +401,15 @@ def with_all_phases(fn):
    return with_phases(ALL_PHASES)(fn)


def with_all_phases_from(earliest_phase):
    """
    A decorator factory for running tests with the given phase and every later phase
    """
    def decorator(fn):
        return with_phases([phase for phase in ALL_PHASES if is_post_fork(phase, earliest_phase)])(fn)
    return decorator


def with_all_phases_except(exclusion_phases):
    """
    A decorator factory for running tests with every phase except the ones listed

@@ -417,6 +419,12 @@ def with_all_phases_except(exclusion_phases):
    return decorator


with_altair_and_later = with_all_phases_from(ALTAIR)
with_bellatrix_and_later = with_all_phases_from(BELLATRIX)
with_capella_and_later = with_all_phases_from(CAPELLA)
with_eip4844_and_later = with_all_phases_from(EIP4844)


def _get_preset_targets(kw):
    preset_name = DEFAULT_TEST_PRESET
    if 'preset' in kw:
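Note: with the `with_all_phases_from` aliases above, `with_capella_and_later` is no longer a hand-maintained exclusion list; it expands to whatever phases `is_post_fork` reports as Capella or later. A small illustrative sketch (the test body is hypothetical):

    @with_capella_and_later
    @spec_state_test
    def test_example_runs_on_capella_and_later(spec, state):
        # Equivalent to with_phases([phase for phase in ALL_PHASES if is_post_fork(phase, CAPELLA)])
        yield 'pre', state
        yield 'post', state
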
@@ -587,28 +595,6 @@ def with_config_overrides(config_overrides):
    return decorator


def is_post_altair(spec):
    return spec.fork not in FORKS_BEFORE_ALTAIR


def is_post_bellatrix(spec):
    return spec.fork not in FORKS_BEFORE_BELLATRIX


def is_post_capella(spec):
    return spec.fork == CAPELLA


def is_post_eip4844(spec):
    return spec.fork == EIP4844


with_altair_and_later = with_all_phases_except([PHASE0])
with_bellatrix_and_later = with_all_phases_except([PHASE0, ALTAIR])
with_capella_and_later = with_all_phases_except([PHASE0, ALTAIR, BELLATRIX, EIP4844])
with_eip4844_and_later = with_all_phases_except([PHASE0, ALTAIR, BELLATRIX, CAPELLA])


def only_generator(reason):
    def _decorator(inner):
        def _wrapper(*args, **kwargs):

@@ -620,6 +606,13 @@ def only_generator(reason):
    return _decorator


def with_test_suite_name(suite_name: str):
    def _decorator(inner):
        inner.suite_name = suite_name
        return inner
    return _decorator


#
# Fork transition state tests
#
@@ -0,0 +1,40 @@
from eth2spec.test.helpers.bls_to_execution_changes import get_signed_address_change
from eth2spec.test.context import spec_state_test, expect_assertion_error, with_eip4844_and_later


def run_bls_to_execution_change_processing_no_op(spec, state, signed_address_change, valid=True):
    """
    Run ``process_bls_to_execution_change``, yielding:
      - pre-state ('pre')
      - address-change ('address_change')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    pre_state = state.copy()

    # yield pre-state
    yield 'pre', state

    yield 'address_change', signed_address_change

    # If the address_change is invalid, processing is aborted, and there is no post-state.
    if not valid:
        expect_assertion_error(lambda: spec.process_bls_to_execution_change(state, signed_address_change))
        yield 'post', None
        return

    # process address change
    spec.process_bls_to_execution_change(state, signed_address_change)

    # yield post-state
    yield 'post', state

    # Make sure state has NOT been changed
    assert state == pre_state


@with_eip4844_and_later
@spec_state_test
def test_no_op(spec, state):
    signed_address_change = get_signed_address_change(spec, state)
    yield from run_bls_to_execution_change_processing_no_op(spec, state, signed_address_change)
@@ -0,0 +1,41 @@

from eth2spec.test.context import spec_state_test, expect_assertion_error, with_eip4844_and_later
from eth2spec.test.helpers.execution_payload import (
    build_empty_execution_payload,
)
from eth2spec.test.helpers.state import next_slot


def run_withdrawals_processing(spec, state, execution_payload, valid=True):
    """
    Run ``process_withdrawals``, yielding:
      - pre-state ('pre')
      - execution payload ('execution_payload')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    pre_state = state.copy()

    yield 'pre', state
    yield 'execution_payload', execution_payload

    if not valid:
        expect_assertion_error(lambda: spec.process_withdrawals(state, execution_payload))
        yield 'post', None
        return

    spec.process_withdrawals(state, execution_payload)

    yield 'post', state

    # Make sure state has NOT been changed
    assert state == pre_state


@with_eip4844_and_later
@spec_state_test
def test_no_op(spec, state):
    next_slot(spec, state)
    execution_payload = build_empty_execution_payload(spec, state)

    yield from run_withdrawals_processing(spec, state, execution_payload)
@ -0,0 +1,82 @@
|
|||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_presets,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CAPELLA, EIP4844,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_epoch_via_block,
|
||||
)
|
||||
from eth2spec.test.helpers.eip4844.fork import (
|
||||
EIP4844_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_base_state(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch(spec, phases, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch_with_block(spec, phases, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_many_next_epoch(spec, phases, state):
|
||||
for _ in range(3):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_low_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_misc_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_presets([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_large_validator_set(spec, phases, state):
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
|
@ -0,0 +1,84 @@
|
|||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
with_presets,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.constants import (
|
||||
CAPELLA, EIP4844,
|
||||
MINIMAL,
|
||||
)
|
||||
from eth2spec.test.helpers.eip4844.fork import (
|
||||
EIP4844_FORK_TEST_META_TAGS,
|
||||
run_fork_test,
|
||||
)
|
||||
from eth2spec.test.helpers.random import randomize_state
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_0(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(1010))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_1(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(2020))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_2(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(3030))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_3(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(4040))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_low_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(5050))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_misc_balances(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(6060))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
||||
|
||||
|
||||
@with_phases(phases=[CAPELLA], other_phases=[EIP4844])
|
||||
@with_presets([MINIMAL],
|
||||
reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
|
||||
@spec_test
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@with_meta_tags(EIP4844_FORK_TEST_META_TAGS)
|
||||
def test_eip4844_fork_random_large_validator_set(spec, phases, state):
|
||||
randomize_state(spec, state, rng=Random(7070))
|
||||
yield from run_fork_test(phases[EIP4844], state)
|
|
@ -0,0 +1,438 @@
|
|||
"""
|
||||
This module is generated from the ``random`` test generator.
|
||||
Please do not edit this file manually.
|
||||
See the README for that generator for more information.
|
||||
"""
|
||||
|
||||
from eth2spec.test.helpers.constants import EIP4844
|
||||
from eth2spec.test.context import (
|
||||
misc_balances_in_default_range_with_many_validators,
|
||||
with_phases,
|
||||
zero_activation_threshold,
|
||||
only_generator,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
always_bls,
|
||||
spec_test,
|
||||
with_custom_state,
|
||||
single_phase,
|
||||
)
|
||||
from eth2spec.test.utils.randomized_block_tests import (
|
||||
run_generated_randomized_test,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_0(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_1(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_2(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_3(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_4(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_5(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_6(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_7(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_8(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_9(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_10(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_11(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_12(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:last_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_13(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:random_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_14(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
||||
|
||||
|
||||
@only_generator("randomized test for broad coverage, not point-to-point CI")
|
||||
@with_phases([EIP4844])
|
||||
@with_custom_state(
|
||||
balances_fn=misc_balances_in_default_range_with_many_validators,
|
||||
threshold_fn=zero_activation_threshold
|
||||
)
|
||||
@spec_test
|
||||
@single_phase
|
||||
@always_bls
|
||||
def test_randomized_15(spec, state):
|
||||
# scenario as high-level, informal text:
|
||||
# epochs:epochs_until_leak,slots:0,with-block:no_block
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
# epochs:1,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:no_block
|
||||
# epochs:0,slots:0,with-block:random_block_eip4844
|
||||
scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501
|
||||
yield from run_generated_randomized_test(
|
||||
spec,
|
||||
state,
|
||||
scenario,
|
||||
)
|
|
@@ -8,6 +8,9 @@ from eth2spec.test.context import (
    spec_state_test,
    with_eip4844_and_later,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
)

@@ -22,6 +25,7 @@ def test_one_blob(spec, state):
    opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]

@@ -30,13 +34,14 @@ def test_one_blob(spec, state):

@with_eip4844_and_later
@spec_state_test
def test_multiple_blobs(spec, state):
def test_max_blobs(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=5)
    opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]

@@ -0,0 +1,20 @@
from eth2spec.test.context import (
    spec_state_test,
    with_eip4844_and_later,
)
from eth2spec.test.helpers.sharding import (
    get_sample_blob,
)


@with_eip4844_and_later
@spec_state_test
def test_verify_kzg_proof(spec, state):
    x = 3
    blob = get_sample_blob(spec)
    commitment = spec.blob_to_kzg_commitment(blob)
    polynomial = spec.blob_to_polynomial(blob)
    proof = spec.compute_kzg_proof(polynomial, x)

    y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
    assert spec.verify_kzg_proof_impl(commitment, x, y, proof)
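# Note (editor's sketch, not part of the diff): the proof/evaluation relation checked above
# is not specific to x = 3. A slightly broader sketch, reusing only helpers that already
# appear in this file and assuming the chosen points lie outside the blob's evaluation
# domain (as x = 3 does), would exercise several evaluation points:
#
#     for x in [2, 3, 5]:
#         proof = spec.compute_kzg_proof(polynomial, x)
#         y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
#         assert spec.verify_kzg_proof_impl(commitment, x, y, proof)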
@@ -0,0 +1,23 @@

from eth2spec.test.helpers.constants import (
    EIP4844,
    MINIMAL,
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
)
from eth2spec.test.context import (
    with_phases,
    spec_state_test,
    with_presets,
)


@with_phases([EIP4844])
@spec_state_test
@with_presets([MINIMAL])
def test_tx_peek_blob_versioned_hashes(spec, state):
    otx, blobs, commitments = get_sample_opaque_tx(spec)
    data_hashes = spec.tx_peek_blob_versioned_hashes(otx)
    expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments]
    assert expected == data_hashes
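# Note (editor's sketch, not part of the diff): the mapping exercised by the assertion above
# amounts to prefixing a hash of the commitment with a one-byte version. A minimal sketch,
# assuming the EIP-4844 constant VERSIONED_HASH_VERSION_KZG is exposed on the spec module:
#
#     def versioned_hash_sketch(spec, kzg_commitment):
#         # 0x01-prefixed SHA-256 of the commitment, as in kzg_commitment_to_versioned_hash
#         return spec.VERSIONED_HASH_VERSION_KZG + spec.hash(kzg_commitment)[1:]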
@@ -8,32 +8,20 @@ from eth2spec.test.context import (
    spec_state_test,
    with_eip4844_and_later,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_block_hash,
)
from eth2spec.test.helpers.sharding import (
    get_sample_opaque_tx,
    get_sample_blob,
)


@with_eip4844_and_later
@spec_state_test
def test_verify_kzg_proof(spec, state):
    x = 3
    polynomial = get_sample_blob(spec)
    polynomial = [int(i) for i in polynomial]
    commitment = spec.blob_to_kzg_commitment(polynomial)

    # Get the proof
    proof = spec.compute_kzg_proof(polynomial, x)

    y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x)
    assert spec.verify_kzg_proof(commitment, x, y, proof)


def _run_validate_blobs_sidecar_test(spec, state, blob_count):
    block = build_empty_block_for_next_slot(spec, state)
    opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count)
    block.body.blob_kzg_commitments = blob_kzg_commitments
    block.body.execution_payload.transactions = [opaque_tx]
    block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload)
    state_transition_and_sign_block(spec, state, block)

    blobs_sidecar = spec.get_blobs_sidecar(block, blobs)

@@ -41,6 +29,12 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count):
    spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar)


@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_zero_blobs(spec, state):
    _run_validate_blobs_sidecar_test(spec, state, blob_count=0)


@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_one_blob(spec, state):

@@ -55,5 +49,5 @@ def test_validate_blobs_sidecar_two_blobs(spec, state):

@with_eip4844_and_later
@spec_state_test
def test_validate_blobs_sidecar_ten_blobs(spec, state):
    _run_validate_blobs_sidecar_test(spec, state, blob_count=10)
def test_validate_blobs_sidecar_max_blobs(spec, state):
    _run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK)

@@ -2,9 +2,10 @@ from lru import LRU

from typing import List

from eth2spec.test.context import expect_assertion_error, is_post_altair
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.ssz.ssz_typing import Bitlist

@@ -254,7 +255,7 @@ def state_transition_with_full_block(spec,
                                     sync_aggregate=None,
                                     block=None):
    """
    Build and apply a block with attestions at the calculated `slot_to_attest` of current epoch and/or previous epoch.
    Build and apply a block with attestations at the calculated `slot_to_attest` of current epoch and/or previous epoch.
    """
    if block is None:
        block = build_empty_block_for_next_slot(spec, state)

@@ -1,5 +1,5 @@
from eth2spec.test.context import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls

@@ -2,7 +2,7 @@ from eth2spec.utils import bls
from eth2spec.test.helpers.keys import pubkeys, privkeys, pubkey_to_privkey


def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubkey=None):
def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubkey=None, to_execution_address=None):
    if validator_index is None:
        validator_index = 0

@@ -13,11 +13,14 @@ def get_signed_address_change(spec, state, validator_index=None, withdrawal_pubk
    else:
        withdrawal_privkey = pubkey_to_privkey[withdrawal_pubkey]

    if to_execution_address is None:
        to_execution_address = b'\x42' * 20

    domain = spec.get_domain(state, spec.DOMAIN_BLS_TO_EXECUTION_CHANGE)
    address_change = spec.BLSToExecutionChange(
        validator_index=validator_index,
        from_bls_pubkey=withdrawal_pubkey,
        to_execution_address=b'\x42' * 20,
        to_execution_address=to_execution_address,
    )

    signing_root = spec.compute_signing_root(address_change, domain)
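# Note (editor's sketch, not part of the diff): a short usage sketch of the new keyword
# argument, with a hypothetical 20-byte execution-layer address:
#
#     signed_address_change = get_signed_address_change(
#         spec,
#         state,
#         validator_index=0,
#         to_execution_address=b'\x11' * 20,  # hypothetical address value
#     )
#     # and feed it through the processing runner added earlier in this change set:
#     # yield from run_bls_to_execution_change_processing_no_op(spec, state, signed_address_change)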
@@ -26,18 +26,16 @@ ALL_PHASES = (
# The forks that output to the test vectors.
TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844)

FORKS_BEFORE_ALTAIR = (PHASE0,)
FORKS_BEFORE_BELLATRIX = (PHASE0, ALTAIR)

# TODO: no EIP4844 fork tests now. Should add when we figure out the content of Capella.
ALL_FORK_UPGRADES = {
    # pre_fork_name: post_fork_name
    PHASE0: ALTAIR,
    ALTAIR: BELLATRIX,
    BELLATRIX: CAPELLA,
    CAPELLA: EIP4844,
}
ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items()
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key not in FORKS_BEFORE_ALTAIR}
AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0}
AFTER_BELLATRIX_PRE_POST_FORKS = AFTER_BELLATRIX_UPGRADES.items()

#
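# Note (editor's sketch, not part of the diff): these (pre, post) pairs are what the
# fork-transition test generators iterate over; a minimal, illustrative consumption pattern:
#
#     from eth2spec.test.helpers.constants import ALL_PRE_POST_FORKS
#
#     for pre_fork, post_fork in ALL_PRE_POST_FORKS:
#         # e.g. parametrize one transition test per upgrade boundary
#         print(f"transition: {pre_fork} -> {post_fork}")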
@ -1,9 +1,7 @@
|
|||
from random import Random
|
||||
|
||||
from eth2spec.test.context import (
|
||||
is_post_altair,
|
||||
expect_assertion_error,
|
||||
)
|
||||
from eth2spec.test.context import expect_assertion_error
|
||||
from eth2spec.test.helpers.forks import is_post_altair
|
||||
from eth2spec.test.helpers.keys import pubkeys, privkeys
|
||||
from eth2spec.test.helpers.state import get_balance
|
||||
from eth2spec.utils import bls
|
||||
|
@ -210,25 +208,50 @@ def run_deposit_processing(spec, state, deposit, validator_index, valid=True, ef
|
|||
if not effective or not bls.KeyValidate(deposit.data.pubkey):
|
||||
assert len(state.validators) == pre_validator_count
|
||||
assert len(state.balances) == pre_validator_count
|
||||
if validator_index < pre_validator_count:
|
||||
if is_top_up:
|
||||
assert get_balance(state, validator_index) == pre_balance
|
||||
else:
|
||||
if validator_index < pre_validator_count:
|
||||
# top-up
|
||||
if is_top_up:
|
||||
# Top-ups do not change effective balance
|
||||
assert state.validators[validator_index].effective_balance == pre_effective_balance
|
||||
assert len(state.validators) == pre_validator_count
|
||||
assert len(state.balances) == pre_validator_count
|
||||
else:
|
||||
# new validator
|
||||
assert len(state.validators) == pre_validator_count + 1
|
||||
assert len(state.balances) == pre_validator_count + 1
|
||||
assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
|
||||
|
||||
if is_top_up:
|
||||
# Top-ups do not change effective balance
|
||||
assert state.validators[validator_index].effective_balance == pre_effective_balance
|
||||
else:
|
||||
effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit.data.amount)
|
||||
effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
assert state.validators[validator_index].effective_balance == effective_balance
|
||||
|
||||
assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
|
||||
|
||||
assert state.eth1_deposit_index == state.eth1_data.deposit_count
|
||||
|
||||
|
||||
def run_deposit_processing_with_specific_fork_version(
|
||||
spec,
|
||||
state,
|
||||
fork_version,
|
||||
valid=True,
|
||||
effective=True):
|
||||
validator_index = len(state.validators)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
|
||||
pubkey = pubkeys[validator_index]
|
||||
privkey = privkeys[validator_index]
|
||||
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]
|
||||
|
||||
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
|
||||
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
|
||||
deposit_data = spec.DepositData(
|
||||
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
|
||||
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
|
||||
)
|
||||
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)
|
||||
|
||||
state.eth1_deposit_index = 0
|
||||
state.eth1_data.deposit_root = root
|
||||
state.eth1_data.deposit_count = 1
|
||||
|
||||
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective)
|
||||
|
|
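# Note (editor's sketch, not part of the diff): the effective-balance assertion for a
# brand-new validator above reduces to a cap-and-round-down rule. A worked example using
# mainnet preset values (Gwei), shown only to make the arithmetic concrete:
#
#     MAX_EFFECTIVE_BALANCE = 32_000_000_000          # 32 ETH, mainnet preset
#     EFFECTIVE_BALANCE_INCREMENT = 1_000_000_000     # 1 ETH, mainnet preset
#
#     def expected_effective_balance(amount):
#         effective_balance = min(MAX_EFFECTIVE_BALANCE, amount)
#         return effective_balance - effective_balance % EFFECTIVE_BALANCE_INCREMENT
#
#     assert expected_effective_balance(17_300_000_000) == 17_000_000_000  # rounded down to 17 ETH
#     assert expected_effective_balance(40_000_000_000) == 32_000_000_000  # capped at 32 ETH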
|
@ -0,0 +1,63 @@
|
|||
from eth2spec.test.helpers.constants import (
|
||||
EIP4844,
|
||||
)
|
||||
|
||||
|
||||
EIP4844_FORK_TEST_META_TAGS = {
|
||||
'fork': EIP4844,
|
||||
}
|
||||
|
||||
|
||||
def run_fork_test(post_spec, pre_state):
|
||||
yield 'pre', pre_state
|
||||
|
||||
post_state = post_spec.upgrade_to_eip4844(pre_state)
|
||||
|
||||
# Stable fields
|
||||
stable_fields = [
|
||||
'genesis_time', 'genesis_validators_root', 'slot',
|
||||
# History
|
||||
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
|
||||
# Eth1
|
||||
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
|
||||
# Registry
|
||||
'validators', 'balances',
|
||||
# Randomness
|
||||
'randao_mixes',
|
||||
# Slashings
|
||||
'slashings',
|
||||
# Participation
|
||||
'previous_epoch_participation', 'current_epoch_participation',
|
||||
# Finality
|
||||
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
|
||||
# Inactivity
|
||||
'inactivity_scores',
|
||||
# Sync
|
||||
'current_sync_committee', 'next_sync_committee',
|
||||
# Withdrawals
|
||||
'next_withdrawal_index', 'next_withdrawal_validator_index',
|
||||
]
|
||||
for field in stable_fields:
|
||||
assert getattr(pre_state, field) == getattr(post_state, field)
|
||||
|
||||
# Modified fields
|
||||
modified_fields = ['fork', 'latest_execution_payload_header']
|
||||
for field in modified_fields:
|
||||
assert getattr(pre_state, field) != getattr(post_state, field)
|
||||
|
||||
assert len(pre_state.validators) == len(post_state.validators)
|
||||
for pre_validator, post_validator in zip(pre_state.validators, post_state.validators):
|
||||
stable_validator_fields = [
|
||||
'pubkey', 'withdrawal_credentials',
|
||||
'effective_balance',
|
||||
'slashed',
|
||||
'activation_eligibility_epoch', 'activation_epoch', 'exit_epoch', 'withdrawable_epoch',
|
||||
]
|
||||
for field in stable_validator_fields:
|
||||
assert getattr(pre_validator, field) == getattr(post_validator, field)
|
||||
|
||||
assert pre_state.fork.current_version == post_state.fork.previous_version
|
||||
assert post_state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION
|
||||
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
|
||||
|
||||
yield 'post', post_state
|
|
@@ -1,5 +1,5 @@

from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.forks import is_post_altair


def get_process_calls(spec):

@ -1,69 +1,10 @@
|
|||
from eth_hash.auto import keccak
|
||||
from trie import HexaryTrie
|
||||
from rlp import encode
|
||||
from rlp.sedes import big_endian_int, Binary, List
|
||||
|
||||
from eth2spec.debug.random_value import get_random_bytes_list
|
||||
from eth2spec.test.context import is_post_capella
|
||||
|
||||
|
||||
def build_empty_execution_payload(spec, state, randao_mix=None):
|
||||
"""
|
||||
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
|
||||
"""
|
||||
latest = state.latest_execution_payload_header
|
||||
timestamp = spec.compute_timestamp_at_slot(state, state.slot)
|
||||
empty_txs = spec.List[spec.Transaction, spec.MAX_TRANSACTIONS_PER_PAYLOAD]()
|
||||
|
||||
if randao_mix is None:
|
||||
randao_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))
|
||||
|
||||
payload = spec.ExecutionPayload(
|
||||
parent_hash=latest.block_hash,
|
||||
fee_recipient=spec.ExecutionAddress(),
|
||||
state_root=latest.state_root, # no changes to the state
|
||||
receipts_root=b"no receipts here" + b"\x00" * 16, # TODO: root of empty MPT may be better.
|
||||
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
|
||||
block_number=latest.block_number + 1,
|
||||
prev_randao=randao_mix,
|
||||
gas_limit=latest.gas_limit, # retain same limit
|
||||
gas_used=0, # empty block, 0 gas
|
||||
timestamp=timestamp,
|
||||
extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
|
||||
base_fee_per_gas=latest.base_fee_per_gas, # retain same base_fee
|
||||
block_hash=spec.Hash32(),
|
||||
transactions=empty_txs,
|
||||
)
|
||||
if is_post_capella(spec):
|
||||
num_withdrawals = min(spec.MAX_WITHDRAWALS_PER_PAYLOAD, len(state.withdrawal_queue))
|
||||
payload.withdrawals = state.withdrawal_queue[:num_withdrawals]
|
||||
|
||||
# TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
|
||||
payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
|
||||
|
||||
return payload
|
||||
|
||||
|
||||
def build_randomized_execution_payload(spec, state, rng):
|
||||
execution_payload = build_empty_execution_payload(spec, state)
|
||||
execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
|
||||
execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
|
||||
execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
|
||||
execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
|
||||
get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
|
||||
)
|
||||
execution_payload.block_number = rng.randint(0, 10e10)
|
||||
execution_payload.gas_limit = rng.randint(0, 10e10)
|
||||
execution_payload.gas_used = rng.randint(0, 10e10)
|
||||
extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
|
||||
execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
|
||||
get_random_bytes_list(rng, extra_data_length)
|
||||
)
|
||||
execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)
|
||||
execution_payload.block_hash = spec.Hash32(get_random_bytes_list(rng, 32))
|
||||
|
||||
num_transactions = rng.randint(0, 100)
|
||||
execution_payload.transactions = [
|
||||
spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
|
||||
for _ in range(num_transactions)
|
||||
]
|
||||
|
||||
return execution_payload
|
||||
from eth2spec.test.helpers.forks import is_post_capella, is_post_eip4844
|
||||
|
||||
|
||||
def get_execution_payload_header(spec, execution_payload):
|
||||
|
@ -88,6 +29,174 @@ def get_execution_payload_header(spec, execution_payload):
|
|||
return payload_header
|
||||
|
||||
|
||||
# https://eips.ethereum.org/EIPS/eip-2718
|
||||
def compute_trie_root_from_indexed_data(data):
|
||||
"""
|
||||
Computes the root hash of `patriciaTrie(rlp(Index) => Data)` for a data array.
|
||||
"""
|
||||
t = HexaryTrie(db={})
|
||||
for i, obj in enumerate(data):
|
||||
k = encode(i, big_endian_int)
|
||||
t.set(k, obj)
|
||||
return t.root_hash
|
||||
|
||||
|
||||
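# Note (editor's sketch, not part of the diff): a small usage sketch mirroring how the
# trie-root helper is applied further down in compute_el_block_hash. Here `payload` is
# assumed to be an ExecutionPayload carrying the post-Capella `withdrawals` field:
#
#     transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
#
#     # withdrawal entries are RLP-encoded before being inserted into their trie:
#     withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
#     withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)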
# https://eips.ethereum.org/EIPS/eip-4895
|
||||
# https://eips.ethereum.org/EIPS/eip-4844
|
||||
def compute_el_header_block_hash(spec,
|
||||
payload_header,
|
||||
transactions_trie_root,
|
||||
withdrawals_trie_root=None):
|
||||
"""
|
||||
Computes the RLP execution block hash described by an `ExecutionPayloadHeader`.
|
||||
"""
|
||||
execution_payload_header_rlp = [
|
||||
# parent_hash
|
||||
(Binary(32, 32), payload_header.parent_hash),
|
||||
# ommers_hash
|
||||
(Binary(32, 32), bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
|
||||
# coinbase
|
||||
(Binary(20, 20), payload_header.fee_recipient),
|
||||
# state_root
|
||||
(Binary(32, 32), payload_header.state_root),
|
||||
# txs_root
|
||||
(Binary(32, 32), transactions_trie_root),
|
||||
# receipts_root
|
||||
(Binary(32, 32), payload_header.receipts_root),
|
||||
# logs_bloom
|
||||
(Binary(256, 256), payload_header.logs_bloom),
|
||||
# difficulty
|
||||
(big_endian_int, 0),
|
||||
# number
|
||||
(big_endian_int, payload_header.block_number),
|
||||
# gas_limit
|
||||
(big_endian_int, payload_header.gas_limit),
|
||||
# gas_used
|
||||
(big_endian_int, payload_header.gas_used),
|
||||
# timestamp
|
||||
(big_endian_int, payload_header.timestamp),
|
||||
# extradata
|
||||
(Binary(0, 32), payload_header.extra_data),
|
||||
# prev_randao
|
||||
(Binary(32, 32), payload_header.prev_randao),
|
||||
# nonce
|
||||
(Binary(8, 8), bytes.fromhex("0000000000000000")),
|
||||
# base_fee_per_gas
|
||||
(big_endian_int, payload_header.base_fee_per_gas),
|
||||
]
|
||||
if is_post_capella(spec):
|
||||
# withdrawals_root
|
||||
execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root))
|
||||
if is_post_eip4844(spec):
|
||||
# excess_data_gas
|
||||
execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas))
|
||||
|
||||
sedes = List([schema for schema, _ in execution_payload_header_rlp])
|
||||
values = [value for _, value in execution_payload_header_rlp]
|
||||
encoded = encode(values, sedes)
|
||||
|
||||
return spec.Hash32(keccak(encoded))
|
||||
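The fixed 32-byte constants used in this file are the standard empty-structure hashes of the execution layer; a small self-check, assuming the `eth-hash` and `rlp` packages the pyspec already depends on, would be:

# Hypothetical sanity check for the hard-coded constants (not part of the helpers).
from eth_hash.auto import keccak
from rlp import encode

assert keccak(encode([])) == bytes.fromhex(
    "1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")  # keccak(rlp([])): empty ommers list
assert keccak(encode(b"")) == bytes.fromhex(
    "56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")  # keccak(rlp(b"")): empty trie root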


# https://eips.ethereum.org/EIPS/eip-4895
def get_withdrawal_rlp(spec, withdrawal):
    withdrawal_rlp = [
        # index
        (big_endian_int, withdrawal.index),
        # validator_index
        (big_endian_int, withdrawal.validator_index),
        # address
        (Binary(20, 20), withdrawal.address),
        # amount
        (big_endian_int, spec.uint256(withdrawal.amount) * (10**9)),
    ]

    sedes = List([schema for schema, _ in withdrawal_rlp])
    values = [value for _, value in withdrawal_rlp]
    return encode(values, sedes)
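Note the amount field above: `Withdrawal.amount` is a Gwei value on the consensus layer, and the helper scales it by 10**9 before RLP-encoding, i.e. it writes a Wei amount. For instance:

# Worked example of the Gwei -> Wei scaling applied above (illustrative values).
amount_gwei = 32_000_000_000           # a 32 ETH withdrawal, as stored on the beacon chain
amount_encoded = amount_gwei * 10**9   # value placed into the RLP list
assert amount_encoded == 32 * 10**18   # 32 ETH in Wei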


def compute_el_block_hash(spec, payload):
    transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)

    if is_post_capella(spec):
        withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
        withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
    else:
        withdrawals_trie_root = None

    payload_header = get_execution_payload_header(spec, payload)

    return compute_el_header_block_hash(
        spec,
        payload_header,
        transactions_trie_root,
        withdrawals_trie_root,
    )


def build_empty_execution_payload(spec, state, randao_mix=None):
    """
    Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
    """
    latest = state.latest_execution_payload_header
    timestamp = spec.compute_timestamp_at_slot(state, state.slot)
    empty_txs = spec.List[spec.Transaction, spec.MAX_TRANSACTIONS_PER_PAYLOAD]()

    if randao_mix is None:
        randao_mix = spec.get_randao_mix(state, spec.get_current_epoch(state))

    payload = spec.ExecutionPayload(
        parent_hash=latest.block_hash,
        fee_recipient=spec.ExecutionAddress(),
        state_root=latest.state_root,  # no changes to the state
        receipts_root=spec.Bytes32(bytes.fromhex("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
        logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](),  # TODO: zeroed logs bloom for empty logs ok?
        block_number=latest.block_number + 1,
        prev_randao=randao_mix,
        gas_limit=latest.gas_limit,  # retain same limit
        gas_used=0,  # empty block, 0 gas
        timestamp=timestamp,
        extra_data=spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](),
        base_fee_per_gas=latest.base_fee_per_gas,  # retain same base_fee
        transactions=empty_txs,
    )
    if is_post_capella(spec):
        payload.withdrawals = spec.get_expected_withdrawals(state)

    payload.block_hash = compute_el_block_hash(spec, payload)

    return payload

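A typical use of this builder in a test might look like the following sketch; it is illustrative only and uses `state.copy()`/`spec.process_slots` to satisfy the "pre-state of the same slot" assumption in the docstring:

# Illustrative usage (not part of this diff).
def example_empty_payload_for_next_slot(spec, state):
    next_state = state.copy()
    spec.process_slots(next_state, state.slot + 1)   # payload is built against the block's slot
    return build_empty_execution_payload(spec, next_state)
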
def build_randomized_execution_payload(spec, state, rng):
    execution_payload = build_empty_execution_payload(spec, state)
    execution_payload.fee_recipient = spec.ExecutionAddress(get_random_bytes_list(rng, 20))
    execution_payload.state_root = spec.Bytes32(get_random_bytes_list(rng, 32))
    execution_payload.receipts_root = spec.Bytes32(get_random_bytes_list(rng, 32))
    execution_payload.logs_bloom = spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](
        get_random_bytes_list(rng, spec.BYTES_PER_LOGS_BLOOM)
    )
    execution_payload.block_number = rng.randint(0, 10e10)
    execution_payload.gas_limit = rng.randint(0, 10e10)
    execution_payload.gas_used = rng.randint(0, 10e10)
    extra_data_length = rng.randint(0, spec.MAX_EXTRA_DATA_BYTES)
    execution_payload.extra_data = spec.ByteList[spec.MAX_EXTRA_DATA_BYTES](
        get_random_bytes_list(rng, extra_data_length)
    )
    execution_payload.base_fee_per_gas = rng.randint(0, 2**256 - 1)

    num_transactions = rng.randint(0, 100)
    execution_payload.transactions = [
        spec.Transaction(get_random_bytes_list(rng, rng.randint(0, 1000)))
        for _ in range(num_transactions)
    ]

    execution_payload.block_hash = compute_el_block_hash(spec, execution_payload)

    return execution_payload


def build_state_with_incomplete_transition(spec, state):
    state = build_state_with_execution_payload_header(spec, state, spec.ExecutionPayloadHeader())
    assert not spec.is_merge_transition_complete(state)
@ -165,6 +165,9 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=
    elif post_spec.fork == CAPELLA:
        assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION
        assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION
    elif post_spec.fork == EIP4844:
        assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION
        assert state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION

    if with_block:
        return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict)
@ -0,0 +1,33 @@
from .constants import (
    PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844,
)


def is_post_fork(a, b):
    if a == EIP4844:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844]
    if a == CAPELLA:
        return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA]
    if a == BELLATRIX:
        return b in [PHASE0, ALTAIR, BELLATRIX]
    if a == ALTAIR:
        return b in [PHASE0, ALTAIR]
    if a == PHASE0:
        return b in [PHASE0]
    raise ValueError("Unknown fork name %s" % a)


def is_post_altair(spec):
    return is_post_fork(spec.fork, ALTAIR)


def is_post_bellatrix(spec):
    return is_post_fork(spec.fork, BELLATRIX)


def is_post_capella(spec):
    return is_post_fork(spec.fork, CAPELLA)


def is_post_eip4844(spec):
    return is_post_fork(spec.fork, EIP4844)
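`is_post_fork(a, b)` answers whether fork `a` is at or after fork `b` in the upgrade order. For intuition, an equivalent formulation over an ordered fork list could look like this (illustrative only; the explicit chain above is what the PR adds):

# Equivalent sketch using an ordered list of fork names (illustrative only).
_FORK_ORDER = [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844]

def is_post_fork_alt(a, b):
    if a not in _FORK_ORDER or b not in _FORK_ORDER:
        raise ValueError("Unknown fork name %s or %s" % (a, b))
    return _FORK_ORDER.index(b) <= _FORK_ORDER.index(a)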
@ -1,6 +1,11 @@
from eth2spec.test.helpers.constants import (
    ALTAIR, BELLATRIX, CAPELLA, EIP4844,
    FORKS_BEFORE_ALTAIR, FORKS_BEFORE_BELLATRIX,
)
from eth2spec.test.helpers.execution_payload import (
    compute_el_header_block_hash,
)
from eth2spec.test.helpers.forks import (
    is_post_altair, is_post_bellatrix, is_post_capella,
)
from eth2spec.test.helpers.keys import pubkeys

@ -27,7 +32,7 @@ def get_sample_genesis_execution_payload_header(spec,
                                                eth1_block_hash=None):
    if eth1_block_hash is None:
        eth1_block_hash = b'\x55' * 32
    return spec.ExecutionPayloadHeader(
    payload_header = spec.ExecutionPayloadHeader(
        parent_hash=b'\x30' * 32,
        fee_recipient=b'\x42' * 20,
        state_root=b'\x20' * 32,
@ -41,6 +46,21 @@ def get_sample_genesis_execution_payload_header(spec,
        transactions_root=spec.Root(b'\x56' * 32),
    )

    transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

    if is_post_capella(spec):
        withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")
    else:
        withdrawals_trie_root = None

    payload_header.block_hash = compute_el_header_block_hash(
        spec,
        payload_header,
        transactions_trie_root,
        withdrawals_trie_root,
    )
    return payload_header


def create_genesis_state(spec, validator_balances, activation_threshold):
    deposit_root = b'\x42' * 32
@ -58,7 +78,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
        previous_version = spec.config.BELLATRIX_FORK_VERSION
        current_version = spec.config.CAPELLA_FORK_VERSION
    elif spec.fork == EIP4844:
        previous_version = spec.config.BELLATRIX_FORK_VERSION
        previous_version = spec.config.CAPELLA_FORK_VERSION
        current_version = spec.config.EIP4844_FORK_VERSION

    state = spec.BeaconState(
@ -88,7 +108,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
        if validator.effective_balance >= activation_threshold:
            validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
            validator.activation_epoch = spec.GENESIS_EPOCH
        if spec.fork not in FORKS_BEFORE_ALTAIR:
        if is_post_altair(spec):
            state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
            state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
            state.inactivity_scores.append(spec.uint64(0))
@ -96,13 +116,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = spec.hash_tree_root(state.validators)

    if spec.fork not in FORKS_BEFORE_ALTAIR:
    if is_post_altair(spec):
        # Fill in sync committees
        # Note: A duplicate committee is assigned for the current and next committee at genesis
        state.current_sync_committee = spec.get_next_sync_committee(state)
        state.next_sync_committee = spec.get_next_sync_committee(state)

    if spec.fork not in FORKS_BEFORE_BELLATRIX:
    if is_post_bellatrix(spec):
        # Initialize the execution payload header (with block number and genesis time set to 0)
        state.latest_execution_payload_header = get_sample_genesis_execution_payload_header(
            spec,
@ -1,5 +1,5 @@
from eth2spec.test.context import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import get_balance
from eth2spec.test.helpers.sync_committee import (
@ -1,8 +1,8 @@
from random import Random

from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
from eth2spec.test.context import is_post_altair
from eth2spec.test.helpers.deposits import mock_deposit
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.state import next_epoch

@ -2,7 +2,7 @@ from random import Random
from lru import LRU

from eth2spec.phase0.mainnet import VALIDATOR_REGISTRY_LIMIT  # equal everywhere, fine to import
from eth2spec.test.context import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.forks import is_post_altair, is_post_bellatrix
from eth2spec.test.helpers.state import (
    next_epoch,
)
@ -206,7 +206,7 @@ def run_get_inclusion_delay_deltas(spec, state):
            rewarded_proposer_indices.add(earliest_attestation.proposer_index)

    # Ensure all expected proposers have been rewarded
    # Track rewarde indices
    # Track reward indices
    proposing_indices = [a.proposer_index for a in eligible_attestations]
    for index in proposing_indices:
        if index in rewarded_proposer_indices:
@ -34,13 +34,14 @@ class ECDSASignature(Container):
class BlobTransaction(Container):
    chain_id: uint256
    nonce: uint64
    priority_fee_per_gas: uint256
    max_basefee_per_gas: uint256
    max_priority_fee_per_gas: uint256
    max_fee_per_gas: uint256
    gas: uint64
    to: Union[None, Bytes20]  # Address = Bytes20
    value: uint256
    data: ByteList[MAX_CALLDATA_SIZE]
    access_list: List[AccessTuple, MAX_ACCESS_LIST_SIZE]
    max_fee_per_data_gas: uint256
    blob_versioned_hashes: List[Bytes32, MAX_VERSIONED_HASHES_LIST_SIZE]

@ -53,10 +54,16 @@ def get_sample_blob(spec, rng=None):
    if rng is None:
        rng = random.Random(5566)

    return spec.Blob([
    values = [
        rng.randint(0, spec.BLS_MODULUS - 1)
        for _ in range(spec.FIELD_ELEMENTS_PER_BLOB)
    ])
    ]

    b = bytes()
    for v in values:
        b += v.to_bytes(32, spec.ENDIANNESS)

    return spec.Blob(b)


def get_sample_opaque_tx(spec, blob_count=1, rng=None):
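`get_sample_blob` now serializes the sampled field elements itself: each of the `FIELD_ELEMENTS_PER_BLOB` values becomes a 32-byte chunk in `spec.ENDIANNESS` byte order, so the resulting `Blob` is `FIELD_ELEMENTS_PER_BLOB * 32` bytes. A hypothetical inverse, for intuition only:

# Hypothetical inverse of the serialization above (not part of the diff).
def blob_to_field_elements(spec, blob):
    data = bytes(blob)
    return [
        int.from_bytes(data[i:i + 32], spec.ENDIANNESS)
        for i in range(0, len(data), 32)
    ]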
@ -1,5 +1,6 @@
from eth2spec.test.context import expect_assertion_error, is_post_altair
from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.block import apply_empty_block, sign_block, transition_unsigned_block
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators

@ -107,10 +107,11 @@ def validate_sync_committee_rewards(spec, pre_state, post_state, committee_indic
        committee_bits,
    )

    assert post_state.balances[index] == pre_state.balances[index] + reward - penalty
    balance = pre_state.balances[index] + reward
    assert post_state.balances[index] == (0 if balance < penalty else balance - penalty)


def run_sync_committee_processing(spec, state, block, expect_exception=False):
def run_sync_committee_processing(spec, state, block, expect_exception=False, skip_reward_validation=False):
    """
    Processes everything up to the sync committee work, then runs the sync committee work in isolation, and
    produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes.
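The reworked assertion above floors the expected balance at zero instead of allowing `pre + reward - penalty` to go negative, matching the saturating behaviour of `decrease_balance` in the spec. A worked example with made-up numbers:

# Worked example of the saturating check above (illustrative values only).
pre, reward, penalty = 5, 0, 8
balance = pre + reward                                     # 5
expected = 0 if balance < penalty else balance - penalty   # floors at 0
assert expected == 0                                       # the old form would demand a negative balance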
@ -131,14 +132,15 @@ def run_sync_committee_processing(spec, state, block, expect_exception=False):
    else:
        committee_indices = compute_committee_indices(state, state.current_sync_committee)
        committee_bits = block.body.sync_aggregate.sync_committee_bits
        validate_sync_committee_rewards(
            spec,
            pre_state,
            state,
            committee_indices,
            committee_bits,
            block.proposer_index
        )
        if not skip_reward_validation:
            validate_sync_committee_rewards(
                spec,
                pre_state,
                state,
                committee_indices,
                committee_bits,
                block.proposer_index
            )


def _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits):
@ -156,6 +158,6 @@ def _build_block_for_next_slot_with_sync_participation(spec, state, committee_in
    return block


def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits):
def run_successful_sync_committee_test(spec, state, committee_indices, committee_bits, skip_reward_validation=False):
    block = _build_block_for_next_slot_with_sync_participation(spec, state, committee_indices, committee_bits)
    yield from run_sync_committee_processing(spec, state, block)
    yield from run_sync_committee_processing(spec, state, block, skip_reward_validation=skip_reward_validation)
@ -1,10 +1,13 @@
import random


def set_validator_fully_withdrawable(spec, state, index, withdrawable_epoch=None):
    if withdrawable_epoch is None:
        withdrawable_epoch = spec.get_current_epoch(state)

    validator = state.validators[index]
    validator.withdrawable_epoch = withdrawable_epoch
    # set exit epoch as well to avoid interactions with other epoch process, e.g. forced ejecions
    # set exit epoch as well to avoid interactions with other epoch process, e.g. forced ejections
    if validator.exit_epoch > withdrawable_epoch:
        validator.exit_epoch = withdrawable_epoch
@ -29,3 +32,20 @@ def set_validator_partially_withdrawable(spec, state, index, excess_balance=1000
    validator = state.validators[index]

    assert spec.is_partially_withdrawable_validator(validator, state.balances[index])


def prepare_expected_withdrawals(spec, state,
                                 num_full_withdrawals=0, num_partial_withdrawals=0, rng=random.Random(5566)):
    bound = min(len(state.validators), spec.MAX_VALIDATORS_PER_WITHDRAWALS_SWEEP)
    assert num_full_withdrawals + num_partial_withdrawals <= bound
    eligible_validator_indices = list(range(bound))
    sampled_indices = rng.sample(eligible_validator_indices, num_full_withdrawals + num_partial_withdrawals)
    fully_withdrawable_indices = rng.sample(sampled_indices, num_full_withdrawals)
    partial_withdrawals_indices = list(set(sampled_indices).difference(set(fully_withdrawable_indices)))

    for index in fully_withdrawable_indices:
        set_validator_fully_withdrawable(spec, state, index)
    for index in partial_withdrawals_indices:
        set_validator_partially_withdrawable(spec, state, index)

    return fully_withdrawable_indices, partial_withdrawals_indices
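A sketch of how this helper might be combined with the Capella withdrawal sweep in a test (names other than `prepare_expected_withdrawals` and `get_expected_withdrawals` are illustrative):

# Illustrative usage in a Capella withdrawals test (assumed pyspec context; not part of this diff).
def example_withdrawal_setup(spec, state):
    fully, partial = prepare_expected_withdrawals(
        spec, state, num_full_withdrawals=2, num_partial_withdrawals=2)
    expected = spec.get_expected_withdrawals(state)
    # Every withdrawal the sweep produces should come from the validators prepared above.
    assert {w.validator_index for w in expected} <= set(fully + partial)
    return expected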
@ -23,7 +23,7 @@ from eth2spec.utils.ssz.ssz_typing import Bitlist
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success(spec, state):
|
||||
def test_one_basic_attestation(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -34,7 +34,7 @@ def test_success(spec, state):
|
|||
@spec_test
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@single_phase
|
||||
def test_success_multi_proposer_index_iterations(spec, state):
|
||||
def test_multi_proposer_index_iterations(spec, state):
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
@ -44,7 +44,7 @@ def test_success_multi_proposer_index_iterations(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success_previous_epoch(spec, state):
|
||||
def test_previous_epoch(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_epoch_via_block(spec, state)
|
||||
|
||||
|
@ -58,55 +58,55 @@ def test_invalid_attestation_signature(spec, state):
|
|||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_empty_participants_zeroes_sig(spec, state):
|
||||
def test_invalid_empty_participants_zeroes_sig(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
|
||||
attestation.signature = spec.BLSSignature(b'\x00' * 96)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_empty_participants_seemingly_valid_sig(spec, state):
|
||||
def test_invalid_empty_participants_seemingly_valid_sig(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, filter_participant_set=lambda comm: []) # 0 participants
|
||||
# Special BLS value, valid for zero pubkeys on some (but not all) BLS implementations.
|
||||
attestation.signature = spec.BLSSignature(b'\xc0' + b'\x00' * 95)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_before_inclusion_delay(spec, state):
|
||||
def test_invalid_before_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
# do not increment slot to allow for inclusion delay
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_after_epoch_slots(spec, state):
|
||||
def test_invalid_after_epoch_slots(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
transition_to_slot_via_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_old_source_epoch(spec, state):
|
||||
def test_invalid_old_source_epoch(spec, state):
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 5)
|
||||
state.finalized_checkpoint.epoch = 2
|
||||
state.previous_justified_checkpoint.epoch = 3
|
||||
|
@ -121,19 +121,19 @@ def test_old_source_epoch(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_wrong_index_for_committee_signature(spec, state):
|
||||
def test_invalid_wrong_index_for_committee_signature(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.index += 1
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
def reduce_state_committee_count_from_max(spec, state):
|
||||
|
@ -148,7 +148,7 @@ def reduce_state_committee_count_from_max(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_0(spec, state):
|
||||
def test_invalid_wrong_index_for_slot_0(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
|
@ -157,13 +157,13 @@ def test_wrong_index_for_slot_0(spec, state):
|
|||
# Invalid index: current committees per slot is less than the max
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_1(spec, state):
|
||||
def test_invalid_wrong_index_for_slot_1(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
|
@ -175,7 +175,7 @@ def test_wrong_index_for_slot_1(spec, state):
|
|||
# Invalid index: off by one
|
||||
attestation.data.index = committee_count
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
|
@ -188,12 +188,12 @@ def test_invalid_index(spec, state):
|
|||
# Invalid index: off by one (with respect to valid range) on purpose
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_mismatched_target_and_slot(spec, state):
|
||||
def test_invalid_mismatched_target_and_slot(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
next_epoch_via_block(spec, state)
|
||||
|
||||
|
@ -202,24 +202,24 @@ def test_mismatched_target_and_slot(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_old_target_epoch(spec, state):
|
||||
def test_invalid_old_target_epoch(spec, state):
|
||||
assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
|
||||
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2) # target epoch will be too old to handle
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_future_target_epoch(spec, state):
|
||||
def test_invalid_future_target_epoch(spec, state):
|
||||
assert spec.MIN_ATTESTATION_INCLUSION_DELAY < spec.SLOTS_PER_EPOCH * 2
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
|
@ -236,12 +236,12 @@ def test_future_target_epoch(spec, state):
|
|||
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_new_source_epoch(spec, state):
|
||||
def test_invalid_new_source_epoch(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -249,12 +249,12 @@ def test_new_source_epoch(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_source_root_is_target_root(spec, state):
|
||||
def test_invalid_source_root_is_target_root(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -262,7 +262,7 @@ def test_source_root_is_target_root(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
|
@ -289,7 +289,7 @@ def test_invalid_current_source_root(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
|
@ -315,12 +315,12 @@ def test_invalid_previous_source_root(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_bad_source_root(spec, state):
|
||||
def test_invalid_bad_source_root(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -328,24 +328,24 @@ def test_bad_source_root(spec, state):
|
|||
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_too_many_aggregation_bits(spec, state):
|
||||
def test_invalid_too_many_aggregation_bits(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# one too many bits
|
||||
attestation.aggregation_bits.append(0b0)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_too_few_aggregation_bits(spec, state):
|
||||
def test_invalid_too_few_aggregation_bits(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -357,7 +357,7 @@ def test_too_few_aggregation_bits(spec, state):
|
|||
# one too few bits
|
||||
attestation.aggregation_bits = attestation.aggregation_bits[:-1]
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
#
|
||||
|
@ -366,7 +366,7 @@ def test_too_few_aggregation_bits(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_min_inclusion_delay(spec, state):
|
||||
def test_correct_attestation_included_at_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -375,7 +375,7 @@ def test_correct_min_inclusion_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_sqrt_epoch_delay(spec, state):
|
||||
def test_correct_attestation_included_at_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
|
@ -384,7 +384,7 @@ def test_correct_sqrt_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_epoch_delay(spec, state):
|
||||
def test_correct_attestation_included_at_one_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
|
@ -393,13 +393,13 @@ def test_correct_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_after_epoch_delay(spec, state):
|
||||
def test_invalid_correct_attestation_included_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
#
|
||||
|
@ -408,7 +408,7 @@ def test_correct_after_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_min_inclusion_delay(spec, state):
|
||||
def test_incorrect_head_included_at_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -420,7 +420,7 @@ def test_incorrect_head_min_inclusion_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_sqrt_epoch_delay(spec, state):
|
||||
def test_incorrect_head_included_at_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
|
@ -432,7 +432,7 @@ def test_incorrect_head_sqrt_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_epoch_delay(spec, state):
|
||||
def test_incorrect_head_included_at_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
|
@ -444,7 +444,7 @@ def test_incorrect_head_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_after_epoch_delay(spec, state):
|
||||
def test_invalid_incorrect_head_included_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
|
@ -453,7 +453,7 @@ def test_incorrect_head_after_epoch_delay(spec, state):
|
|||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
#
|
||||
|
@ -475,7 +475,7 @@ def test_incorrect_head_and_target_min_inclusion_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
|
||||
def test_incorrect_head_and_target_included_at_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
|
@ -488,7 +488,7 @@ def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_epoch_delay(spec, state):
|
||||
def test_incorrect_head_and_target_included_at_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
|
@ -501,7 +501,7 @@ def test_incorrect_head_and_target_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_after_epoch_delay(spec, state):
|
||||
def test_invalid_incorrect_head_and_target_included_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
@ -510,7 +510,7 @@ def test_incorrect_head_and_target_after_epoch_delay(spec, state):
|
|||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
||||
|
||||
#
|
||||
|
@ -519,7 +519,7 @@ def test_incorrect_head_and_target_after_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_min_inclusion_delay(spec, state):
|
||||
def test_incorrect_target_included_at_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
|
@ -531,7 +531,7 @@ def test_incorrect_target_min_inclusion_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_sqrt_epoch_delay(spec, state):
|
||||
def test_incorrect_target_included_at_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
|
@ -543,7 +543,7 @@ def test_incorrect_target_sqrt_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_epoch_delay(spec, state):
|
||||
def test_incorrect_target_included_at_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
|
@ -555,7 +555,7 @@ def test_incorrect_target_epoch_delay(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_after_epoch_delay(spec, state):
|
||||
def test_invalid_incorrect_target_included_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
@ -563,4 +563,4 @@ def test_incorrect_target_after_epoch_delay(spec, state):
|
|||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
yield from run_attestation_processing(spec, state, attestation, valid=False)
|
||||
|
|
|
@ -91,7 +91,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success_double(spec, state):
|
||||
def test_basic_double(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
@ -99,7 +99,7 @@ def test_success_double(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success_surround(spec, state):
|
||||
def test_basic_surround(spec, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
|
||||
state.current_justified_checkpoint.epoch += 1
|
||||
|
@ -119,7 +119,7 @@ def test_success_surround(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success_already_exited_recent(spec, state):
|
||||
def test_already_exited_recent(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||
for index in slashed_indices:
|
||||
|
@ -131,7 +131,7 @@ def test_success_already_exited_recent(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success_proposer_index_slashed(spec, state):
|
||||
def test_proposer_index_slashed(spec, state):
|
||||
# Transition past genesis slot because generally doesn't have a proposer
|
||||
next_epoch_via_block(spec, state)
|
||||
|
||||
|
@ -147,7 +147,7 @@ def test_success_proposer_index_slashed(spec, state):
|
|||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_success_attestation_from_future(spec, state):
|
||||
def test_attestation_from_future(spec, state):
|
||||
# Transition state to future to enable generation of a "future" attestation
|
||||
future_state = state.copy()
|
||||
next_epoch_via_block(spec, future_state)
|
||||
|
@ -165,7 +165,7 @@ def test_success_attestation_from_future(spec, state):
|
|||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_low_balances(spec, state):
|
||||
def test_low_balances(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
@ -175,7 +175,7 @@ def test_success_low_balances(spec, state):
|
|||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_misc_balances(spec, state):
|
||||
def test_misc_balances(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing)
|
||||
|
@ -185,7 +185,7 @@ def test_success_misc_balances(spec, state):
|
|||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@single_phase
|
||||
def test_success_with_effective_balance_disparity(spec, state):
|
||||
def test_with_effective_balance_disparity(spec, state):
|
||||
# Jitter balances to be different from effective balances
|
||||
rng = Random(12345)
|
||||
for i in range(len(state.balances)):
|
||||
|
@ -200,7 +200,7 @@ def test_success_with_effective_balance_disparity(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success_already_exited_long_ago(spec, state):
|
||||
def test_already_exited_long_ago(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||
for index in slashed_indices:
|
||||
|
@ -213,30 +213,30 @@ def test_success_already_exited_long_ago(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_sig_1(spec, state):
|
||||
def test_invalid_incorrect_sig_1(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_sig_2(spec, state):
|
||||
def test_invalid_incorrect_sig_2(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_sig_1_and_2(spec, state):
|
||||
def test_invalid_incorrect_sig_1_and_2(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_same_data(spec, state):
|
||||
def test_invalid_same_data(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||
|
||||
indexed_att_1 = attester_slashing.attestation_1
|
||||
|
@ -244,12 +244,12 @@ def test_same_data(spec, state):
|
|||
indexed_att_1.data = att_2_data
|
||||
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_no_double_or_surround(spec, state):
|
||||
def test_invalid_no_double_or_surround(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||
|
||||
att_1_data = get_attestation_1_data(spec, attester_slashing)
|
||||
|
@ -257,12 +257,12 @@ def test_no_double_or_surround(spec, state):
|
|||
|
||||
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_participants_already_slashed(spec, state):
|
||||
def test_invalid_participants_already_slashed(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
# set all indices to slashed
|
||||
|
@ -270,63 +270,63 @@ def test_participants_already_slashed(spec, state):
|
|||
for index in validator_indices:
|
||||
state.validators[index].slashed = True
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att1_high_index(spec, state):
|
||||
def test_invalid_att1_high_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
|
||||
attester_slashing.attestation_1.attesting_indices = indices
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att2_high_index(spec, state):
|
||||
def test_invalid_att2_high_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_2)
|
||||
indices.append(spec.ValidatorIndex(len(state.validators))) # off by 1
|
||||
attester_slashing.attestation_2.attesting_indices = indices
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att1_empty_indices(spec, state):
|
||||
def test_invalid_att1_empty_indices(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
|
||||
|
||||
attester_slashing.attestation_1.attesting_indices = []
|
||||
attester_slashing.attestation_1.signature = spec.bls.G2_POINT_AT_INFINITY
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att2_empty_indices(spec, state):
|
||||
def test_invalid_att2_empty_indices(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)
|
||||
|
||||
attester_slashing.attestation_2.attesting_indices = []
|
||||
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_all_empty_indices(spec, state):
|
||||
def test_invalid_all_empty_indices(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False)
|
||||
|
||||
attester_slashing.attestation_1.attesting_indices = []
|
||||
|
@ -335,13 +335,13 @@ def test_all_empty_indices(spec, state):
|
|||
attester_slashing.attestation_2.attesting_indices = []
|
||||
attester_slashing.attestation_2.signature = spec.bls.G2_POINT_AT_INFINITY
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att1_bad_extra_index(spec, state):
|
||||
def test_invalid_att1_bad_extra_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
|
||||
|
@ -351,13 +351,13 @@ def test_att1_bad_extra_index(spec, state):
|
|||
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
|
||||
# see if the bad extra index is spotted, and slashing is aborted.
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att1_bad_replaced_index(spec, state):
|
||||
def test_invalid_att1_bad_replaced_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = attester_slashing.attestation_1.attesting_indices
|
||||
|
@ -367,13 +367,13 @@ def test_att1_bad_replaced_index(spec, state):
|
|||
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
|
||||
# see if the bad replaced index is spotted, and slashing is aborted.
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att2_bad_extra_index(spec, state):
|
||||
def test_invalid_att2_bad_extra_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = attester_slashing.attestation_2.attesting_indices
|
||||
|
@ -383,13 +383,13 @@ def test_att2_bad_extra_index(spec, state):
|
|||
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
|
||||
# see if the bad extra index is spotted, and slashing is aborted.
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att2_bad_replaced_index(spec, state):
|
||||
def test_invalid_att2_bad_replaced_index(spec, state):
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
|
||||
|
||||
indices = attester_slashing.attestation_2.attesting_indices
|
||||
|
@ -399,13 +399,13 @@ def test_att2_bad_replaced_index(spec, state):
|
|||
# Do not sign the modified attestation (it's ok to slash if attester signed, not if they did not),
|
||||
# see if the bad replaced index is spotted, and slashing is aborted.
|
||||
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
|
||||
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_att1_duplicate_index_normal_signed(spec, state):
def test_invalid_att1_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

indices = list(attester_slashing.attestation_1.attesting_indices)

@@ -419,13 +419,13 @@ def test_att1_duplicate_index_normal_signed(spec, state):
attester_slashing.attestation_1.attesting_indices = sorted(indices)

# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_normal_signed(spec, state):
def test_invalid_att2_duplicate_index_normal_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

indices = list(attester_slashing.attestation_2.attesting_indices)

@@ -439,13 +439,13 @@ def test_att2_duplicate_index_normal_signed(spec, state):
attester_slashing.attestation_2.attesting_indices = sorted(indices)

# it will just appear normal, unless the double index is spotted
yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_att1_duplicate_index_double_signed(spec, state):
def test_invalid_att1_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

indices = list(attester_slashing.attestation_1.attesting_indices)

@@ -454,13 +454,13 @@ def test_att1_duplicate_index_double_signed(spec, state):
attester_slashing.attestation_1.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_1) # will have one attester signing it double

yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_att2_duplicate_index_double_signed(spec, state):
def test_invalid_att2_duplicate_index_double_signed(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

indices = list(attester_slashing.attestation_2.attesting_indices)

@@ -469,12 +469,12 @@ def test_att2_duplicate_index_double_signed(spec, state):
attester_slashing.attestation_2.attesting_indices = sorted(indices)
sign_indexed_attestation(spec, state, attester_slashing.attestation_2) # will have one attester signing it double

yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_unsorted_att_1(spec, state):
def test_invalid_unsorted_att_1(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

indices = attester_slashing.attestation_1.attesting_indices

@@ -482,12 +482,12 @@ def test_unsorted_att_1(spec, state):
indices[1], indices[2] = indices[2], indices[1] # unsort second and third index
sign_indexed_attestation(spec, state, attester_slashing.attestation_1)

yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_unsorted_att_2(spec, state):
def test_invalid_unsorted_att_2(spec, state):
attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

indices = attester_slashing.attestation_2.attesting_indices

@@ -495,4 +495,4 @@ def test_unsorted_att_2(spec, state):
indices[1], indices[2] = indices[2], indices[1] # unsort second and third index
sign_indexed_attestation(spec, state, attester_slashing.attestation_2)

yield from run_attester_slashing_processing(spec, state, attester_slashing, False)
yield from run_attester_slashing_processing(spec, state, attester_slashing, valid=False)

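Every renamed test_invalid_* case above fails for the same underlying reason: an IndexedAttestation must carry a non-empty, strictly sorted, duplicate-free list of attesting indices before its aggregate signature is even considered. A minimal sketch of that precondition, paraphrasing the phase0 is_valid_indexed_attestation check (the function name below is illustrative and not part of this diff):

def attesting_indices_are_well_formed(indexed_attestation) -> bool:
    # Duplicated indices (the *_duplicate_index_* cases) and swapped indices
    # (the *_unsorted_* cases) both fail this check, even when every signature verifies.
    indices = list(indexed_attestation.attesting_indices)
    return len(indices) > 0 and indices == sorted(set(indices))
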
@@ -34,7 +34,7 @@ def run_block_header_processing(spec, state, block, prepare_state=True, valid=Tr

@with_all_phases
@spec_state_test
def test_success_block_header(spec, state):
def test_basic_block_header(spec, state):
block = build_empty_block_for_next_slot(spec, state)
yield from run_block_header_processing(spec, state, block)

@@ -87,7 +87,7 @@ def test_invalid_multiple_blocks_single_slot(spec, state):

@with_all_phases
@spec_state_test
def test_proposer_slashed(spec, state):
def test_invalid_proposer_slashed(spec, state):
# use stub state to get proposer index of next slot
stub_state = deepcopy(state)
next_slot(spec, stub_state)

@@ -1,13 +1,12 @@
from eth2spec.test.context import spec_state_test, always_bls, with_all_phases
from eth2spec.test.helpers.deposits import (
build_deposit,
deposit_from_context,
prepare_state_and_deposit,
run_deposit_processing,
run_deposit_processing_with_specific_fork_version,
sign_deposit_data,
)
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.utils import bls


@with_all_phases

@@ -92,56 +91,29 @@ def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_other_version(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE

pubkey = pubkeys[validator_index]
privkey = privkeys[validator_index]
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]

# Go through the effort of manually signing, not something normally done. This sig domain will be invalid.
deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=spec.Version('0xaabbccdd'))
deposit_data = spec.DepositData(
pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain))
)
deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)

state.eth1_deposit_index = 0
state.eth1_data.deposit_root = root
state.eth1_data.deposit_count = 1

yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False)

@with_all_phases
@spec_state_test
@always_bls
def test_valid_sig_but_forked_state(spec, state):
def test_correct_sig_but_forked_state(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
# deposits will always be valid, regardless of the current fork
state.fork.current_version = spec.Version('0x1234abcd')
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)

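Deposit signatures stay valid across forks because the deposit domain is computed without a fork-dependent version: compute_domain defaults to the genesis fork version when no fork_version is passed. A rough sketch of that verification, paraphrasing the phase0 spec (the function name is illustrative; bls is the eth2spec.utils wrapper imported above):

def deposit_signature_is_valid(spec, deposit_data) -> bool:
    # Signed over compute_domain(DOMAIN_DEPOSIT), which defaults to GENESIS_FORK_VERSION,
    # so changing state.fork.current_version (as above) cannot invalidate a deposit,
    # while signing under an explicit other fork version (as in the removed
    # test_invalid_sig_other_version) can.
    deposit_message = spec.DepositMessage(
        pubkey=deposit_data.pubkey,
        withdrawal_credentials=deposit_data.withdrawal_credentials,
        amount=deposit_data.amount,
    )
    domain = spec.compute_domain(spec.DOMAIN_DEPOSIT)
    signing_root = spec.compute_signing_root(deposit_message, domain)
    return bls.Verify(deposit_data.pubkey, signing_root, deposit_data.signature)
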
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_new_deposit(spec, state):
def test_incorrect_sig_new_deposit(spec, state):
# fresh deposit = next validator index = validator appended to registry
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=False)
yield from run_deposit_processing(spec, state, deposit, validator_index, effective=False)

@with_all_phases
@spec_state_test
def test_success_top_up__max_effective_balance(spec, state):
def test_top_up__max_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)

@@ -157,7 +129,7 @@ def test_success_top_up__max_effective_balance(spec, state):

@with_all_phases
@spec_state_test
def test_success_top_up__less_effective_balance(spec, state):
def test_top_up__less_effective_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)

@@ -176,7 +148,7 @@ def test_success_top_up__less_effective_balance(spec, state):

@with_all_phases
@spec_state_test
def test_success_top_up__zero_balance(spec, state):
def test_top_up__zero_balance(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True)

@@ -196,18 +168,18 @@ def test_success_top_up__zero_balance(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_top_up(spec, state):
def test_incorrect_sig_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)

# invalid signatures, in top-ups, are allowed!
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)


@with_all_phases
@spec_state_test
def test_invalid_withdrawal_credentials_top_up(spec, state):
def test_incorrect_withdrawal_credentials_top_up(spec, state):
validator_index = 0
amount = spec.MAX_EFFECTIVE_BALANCE // 4
withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:]

@@ -220,12 +192,12 @@ def test_invalid_withdrawal_credentials_top_up(spec, state):
)

# inconsistent withdrawal credentials, in top-ups, are allowed!
yield from run_deposit_processing(spec, state, deposit, validator_index, valid=True, effective=True)
yield from run_deposit_processing(spec, state, deposit, validator_index)

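Top-ups tolerate bad signatures and mismatched withdrawal credentials because those fields are only checked when a brand-new validator is appended to the registry; for an existing pubkey the deposit simply increases the balance. A condensed sketch of that branching, paraphrasing the phase0 deposit-processing logic (helper names here are assumptions, not part of this diff):

def apply_deposit_sketch(spec, state, deposit):
    pubkey = deposit.data.pubkey
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey in validator_pubkeys:
        # Top-up: signature and withdrawal credentials are ignored, only the balance grows,
        # which is why the two "allowed!" cases above still process successfully.
        index = validator_pubkeys.index(pubkey)
        spec.increase_balance(state, spec.ValidatorIndex(index), deposit.data.amount)
    elif deposit_signature_is_valid(spec, deposit.data):  # checked only for new validators (see sketch above)
        # A bad signature on a fresh deposit makes it ineffective (skipped) rather than
        # failing block processing, matching test_incorrect_sig_new_deposit above.
        append_new_validator(spec, state, deposit.data)   # hypothetical helper: registry + balance append
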
@with_all_phases
@spec_state_test
def test_wrong_deposit_for_deposit_count(spec, state):
def test_invalid_wrong_deposit_for_deposit_count(spec, state):
deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]

# build root for deposit_1

@@ -266,7 +238,7 @@ def test_wrong_deposit_for_deposit_count(spec, state):

@with_all_phases
@spec_state_test
def test_bad_merkle_proof(spec, state):
def test_invalid_bad_merkle_proof(spec, state):
validator_index = len(state.validators)
amount = spec.MAX_EFFECTIVE_BALANCE
deposit = prepare_state_and_deposit(spec, state, validator_index, amount)

@@ -307,3 +279,15 @@ def test_key_validate_invalid_decompression(spec, state):
deposit = prepare_state_and_deposit(spec, state, validator_index, amount, pubkey=pubkey, signed=True)

yield from run_deposit_processing(spec, state, deposit, validator_index)


@with_all_phases
@spec_state_test
@always_bls
def test_ineffective_deposit_with_bad_fork_version(spec, state):
yield from run_deposit_processing_with_specific_fork_version(
spec,
state,
fork_version=spec.Version('0xAaBbCcDd'),
effective=False,
)

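The new test_ineffective_deposit_with_bad_fork_version replaces the hand-rolled test_invalid_sig_other_version removed earlier, delegating setup to run_deposit_processing_with_specific_fork_version. That helper's body is not shown in this diff; a plausible sketch of its shape, assuming it mirrors the removed test above:

def run_deposit_processing_with_specific_fork_version_sketch(spec, state, fork_version, effective=True):
    # Assumed shape only: sign a fresh deposit under an explicit fork version
    # and feed it through the normal deposit-processing runner.
    validator_index = len(state.validators)
    amount = spec.MAX_EFFECTIVE_BALANCE
    pubkey = pubkeys[validator_index]
    privkey = privkeys[validator_index]
    withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:]

    deposit_message = spec.DepositMessage(
        pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount)
    domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version)
    deposit_data = spec.DepositData(
        pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount,
        signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain)))
    deposit, root, _ = deposit_from_context(spec, [deposit_data], 0)

    state.eth1_deposit_index = 0
    state.eth1_data.deposit_root = root
    state.eth1_data.deposit_count = 1

    yield from run_deposit_processing(spec, state, deposit, validator_index, effective=effective)
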
@@ -34,7 +34,7 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)

@with_all_phases
@spec_state_test
def test_success(spec, state):
def test_basic(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

yield from run_proposer_slashing_processing(spec, state, proposer_slashing)

@@ -42,7 +42,7 @@ def test_success(spec, state):

@with_all_phases
@spec_state_test
def test_success_slashed_and_proposer_index_the_same(spec, state):
def test_slashed_and_proposer_index_the_same(spec, state):
# Get proposer for next slot
block = build_empty_block_for_next_slot(spec, state)
proposer_index = block.proposer_index

@@ -57,7 +57,7 @@ def test_success_slashed_and_proposer_index_the_same(spec, state):

@with_all_phases
@spec_state_test
def test_success_block_header_from_future(spec, state):
def test_block_header_from_future(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, slot=state.slot + 5, signed_1=True, signed_2=True)

yield from run_proposer_slashing_processing(spec, state, proposer_slashing)

@@ -66,31 +66,31 @@ def test_success_block_header_from_future(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1(spec, state):
def test_invalid_incorrect_sig_1(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=True)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_2(spec, state):
def test_invalid_incorrect_sig_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2(spec, state):
def test_invalid_incorrect_sig_1_and_2(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
@always_bls
def test_invalid_sig_1_and_2_swap(spec, state):
def test_invalid_incorrect_sig_1_and_2_swap(spec, state):
# Get valid signatures for the slashings
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

@@ -98,18 +98,18 @@ def test_invalid_sig_1_and_2_swap(spec, state):
signature_1 = proposer_slashing.signed_header_1.signature
proposer_slashing.signed_header_1.signature = proposer_slashing.signed_header_2.signature
proposer_slashing.signed_header_2.signature = signature_1
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_invalid_proposer_index(spec, state):
def test_invalid_incorrect_proposer_index(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
# Index just too high (by 1)
proposer_slashing.signed_header_1.message.proposer_index = len(state.validators)
proposer_slashing.signed_header_2.message.proposer_index = len(state.validators)

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases

@@ -125,12 +125,12 @@ def test_invalid_different_proposer_indices(spec, state):
header_2.proposer_index = active_indices[0]
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[header_2.proposer_index])

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_epochs_are_different(spec, state):
def test_invalid_slots_of_different_epochs(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)

# set slots to be in different epochs

@@ -139,23 +139,23 @@ def test_epochs_are_different(spec, state):
header_2.slot += spec.SLOTS_PER_EPOCH
proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[proposer_index])

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_same(spec, state):
def test_invalid_headers_are_same_sigs_are_same(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)

# set headers to be the same
proposer_slashing.signed_header_2 = proposer_slashing.signed_header_1.copy()

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_headers_are_same_sigs_are_different(spec, state):
def test_invalid_headers_are_same_sigs_are_different(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=False)

# set headers to be the same

@@ -165,36 +165,36 @@ def test_headers_are_same_sigs_are_different(spec, state):

assert proposer_slashing.signed_header_1.signature != proposer_slashing.signed_header_2.signature

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_proposer_is_not_activated(spec, state):
def test_invalid_proposer_is_not_activated(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

# set proposer to be not active yet
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].activation_epoch = spec.get_current_epoch(state) + 1

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_proposer_is_slashed(spec, state):
def test_invalid_proposer_is_slashed(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

# set proposer to slashed
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].slashed = True

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)


@with_all_phases
@spec_state_test
def test_proposer_is_withdrawn(spec, state):
def test_invalid_proposer_is_withdrawn(spec, state):
proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

# move 1 epoch into future, to allow for past withdrawable epoch

@@ -204,4 +204,4 @@ def test_proposer_is_withdrawn(spec, state):
proposer_index = proposer_slashing.signed_header_1.message.proposer_index
state.validators[proposer_index].withdrawable_epoch = current_epoch - 1

yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)
yield from run_proposer_slashing_processing(spec, state, proposer_slashing, valid=False)

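The renamed invalid proposer-slashing cases above map onto the validity conditions of proposer-slashing processing: both headers must be for the same slot and the same proposer, must differ from each other, the proposer must currently be slashable, and both header signatures must verify. A compressed sketch of those checks, paraphrasing the phase0 spec (names assumed from the public pyspec, not taken from this diff; bls is the eth2spec.utils wrapper):

def check_proposer_slashing_sketch(spec, state, proposer_slashing):
    header_1 = proposer_slashing.signed_header_1.message
    header_2 = proposer_slashing.signed_header_2.message
    assert header_1.slot == header_2.slot                       # slots of different epochs -> invalid
    assert header_1.proposer_index == header_2.proposer_index   # different proposer indices -> invalid
    assert header_1 != header_2                                  # identical headers -> invalid
    proposer = state.validators[header_1.proposer_index]        # out-of-range index -> invalid
    # Not yet activated, already slashed, or already withdrawn -> not slashable -> invalid
    assert spec.is_slashable_validator(proposer, spec.get_current_epoch(state))
    for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2):
        domain = spec.get_domain(
            state, spec.DOMAIN_BEACON_PROPOSER, spec.compute_epoch_at_slot(signed_header.message.slot))
        signing_root = spec.compute_signing_root(signed_header.message, domain)
        assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)
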
@@ -14,7 +14,7 @@ from eth2spec.test.helpers.voluntary_exits import (

@with_all_phases
@spec_state_test
def test_success(spec, state):
def test_basic(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -33,7 +33,7 @@ def test_success(spec, state):
@with_all_phases
@spec_state_test
@always_bls
def test_invalid_signature(spec, state):
def test_invalid_incorrect_signature(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -46,7 +46,7 @@ def test_invalid_signature(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, 12345)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)


def run_test_success_exit_queue(spec, state):

@@ -134,7 +134,7 @@ def test_default_exit_epoch_subsequent_exit(spec, state):

@with_all_phases
@spec_state_test
def test_validator_exit_in_future(spec, state):
def test_invalid_validator_exit_in_future(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -148,12 +148,12 @@ def test_validator_exit_in_future(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)

@with_all_phases
@spec_state_test
def test_validator_invalid_validator_index(spec, state):
def test_invalid_validator_incorrect_validator_index(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -167,12 +167,12 @@ def test_validator_invalid_validator_index(spec, state):
)
signed_voluntary_exit = sign_voluntary_exit(spec, state, voluntary_exit, privkey)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)


@with_all_phases
@spec_state_test
def test_validator_not_active(spec, state):
def test_invalid_validator_not_active(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]

@@ -182,12 +182,12 @@ def test_validator_not_active(spec, state):
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)


@with_all_phases
@spec_state_test
def test_validator_already_exited(spec, state):
def test_invalid_validator_already_exited(spec, state):
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow validator able to exit
state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH

@@ -201,12 +201,12 @@ def test_validator_already_exited(spec, state):
signed_voluntary_exit = sign_voluntary_exit(
spec, state, spec.VoluntaryExit(epoch=current_epoch, validator_index=validator_index), privkey)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)


@with_all_phases
@spec_state_test
def test_validator_not_active_long_enough(spec, state):
def test_invalid_validator_not_active_long_enough(spec, state):
current_epoch = spec.get_current_epoch(state)
validator_index = spec.get_active_validator_indices(state, current_epoch)[0]
privkey = pubkey_to_privkey[state.validators[validator_index].pubkey]

@@ -219,4 +219,4 @@ def test_validator_not_active_long_enough(spec, state):
spec.config.SHARD_COMMITTEE_PERIOD
)

yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, False)
yield from run_voluntary_exit_processing(spec, state, signed_voluntary_exit, valid=False)

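The renamed invalid voluntary-exit tests correspond to the preconditions of exit processing: the validator must exist and be active, must not already have initiated an exit, the exit's epoch must not lie in the future, the validator must have been active for at least SHARD_COMMITTEE_PERIOD epochs, and the signature must verify. A compact sketch of those checks, paraphrasing the phase0 spec (helper names assumed from the public pyspec, not from this diff):

def check_voluntary_exit_sketch(spec, state, signed_voluntary_exit):
    voluntary_exit = signed_voluntary_exit.message
    validator = state.validators[voluntary_exit.validator_index]   # bad index -> invalid
    current_epoch = spec.get_current_epoch(state)
    assert spec.is_active_validator(validator, current_epoch)      # not active -> invalid
    assert validator.exit_epoch == spec.FAR_FUTURE_EPOCH           # already exited -> invalid
    assert current_epoch >= voluntary_exit.epoch                   # exit epoch in the future -> invalid
    assert current_epoch >= validator.activation_epoch + spec.config.SHARD_COMMITTEE_PERIOD  # not active long enough
    domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT, voluntary_exit.epoch)
    signing_root = spec.compute_signing_root(voluntary_exit, domain)
    assert bls.Verify(validator.pubkey, signing_root, signed_voluntary_exit.signature)  # bad signature -> invalid
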
@@ -1,8 +1,9 @@
from random import Random
from eth2spec.test.context import is_post_altair, spec_state_test, with_all_phases
from eth2spec.test.context import spec_state_test, with_all_phases
from eth2spec.test.helpers.epoch_processing import (
run_epoch_processing_with,
)
from eth2spec.test.helpers.forks import is_post_altair
from eth2spec.test.helpers.state import transition_to, next_epoch_via_block, next_slot
from eth2spec.test.helpers.voluntary_exits import get_unslashed_exited_validators


@@ -88,7 +88,7 @@ def test_activation_queue_no_activation_no_finality(spec, state):
def test_activation_queue_sorting(spec, state):
churn_limit = spec.get_validator_churn_limit(state)

# try to activate more than the per-epoch churn linmit
# try to activate more than the per-epoch churn limit
mock_activations = churn_limit * 2

epoch = spec.get_current_epoch(state)

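test_activation_queue_sorting deliberately queues twice the per-epoch churn limit so that only part of the queue can activate. For reference, the phase0 churn limit is roughly the following (paraphrased; config names assumed from the public pyspec, not from this diff):

def validator_churn_limit_sketch(spec, state):
    # Upper bound on how many validators may be activated (or exited) per epoch.
    active = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
    return max(spec.config.MIN_PER_EPOCH_CHURN_LIMIT, len(active) // spec.config.CHURN_LIMIT_QUOTIENT)
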
Some files were not shown because too many files have changed in this diff.