mirror of
https://github.com/status-im/eth2.0-specs.git
synced 2025-02-20 22:38:11 +00:00
Merge branch 'dev' into tests-compression
This commit is contained in:
commit
813608928c
@ -35,13 +35,13 @@ commands:
|
||||
description: "Restore the cache with pyspec keys"
|
||||
steps:
|
||||
- restore_cached_venv:
|
||||
venv_name: v22-pyspec
|
||||
venv_name: v24-pyspec
|
||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||
save_pyspec_cached_venv:
|
||||
description: Save a venv into a cache with pyspec keys"
|
||||
steps:
|
||||
- save_cached_venv:
|
||||
venv_name: v22-pyspec
|
||||
venv_name: v24-pyspec
|
||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||
venv_path: ./venv
|
||||
restore_deposit_contract_tester_cached_venv:
|
||||
@ -111,7 +111,7 @@ jobs:
|
||||
- checkout
|
||||
- run:
|
||||
name: Check table of contents
|
||||
command: sudo npm install -g doctoc && make check_toc
|
||||
command: sudo npm install -g doctoc@2 && make check_toc
|
||||
codespell:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
@ -120,7 +120,7 @@ jobs:
|
||||
- checkout
|
||||
- run:
|
||||
name: Check codespell
|
||||
command: pip install codespell --user && make codespell
|
||||
command: pip install 'codespell<3.0.0,>=2.0.0' --user && make codespell
|
||||
lint:
|
||||
docker:
|
||||
- image: circleci/python:3.8
|
||||
@ -216,15 +216,17 @@ workflows:
|
||||
- lint:
|
||||
requires:
|
||||
- test
|
||||
- install_deposit_contract_web3_tester:
|
||||
requires:
|
||||
- checkout_specs
|
||||
- test_deposit_contract_web3_tests:
|
||||
requires:
|
||||
- install_deposit_contract_web3_tester
|
||||
# NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
|
||||
# - install_deposit_contract_web3_tester:
|
||||
# requires:
|
||||
# - checkout_specs
|
||||
# - test_deposit_contract_web3_tests:
|
||||
# requires:
|
||||
# - install_deposit_contract_web3_tester
|
||||
build_and_test_deposit_contract:
|
||||
jobs:
|
||||
- build_deposit_contract
|
||||
- test_deposit_contract:
|
||||
requires:
|
||||
- build_deposit_contract
|
||||
# NOTE: Since phase 0 has been launched, we disabled the deposit contract tests.
|
||||
# - test_deposit_contract:
|
||||
# requires:
|
||||
# - build_deposit_contract
|
||||
|
@ -1,2 +1,3 @@
|
||||
uint
|
||||
byteorder
|
||||
byteorder
|
||||
ether
|
||||
|
1
.gitignore
vendored
1
.gitignore
vendored
@ -17,6 +17,7 @@ eth2.0-spec-tests/
|
||||
# Dynamically built from Markdown spec
|
||||
tests/core/pyspec/eth2spec/phase0/
|
||||
tests/core/pyspec/eth2spec/phase1/
|
||||
tests/core/pyspec/eth2spec/lightclient_patch/
|
||||
|
||||
# coverage reports
|
||||
.htmlcov
|
||||
|
17
Makefile
17
Makefile
@ -20,7 +20,7 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
|
||||
# To check generator matching:
|
||||
#$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
|
||||
|
||||
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
|
||||
MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SPEC_DIR)/lightclient/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
|
||||
|
||||
COV_HTML_OUT=.htmlcov
|
||||
COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
|
||||
@ -49,6 +49,7 @@ partial_clean:
|
||||
rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
|
||||
rm -rf $(PY_SPEC_DIR)/phase0
|
||||
rm -rf $(PY_SPEC_DIR)/phase1
|
||||
rm -rf $(PY_SPEC_DIR)/lightclient
|
||||
rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
|
||||
rm -rf $(PY_SPEC_DIR)/.coverage
|
||||
rm -rf $(PY_SPEC_DIR)/test-reports
|
||||
@ -81,19 +82,19 @@ pyspec:
|
||||
|
||||
# installs the packages to run pyspec tests
|
||||
install_test:
|
||||
python3.8 -m venv venv; . venv/bin/activate; pip3 install .[lint]; pip3 install -e .[test]
|
||||
python3 -m venv venv; . venv/bin/activate; python3 -m pip install .[lint]; python3 -m pip install -e .[test]
|
||||
|
||||
test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec -cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
find_test: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov=eth2spec.lightclient_patch.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
|
||||
|
||||
citest: pyspec
|
||||
mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
python -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
|
||||
python3 -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
|
||||
|
||||
open_cov:
|
||||
((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
|
||||
@ -112,7 +113,7 @@ codespell:
|
||||
lint: pyspec
|
||||
. venv/bin/activate; cd $(PY_SPEC_DIR); \
|
||||
flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1
|
||||
&& mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1 -p eth2spec.lightclient_patch
|
||||
|
||||
lint_generators: pyspec
|
||||
. venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
|
||||
@ -132,11 +133,11 @@ test_deposit_contract:
|
||||
dapp test -v --fuzz-runs 5
|
||||
|
||||
install_deposit_contract_web3_tester:
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; python3 -m pip install -r requirements.txt
|
||||
|
||||
test_deposit_contract_web3_tests:
|
||||
cd $(DEPOSIT_CONTRACT_TESTER_DIR); . venv/bin/activate; \
|
||||
python -m pytest .
|
||||
python3 -m pytest .
|
||||
|
||||
# Runs a generator, identified by param 1
|
||||
define run_generator
|
||||
|
21
README.md
21
README.md
@ -11,29 +11,28 @@ This repository hosts the current Eth2 specifications. Discussions about design
|
||||
|
||||
[](https://github.com/ethereum/eth2.0-specs/releases/) [](https://badge.fury.io/py/eth2spec)
|
||||
|
||||
|
||||
Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
|
||||
|
||||
### Phase 0
|
||||
|
||||
* [The Beacon Chain](specs/phase0/beacon-chain.md)
|
||||
* [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
|
||||
* [Deposit Contract](specs/phase0/deposit-contract.md)
|
||||
* [Honest Validator](specs/phase0/validator.md)
|
||||
* [P2P Networking](specs/phase0/p2p-interface.md)
|
||||
|
||||
### Phase 1
|
||||
* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
|
||||
* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
|
||||
* [Custody Game](specs/phase1/custody-game.md)
|
||||
* [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
|
||||
* [Light client syncing protocol](specs/phase1/light-client-sync.md)
|
||||
* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
|
||||
### Light clients
|
||||
|
||||
### Phase 2
|
||||
* [Beacon chain changes](specs/lightclient/beacon-chain.md)
|
||||
* [Light client sync protocol](specs/lightclient/sync-protocol.md)
|
||||
|
||||
Phase 2 is still actively in R&D and does not yet have any formal specifications.
|
||||
### Sharding
|
||||
|
||||
See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work.
|
||||
The sharding spec is still actively in R&D; see the most recent available pull request [here](https://github.com/ethereum/eth2.0-specs/pull/2146) and some technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD).
|
||||
|
||||
### Merge
|
||||
|
||||
The merge is still actively in R&D; see an [ethresear.ch](https://ethresear.ch) post describing the proposed basic mechanism [here](https://ethresear.ch/t/the-eth1-eth2-transition/6265) and the section of [ethereum.org](https://ethereum.org) describing the merge at a high level [here](https://ethereum.org/en/eth2/docking/).
|
||||
|
||||
### Accompanying documents can be found in [specs](specs) and include:
|
||||
|
||||
|
31
configs/mainnet/lightclient_patch.yaml
Normal file
31
configs/mainnet/lightclient_patch.yaml
Normal file
@ -0,0 +1,31 @@
|
||||
# Mainnet preset - lightclient patch
|
||||
|
||||
CONFIG_NAME: "mainnet"
|
||||
|
||||
# Updated penalty values
|
||||
# ---------------------------------------------------------------
|
||||
# 3 * 2**24) (= 50,331,648)
|
||||
HF1_INACTIVITY_PENALTY_QUOTIENT: 50331648
|
||||
# 2**6 (= 64)
|
||||
HF1_MIN_SLASHING_PENALTY_QUOTIENT: 64
|
||||
# 2
|
||||
HF1_PROPORTIONAL_SLASHING_MULTIPLIER: 2
|
||||
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# 2**10 (=1,024)
|
||||
SYNC_COMMITTEE_SIZE: 1024
|
||||
# 2**6 (=64)
|
||||
SYNC_SUBCOMMITTEE_SIZE: 64
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# 2**8 (= 256)
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_SYNC_COMMITTEE: 0x07000000
|
@ -18,8 +18,8 @@ CHURN_LIMIT_QUOTIENT: 65536
|
||||
SHUFFLE_ROUND_COUNT: 90
|
||||
# `2**14` (= 16,384)
|
||||
MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
|
||||
# Jan 3, 2020
|
||||
MIN_GENESIS_TIME: 1578009600
|
||||
# Dec 1, 2020, 12pm UTC
|
||||
MIN_GENESIS_TIME: 1606824000
|
||||
# 4
|
||||
HYSTERESIS_QUOTIENT: 4
|
||||
# 1 (minus 0.25)
|
||||
@ -54,7 +54,7 @@ SECONDS_PER_ETH1_BLOCK: 14
|
||||
DEPOSIT_CHAIN_ID: 1
|
||||
DEPOSIT_NETWORK_ID: 1
|
||||
# **TBD**
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa
|
||||
|
||||
|
||||
# Gwei values
|
||||
|
31
configs/minimal/lightclient_patch.yaml
Normal file
31
configs/minimal/lightclient_patch.yaml
Normal file
@ -0,0 +1,31 @@
|
||||
# Minimal preset - lightclient patch
|
||||
|
||||
CONFIG_NAME: "minimal"
|
||||
|
||||
# Updated penalty values
|
||||
# ---------------------------------------------------------------
|
||||
# 3 * 2**24) (= 50,331,648)
|
||||
HF1_INACTIVITY_PENALTY_QUOTIENT: 50331648
|
||||
# 2**6 (= 64)
|
||||
HF1_MIN_SLASHING_PENALTY_QUOTIENT: 64
|
||||
# 2
|
||||
HF1_PROPORTIONAL_SLASHING_MULTIPLIER: 2
|
||||
|
||||
|
||||
# Misc
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
SYNC_COMMITTEE_SIZE: 32
|
||||
# [customized]
|
||||
SYNC_SUBCOMMITTEE_SIZE: 16
|
||||
|
||||
|
||||
# Time parameters
|
||||
# ---------------------------------------------------------------
|
||||
# [customized]
|
||||
EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
|
||||
|
||||
|
||||
# Signature domains
|
||||
# ---------------------------------------------------------------
|
||||
DOMAIN_SYNC_COMMITTEE: 0x07000000
|
@ -54,7 +54,7 @@ SECONDS_PER_ETH1_BLOCK: 14
|
||||
# Ethereum Goerli testnet
|
||||
DEPOSIT_CHAIN_ID: 5
|
||||
DEPOSIT_NETWORK_ID: 5
|
||||
# **TBD**
|
||||
# Configured on a per testnet basis
|
||||
DEPOSIT_CONTRACT_ADDRESS: 0x1234567890123456789012345678901234567890
|
||||
|
||||
|
||||
|
64
setup.py
64
setup.py
@ -52,8 +52,9 @@ def get_spec(file_name: str) -> SpecObject:
|
||||
else:
|
||||
# Handle function definitions & ssz_objects
|
||||
if pulling_from is not None:
|
||||
if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
|
||||
name = line[6:-12]
|
||||
if len(line) > 18 and line[:6] == 'class ' and (line[-12:] == '(Container):' or '(phase' in line):
|
||||
end = -12 if line[-12:] == '(Container):' else line.find('(')
|
||||
name = line[6:end]
|
||||
# Check consistency with markdown header
|
||||
assert name == current_name
|
||||
block_type = CodeBlockType.SSZ
|
||||
@ -156,6 +157,40 @@ SSZObject = TypeVar('SSZObject', bound=View)
|
||||
|
||||
CONFIG_NAME = 'mainnet'
|
||||
'''
|
||||
LIGHTCLIENT_IMPORT = '''from eth2spec.phase0 import spec as phase0
|
||||
from eth2spec.config.config_util import apply_constants_config
|
||||
from typing import (
|
||||
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional
|
||||
)
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
|
||||
from lru import LRU
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
View, boolean, Container, List, Vector, uint8, uint32, uint64,
|
||||
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
# Whenever lightclient is loaded, make sure we have the latest phase0
|
||||
from importlib import reload
|
||||
reload(phase0)
|
||||
|
||||
|
||||
SSZVariableName = str
|
||||
GeneralizedIndex = NewType('GeneralizedIndex', int)
|
||||
SSZObject = TypeVar('SSZObject', bound=View)
|
||||
|
||||
CONFIG_NAME = 'mainnet'
|
||||
'''
|
||||
|
||||
SUNDRY_CONSTANTS_FUNCTIONS = '''
|
||||
def ceillog2(x: int) -> uint64:
|
||||
if x < 1:
|
||||
@ -351,6 +386,7 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
|
||||
fork_imports = {
|
||||
'phase0': PHASE0_IMPORTS,
|
||||
'phase1': PHASE1_IMPORTS,
|
||||
'lightclient_patch': LIGHTCLIENT_IMPORT,
|
||||
}
|
||||
|
||||
|
||||
@ -413,10 +449,20 @@ class PySpecCommand(Command):
|
||||
specs/phase1/beacon-chain.md
|
||||
specs/phase1/shard-transition.md
|
||||
specs/phase1/fork-choice.md
|
||||
specs/phase1/phase1-fork.md
|
||||
specs/phase1/fork.md
|
||||
specs/phase1/shard-fork-choice.md
|
||||
specs/phase1/validator.md
|
||||
"""
|
||||
elif self.spec_fork == "lightclient_patch":
|
||||
self.md_doc_paths = """
|
||||
specs/phase0/beacon-chain.md
|
||||
specs/phase0/fork-choice.md
|
||||
specs/phase0/validator.md
|
||||
specs/phase0/weak-subjectivity.md
|
||||
specs/lightclient/beacon-chain.md
|
||||
specs/lightclient/fork.md
|
||||
"""
|
||||
# TODO: add specs/lightclient/sync-protocol.md back when the GeneralizedIndex helpers are included.
|
||||
else:
|
||||
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
||||
|
||||
@ -516,13 +562,12 @@ setup(
|
||||
url="https://github.com/ethereum/eth2.0-specs",
|
||||
include_package_data=False,
|
||||
package_data={'configs': ['*.yaml'],
|
||||
|
||||
'specs': ['**/*.md'],
|
||||
'eth2spec': ['VERSION.txt']},
|
||||
package_dir={
|
||||
"eth2spec": "tests/core/pyspec/eth2spec",
|
||||
"configs": "configs",
|
||||
"specs": "specs"
|
||||
"specs": "specs",
|
||||
},
|
||||
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
||||
py_modules=["eth2spec"],
|
||||
@ -536,11 +581,12 @@ setup(
|
||||
"eth-utils>=1.3.0,<2",
|
||||
"eth-typing>=2.1.0,<3.0.0",
|
||||
"pycryptodome==3.9.4",
|
||||
"py_ecc==5.0.0",
|
||||
"milagro_bls_binding==1.5.0",
|
||||
"py_ecc==5.2.0",
|
||||
"milagro_bls_binding==1.6.3",
|
||||
"dataclasses==0.6",
|
||||
"remerkleable==0.1.17",
|
||||
"remerkleable==0.1.18",
|
||||
"ruamel.yaml==0.16.5",
|
||||
"lru-dict==1.1.6"
|
||||
"lru-dict==1.1.6",
|
||||
"python-snappy==0.5.4",
|
||||
]
|
||||
)
|
||||
|
680
specs/lightclient/beacon-chain.md
Normal file
680
specs/lightclient/beacon-chain.md
Normal file
@ -0,0 +1,680 @@
|
||||
# Ethereum 2.0 HF1
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Participation flag indices](#participation-flag-indices)
|
||||
- [Participation flag fractions](#participation-flag-fractions)
|
||||
- [Misc](#misc)
|
||||
- [Configuration](#configuration)
|
||||
- [Updated penalty values](#updated-penalty-values)
|
||||
- [Misc](#misc-1)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [Domain types](#domain-types)
|
||||
- [Containers](#containers)
|
||||
- [Extended containers](#extended-containers)
|
||||
- [`BeaconBlockBody`](#beaconblockbody)
|
||||
- [`BeaconState`](#beaconstate)
|
||||
- [New containers](#new-containers)
|
||||
- [`SyncCommittee`](#synccommittee)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`Predicates`](#predicates)
|
||||
- [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
|
||||
- [Misc](#misc-2)
|
||||
- [`get_flag_indices_and_numerators`](#get_flag_indices_and_numerators)
|
||||
- [`add_flag`](#add_flag)
|
||||
- [`has_flag`](#has_flag)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_sync_committee_indices`](#get_sync_committee_indices)
|
||||
- [`get_sync_committee`](#get_sync_committee)
|
||||
- [`get_base_reward`](#get_base_reward)
|
||||
- [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
|
||||
- [`get_flag_deltas`](#get_flag_deltas)
|
||||
- [New `get_inactivity_penalty_deltas`](#new-get_inactivity_penalty_deltas)
|
||||
- [Beacon state mutators](#beacon-state-mutators)
|
||||
- [New `slash_validator`](#new-slash_validator)
|
||||
- [Block processing](#block-processing)
|
||||
- [Modified `process_attestation`](#modified-process_attestation)
|
||||
- [New `process_deposit`](#new-process_deposit)
|
||||
- [Sync committee processing](#sync-committee-processing)
|
||||
- [Epoch processing](#epoch-processing)
|
||||
- [Justification and finalization](#justification-and-finalization)
|
||||
- [Rewards and penalties](#rewards-and-penalties)
|
||||
- [Slashings](#slashings)
|
||||
- [Participation flags updates](#participation-flags-updates)
|
||||
- [Sync committee updates](#sync-committee-updates)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This is a patch implementing the first hard fork to the beacon chain, tentatively named HF1 pending a permanent name.
|
||||
It has four main features:
|
||||
|
||||
* Light client support via sync committees
|
||||
* Incentive accounting reforms, reducing spec complexity
|
||||
and [TODO] reducing the cost of processing chains that have very little or zero participation for a long span of epochs
|
||||
* Update penalty configuration values, moving them toward their planned maximally punitive configuration
|
||||
* Fork choice rule changes to address weaknesses recently discovered in the existing fork choice
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `ParticipationFlags` | `uint8` | A succinct representation of 8 boolean participation flags |
|
||||
|
||||
## Constants
|
||||
|
||||
### Participation flag indices
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `TIMELY_HEAD_FLAG_INDEX` | `0` |
|
||||
| `TIMELY_SOURCE_FLAG_INDEX` | `1` |
|
||||
| `TIMELY_TARGET_FLAG_INDEX` | `2` |
|
||||
|
||||
### Participation flag fractions
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `TIMELY_HEAD_FLAG_NUMERATOR` | `12` |
|
||||
| `TIMELY_SOURCE_FLAG_NUMERATOR` | `12` |
|
||||
| `TIMELY_TARGET_FLAG_NUMERATOR` | `32` |
|
||||
| `FLAG_DENOMINATOR` | `64` |
|
||||
|
||||
**Note**: The participatition flag fractions add up to 7/8.
|
||||
The remaining 1/8 is for proposer incentives and other future micro-incentives.
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |
|
||||
|
||||
## Configuration
|
||||
|
||||
### Updated penalty values
|
||||
|
||||
This patch updates a few configuration values to move penalty constants toward their final, maxmium security values.
|
||||
|
||||
*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `HF1_INACTIVITY_PENALTY_QUOTIENT` | `uint64(3 * 2**24)` (= 50,331,648) |
|
||||
| `HF1_MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**6)` (=64) |
|
||||
| `HF1_PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(2)` |
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `SYNC_COMMITTEE_SIZE` | `uint64(2**10)` (= 1,024) |
|
||||
| `SYNC_SUBCOMMITTEE_SIZE` | `uint64(2**6)` (= 64) |
|
||||
|
||||
### Time parameters
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | :-: | :-: |
|
||||
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
||||
|
||||
### Domain types
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `DOMAIN_SYNC_COMMITTEE` | `DomainType('0x07000000')` |
|
||||
|
||||
## Containers
|
||||
|
||||
### Extended containers
|
||||
|
||||
*Note*: Extended SSZ containers inherit all fields from the parent in the original
|
||||
order and append any additional fields to the end.
|
||||
|
||||
#### `BeaconBlockBody`
|
||||
|
||||
```python
|
||||
class BeaconBlockBody(Container):
|
||||
randao_reveal: BLSSignature
|
||||
eth1_data: Eth1Data # Eth1 data vote
|
||||
graffiti: Bytes32 # Arbitrary data
|
||||
# Operations
|
||||
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
|
||||
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
|
||||
attestations: List[Attestation, MAX_ATTESTATIONS]
|
||||
deposits: List[Deposit, MAX_DEPOSITS]
|
||||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
# Sync committee aggregate signature
|
||||
sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE] # [New in HF1]
|
||||
sync_committee_signature: BLSSignature # [New in HF1]
|
||||
```
|
||||
|
||||
#### `BeaconState`
|
||||
|
||||
```python
|
||||
class BeaconState(Container):
|
||||
# Versioning
|
||||
genesis_time: uint64
|
||||
genesis_validators_root: Root
|
||||
slot: Slot
|
||||
fork: Fork
|
||||
# History
|
||||
latest_block_header: BeaconBlockHeader
|
||||
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
|
||||
# Eth1
|
||||
eth1_data: Eth1Data
|
||||
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
|
||||
eth1_deposit_index: uint64
|
||||
# Registry
|
||||
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
|
||||
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Randomness
|
||||
randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
|
||||
# Slashings
|
||||
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
|
||||
# Participation
|
||||
previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
|
||||
current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Finality
|
||||
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
|
||||
previous_justified_checkpoint: Checkpoint
|
||||
current_justified_checkpoint: Checkpoint
|
||||
finalized_checkpoint: Checkpoint
|
||||
# Light client sync committees
|
||||
current_sync_committee: SyncCommittee
|
||||
next_sync_committee: SyncCommittee
|
||||
```
|
||||
|
||||
### New containers
|
||||
|
||||
#### `SyncCommittee`
|
||||
|
||||
```python
|
||||
class SyncCommittee(Container):
|
||||
pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
|
||||
pubkey_aggregates: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE // SYNC_SUBCOMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### `Predicates`
|
||||
|
||||
#### `eth2_fast_aggregate_verify`
|
||||
|
||||
```python
|
||||
def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
|
||||
"""
|
||||
Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
|
||||
"""
|
||||
if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
|
||||
return True
|
||||
return bls.FastAggregateVerify(pubkeys, message, signature)
|
||||
```
|
||||
|
||||
### Misc
|
||||
|
||||
#### `get_flag_indices_and_numerators`
|
||||
|
||||
```python
|
||||
def get_flag_indices_and_numerators() -> Sequence[Tuple[int, int]]:
|
||||
return (
|
||||
(TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_FLAG_NUMERATOR),
|
||||
(TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_FLAG_NUMERATOR),
|
||||
(TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_FLAG_NUMERATOR),
|
||||
)
|
||||
```
|
||||
|
||||
#### `add_flag`
|
||||
|
||||
```python
|
||||
def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
|
||||
flag = ParticipationFlags(2**flag_index)
|
||||
return flags | flag
|
||||
```
|
||||
|
||||
#### `has_flag`
|
||||
|
||||
```python
|
||||
def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
|
||||
flag = ParticipationFlags(2**flag_index)
|
||||
return flags & flag == flag
|
||||
```
|
||||
|
||||
### Beacon state accessors
|
||||
|
||||
#### `get_sync_committee_indices`
|
||||
|
||||
```python
|
||||
def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
"""
|
||||
Return the sequence of sync committee indices (which may include duplicate indices) for a given state and epoch.
|
||||
"""
|
||||
MAX_RANDOM_BYTE = 2**8 - 1
|
||||
base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
active_validator_indices = get_active_validator_indices(state, base_epoch)
|
||||
active_validator_count = uint64(len(active_validator_indices))
|
||||
seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
|
||||
i = 0
|
||||
sync_committee_indices: List[ValidatorIndex] = []
|
||||
while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
|
||||
shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
|
||||
candidate_index = active_validator_indices[shuffled_index]
|
||||
random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
|
||||
effective_balance = state.validators[candidate_index].effective_balance
|
||||
if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte: # Sample with replacement
|
||||
sync_committee_indices.append(candidate_index)
|
||||
i += 1
|
||||
return sync_committee_indices
|
||||
```
|
||||
|
||||
#### `get_sync_committee`
|
||||
|
||||
```python
|
||||
def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
|
||||
"""
|
||||
Return the sync committee for a given state and epoch.
|
||||
"""
|
||||
indices = get_sync_committee_indices(state, epoch)
|
||||
validators = [state.validators[index] for index in indices]
|
||||
pubkeys = [validator.pubkey for validator in validators]
|
||||
aggregates = [
|
||||
bls.AggregatePKs(pubkeys[i:i + SYNC_SUBCOMMITTEE_SIZE])
|
||||
for i in range(0, len(pubkeys), SYNC_SUBCOMMITTEE_SIZE)
|
||||
]
|
||||
return SyncCommittee(pubkeys=pubkeys, pubkey_aggregates=aggregates)
|
||||
```
|
||||
|
||||
#### `get_base_reward`
|
||||
|
||||
*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH`.
|
||||
|
||||
```python
|
||||
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
|
||||
total_balance = get_total_active_balance(state)
|
||||
effective_balance = state.validators[index].effective_balance
|
||||
return Gwei(effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance))
|
||||
```
|
||||
|
||||
#### `get_unslashed_participating_indices`
|
||||
|
||||
```python
|
||||
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
|
||||
"""
|
||||
Retrieve the active and unslashed validator indices for the given epoch and flag index.
|
||||
"""
|
||||
assert epoch in (get_previous_epoch(state), get_current_epoch(state))
|
||||
if epoch == get_current_epoch(state):
|
||||
epoch_participation = state.current_epoch_participation
|
||||
else:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
active_validator_indices = get_active_validator_indices(state, epoch)
|
||||
participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
|
||||
return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
|
||||
```
|
||||
|
||||
#### `get_flag_deltas`
|
||||
|
||||
```python
|
||||
def get_flag_deltas(state: BeaconState,
|
||||
flag_index: int,
|
||||
numerator: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
|
||||
"""
|
||||
Compute the rewards and penalties associated with a particular duty, by scanning through the participation
|
||||
flags to determine who participated and who did not and assigning them the appropriate rewards and penalties.
|
||||
"""
|
||||
rewards = [Gwei(0)] * len(state.validators)
|
||||
penalties = [Gwei(0)] * len(state.validators)
|
||||
|
||||
unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
|
||||
increment = EFFECTIVE_BALANCE_INCREMENT # Factored out from balances to avoid uint64 overflow
|
||||
unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
|
||||
active_increments = get_total_active_balance(state) // increment
|
||||
for index in get_eligible_validator_indices(state):
|
||||
base_reward = get_base_reward(state, index)
|
||||
if index in unslashed_participating_indices:
|
||||
if is_in_inactivity_leak(state):
|
||||
# Optimal participation is fully rewarded to cancel the inactivity penalty
|
||||
rewards[index] = base_reward * numerator // FLAG_DENOMINATOR
|
||||
else:
|
||||
rewards[index] = (
|
||||
(base_reward * numerator * unslashed_participating_increments)
|
||||
// (active_increments * FLAG_DENOMINATOR)
|
||||
)
|
||||
else:
|
||||
penalties[index] = base_reward * numerator // FLAG_DENOMINATOR
|
||||
return rewards, penalties
|
||||
```
|
||||
|
||||
#### New `get_inactivity_penalty_deltas`
|
||||
|
||||
*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices
|
||||
and the removal of `BASE_REWARDS_PER_EPOCH`.
|
||||
|
||||
```python
|
||||
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Compute the penalties associated with the inactivity leak, by scanning through the participation
    flags to determine who participated and who did not, applying the leak penalty globally and applying
    compensatory rewards to participants.
    """
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    if is_in_inactivity_leak(state):
        # Sum of all flag reward numerators: sized to cancel every attestation reward below.
        reward_numerator_sum = sum(numerator for (_, numerator) in get_flag_indices_and_numerators())
        # [Modified in HF1] Target participation comes from the flag-based indices.
        matching_target_attesting_indices = get_unslashed_participating_indices(
            state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)
        )
        for index in get_eligible_validator_indices(state):
            # If validator is performing optimally this cancels all attestation rewards for a neutral balance
            penalties[index] += Gwei(get_base_reward(state, index) * reward_numerator_sum // FLAG_DENOMINATOR)
            if index not in matching_target_attesting_indices:
                # Non-participants additionally leak balance, scaled by how long finality has been delayed.
                effective_balance = state.validators[index].effective_balance
                penalties[index] += Gwei(
                    effective_balance * get_finality_delay(state)
                    // HF1_INACTIVITY_PENALTY_QUOTIENT
                )

    # No rewards are issued here; an all-zero list is returned to match the (rewards, penalties) shape.
    rewards = [Gwei(0) for _ in range(len(state.validators))]
    return rewards, penalties
|
||||
```
|
||||
|
||||
### Beacon state mutators
|
||||
|
||||
#### New `slash_validator`
|
||||
|
||||
*Note*: The function `slash_validator` is modified
|
||||
with the substitution of `MIN_SLASHING_PENALTY_QUOTIENT` with `HF1_MIN_SLASHING_PENALTY_QUOTIENT`.
|
||||
|
||||
```python
|
||||
def slash_validator(state: BeaconState,
                    slashed_index: ValidatorIndex,
                    whistleblower_index: ValidatorIndex=None) -> None:
    """
    Slash the validator with index ``slashed_index``.
    """
    epoch = get_current_epoch(state)
    initiate_validator_exit(state, slashed_index)
    validator = state.validators[slashed_index]
    validator.slashed = True
    # Keep the validator withdrawable for a full slashings vector so correlated penalties can apply later.
    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
    # Record the slashed balance for the proportional (correlated) penalty in process_slashings.
    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
    # [Modified in HF1] Immediate penalty uses HF1_MIN_SLASHING_PENALTY_QUOTIENT.
    decrease_balance(state, slashed_index, validator.effective_balance // HF1_MIN_SLASHING_PENALTY_QUOTIENT)

    # Apply proposer and whistleblower rewards
    proposer_index = get_beacon_proposer_index(state)
    if whistleblower_index is None:
        # Default the whistleblower to the block proposer.
        whistleblower_index = proposer_index
    whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
    proposer_reward = Gwei(whistleblower_reward // PROPOSER_REWARD_QUOTIENT)
    increase_balance(state, proposer_index, proposer_reward)
    increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    # Per-block state transition; sub-functions run in this fixed order and assert their own validity.
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in HF1]
    process_sync_committee(state, block.body)  # [New in HF1]
|
||||
```
|
||||
|
||||
#### Modified `process_attestation`
|
||||
|
||||
*Note*: The function `process_attestation` is modified to do incentive accounting with epoch participation flags.
|
||||
|
||||
```python
|
||||
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    """
    Validate ``attestation`` and record incentive accounting via epoch participation flags,
    rewarding the proposer for each newly set flag. [Modified in HF1]
    """
    data = attestation.data
    # Attestation must target the current or previous epoch, match its slot's epoch,
    # and be included within the allowed inclusion window.
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
    assert data.target.epoch == compute_epoch_at_slot(data.slot)
    assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
    assert data.index < get_committee_count_per_slot(state, data.target.epoch)

    committee = get_beacon_committee(state, data.slot, data.index)
    assert len(attestation.aggregation_bits) == len(committee)

    # Select the participation list and justified checkpoint for the attestation's epoch.
    if data.target.epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
        justified_checkpoint = state.current_justified_checkpoint
    else:
        epoch_participation = state.previous_epoch_participation
        justified_checkpoint = state.previous_justified_checkpoint

    # Matching roots
    is_matching_head = data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = data.target.root == get_block_root(state, data.target.epoch)
    assert is_matching_source

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))

    # Participation flag indices: each flag requires both correctness and timely inclusion.
    participation_flag_indices = []
    if is_matching_head and is_matching_target and state.slot <= data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
        participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)
    if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
        participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
    if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
        participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)

    # Update epoch participation flags
    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, data, attestation.aggregation_bits):
        for flag_index, flag_numerator in get_flag_indices_and_numerators():
            # Only newly set flags count toward the proposer reward (no double counting).
            if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                proposer_reward_numerator += get_base_reward(state, index) * flag_numerator

    # Reward proposer
    proposer_reward = Gwei(proposer_reward_numerator // (FLAG_DENOMINATOR * PROPOSER_REWARD_QUOTIENT))
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
|
||||
```
|
||||
|
||||
|
||||
#### New `process_deposit`
|
||||
|
||||
*Note*: The function `process_deposit` is modified to initialize `previous_epoch_participation` and `current_epoch_participation`.
|
||||
|
||||
```python
|
||||
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    """
    Process an Eth1 ``deposit``: add a new validator (with empty participation flags [New in HF1])
    or top up an existing validator's balance.
    """
    # Verify the Merkle branch
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

    # Deposits must be processed in order
    state.eth1_deposit_index += 1

    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
    validator_pubkeys = [v.pubkey for v in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        if not bls.Verify(pubkey, signing_root, deposit.data.signature):
            # Invalid proof of possession: consume the deposit without effect (do not raise).
            return

        # Add validator and balance entries
        state.validators.append(get_validator_from_deposit(state, deposit))
        state.balances.append(amount)
        # [New in HF1] Initialize empty participation flags for new validator
        state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
        state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
|
||||
```
|
||||
|
||||
#### Sync committee processing
|
||||
|
||||
```python
|
||||
def process_sync_committee(state: BeaconState, body: BeaconBlockBody) -> None:
    """
    Verify the block's sync committee aggregate signature over the previous block root
    and reward the participants and the proposer. [New in HF1]
    """
    # Verify sync committee aggregate signature signing over the previous slot block root
    # max(..., 1) guards the genesis slot so the subtraction never underflows.
    previous_slot = Slot(max(int(state.slot), 1) - 1)
    committee_indices = get_sync_committee_indices(state, get_current_epoch(state))
    participant_indices = [index for index, bit in zip(committee_indices, body.sync_committee_bits) if bit]
    committee_pubkeys = state.current_sync_committee.pubkeys
    participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, body.sync_committee_bits) if bit]
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
    signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
    assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, body.sync_committee_signature)

    # Reward sync committee participants
    proposer_rewards = Gwei(0)
    active_validator_count = uint64(len(get_active_validator_indices(state, get_current_epoch(state))))
    for participant_index in participant_indices:
        # The proposer's cut is carved out of each participant's maximum reward.
        proposer_reward = get_proposer_reward(state, participant_index)
        proposer_rewards += proposer_reward
        base_reward = get_base_reward(state, participant_index)
        max_participant_reward = base_reward - proposer_reward
        reward = Gwei(max_participant_reward * active_validator_count // (len(committee_indices) * SLOTS_PER_EPOCH))
        increase_balance(state, participant_index, reward)

    # Reward beacon proposer
    increase_balance(state, get_beacon_proposer_index(state), proposer_rewards)
|
||||
```
|
||||
|
||||
### Epoch processing
|
||||
|
||||
```python
|
||||
def process_epoch(state: BeaconState) -> None:
    # Epoch transition; sub-functions run in this fixed, normative order.
    process_justification_and_finalization(state)  # [Modified in HF1]
    process_rewards_and_penalties(state)  # [Modified in HF1]
    process_registry_updates(state)
    process_slashings(state)  # [Modified in HF1]
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_flag_updates(state)  # [New in HF1]
    process_sync_committee_updates(state)  # [New in HF1]
|
||||
```
|
||||
|
||||
#### Justification and finalization
|
||||
|
||||
*Note*: The function `process_justification_and_finalization` is modified with `matching_target_attestations` replaced by `matching_target_indices`.
|
||||
|
||||
```python
|
||||
def process_justification_and_finalization(state: BeaconState) -> None:
    """
    Update justification bits and checkpoints from flag-based target participation,
    then apply the finalization rules. [Modified in HF1: uses matching_target_indices]
    """
    # Initial FFG checkpoint values have a `0x00` stub for `root`.
    # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
    if get_current_epoch(state) <= GENESIS_EPOCH + 1:
        return
    previous_epoch = get_previous_epoch(state)
    current_epoch = get_current_epoch(state)
    old_previous_justified_checkpoint = state.previous_justified_checkpoint
    old_current_justified_checkpoint = state.current_justified_checkpoint

    # Process justifications
    state.previous_justified_checkpoint = state.current_justified_checkpoint
    # Shift the bitfield: bit 0 represents the current epoch, freshly cleared.
    state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
    state.justification_bits[0] = 0b0
    # A 2/3 supermajority of active balance attesting to the target justifies the checkpoint.
    matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
    if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
                                                        root=get_block_root(state, previous_epoch))
        state.justification_bits[1] = 0b1
    matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, current_epoch)
    if get_total_balance(state, matching_target_indices) * 3 >= get_total_active_balance(state) * 2:
        state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
                                                        root=get_block_root(state, current_epoch))
        state.justification_bits[0] = 0b1

    # Process finalizations
    bits = state.justification_bits
    # The 2nd/3rd/4th most recent epochs are justified, the 2nd using the 4th as source
    if all(bits[1:4]) and old_previous_justified_checkpoint.epoch + 3 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 2nd/3rd most recent epochs are justified, the 2nd using the 3rd as source
    if all(bits[1:3]) and old_previous_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_previous_justified_checkpoint
    # The 1st/2nd/3rd most recent epochs are justified, the 1st using the 3rd as source
    if all(bits[0:3]) and old_current_justified_checkpoint.epoch + 2 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
    # The 1st/2nd most recent epochs are justified, the 1st using the 2nd as source
    if all(bits[0:2]) and old_current_justified_checkpoint.epoch + 1 == current_epoch:
        state.finalized_checkpoint = old_current_justified_checkpoint
|
||||
```
|
||||
|
||||
#### Rewards and penalties
|
||||
|
||||
*Note*: The function `process_rewards_and_penalties` is modified to support the incentive reforms.
|
||||
|
||||
```python
|
||||
def process_rewards_and_penalties(state: BeaconState) -> None:
    # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch
    if get_current_epoch(state) == GENESIS_EPOCH:
        return
    # One (rewards, penalties) pair per participation flag, plus the inactivity-leak pair.
    flag_deltas = [
        get_flag_deltas(state, flag_index, flag_numerator)
        for (flag_index, flag_numerator) in get_flag_indices_and_numerators()
    ]
    deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
    for (rewards, penalties) in deltas:
        for index in range(len(state.validators)):
            increase_balance(state, ValidatorIndex(index), rewards[index])
            decrease_balance(state, ValidatorIndex(index), penalties[index])
|
||||
```
|
||||
|
||||
#### Slashings
|
||||
|
||||
*Note*: The function `process_slashings` is modified to use `HF1_PROPORTIONAL_SLASHING_MULTIPLIER`.
|
||||
|
||||
```python
|
||||
def process_slashings(state: BeaconState) -> None:
    """
    Apply the correlated slashing penalty, proportional to the total recently
    slashed balance. [Modified in HF1: uses HF1_PROPORTIONAL_SLASHING_MULTIPLIER]
    """
    epoch = get_current_epoch(state)
    total_balance = get_total_active_balance(state)
    # Scale the penalty with total slashed balance, capped at the total active balance.
    adjusted_total_slashing_balance = min(sum(state.slashings) * HF1_PROPORTIONAL_SLASHING_MULTIPLIER, total_balance)
    for index, validator in enumerate(state.validators):
        # Penalize exactly once, at the midpoint of the withdrawability delay.
        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
            penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
            penalty = penalty_numerator // total_balance * increment
            decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
```
|
||||
|
||||
#### Participation flags updates
|
||||
|
||||
*Note*: The function `process_participation_flag_updates` is new.
|
||||
|
||||
```python
|
||||
def process_participation_flag_updates(state: BeaconState) -> None:
    # Rotate: the finished epoch's flags become "previous"; start the new epoch with all flags cleared.
    state.previous_epoch_participation = state.current_epoch_participation
    state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
|
||||
```
|
||||
|
||||
#### Sync committee updates
|
||||
|
||||
*Note*: The function `process_sync_committee_updates` is new.
|
||||
|
||||
```python
|
||||
def process_sync_committee_updates(state: BeaconState) -> None:
    next_epoch = get_current_epoch(state) + Epoch(1)
    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
        # At a sync committee period boundary: promote the next committee and precompute its successor.
        state.current_sync_committee = state.next_sync_committee
        state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
```
|
82
specs/lightclient/fork.md
Normal file
82
specs/lightclient/fork.md
Normal file
@ -0,0 +1,82 @@
|
||||
# Ethereum 2.0 Light Client Support -- From Phase 0 to Light Client Patch
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Configuration](#configuration)
|
||||
- [Fork to Light-client patch](#fork-to-light-client-patch)
|
||||
- [Fork trigger](#fork-trigger)
|
||||
- [Upgrading the state](#upgrading-the-state)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the process of moving from Phase 0 of Ethereum 2.0 to the light-client patch.
|
||||
|
||||
## Configuration
|
||||
|
||||
Warning: this configuration is not definitive.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `LIGHTCLIENT_PATCH_FORK_VERSION` | `Version('0x01000000')` |
|
||||
| `LIGHTCLIENT_PATCH_FORK_SLOT` | `Slot(0)` **TBD** |
|
||||
|
||||
## Fork to Light-client patch
|
||||
|
||||
### Fork trigger
|
||||
|
||||
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `LIGHTCLIENT_PATCH_FORK_SLOT`, where `LIGHTCLIENT_PATCH_FORK_SLOT % SLOTS_PER_EPOCH == 0`.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
After `process_slots` of Phase 0 finishes, if `state.slot == LIGHTCLIENT_PATCH_FORK_SLOT`, an irregular state change is made to upgrade to light-client patch.
|
||||
|
||||
```python
|
||||
def upgrade_to_lightclient_patch(pre: phase0.BeaconState) -> BeaconState:
    """
    Irregular state transition at ``LIGHTCLIENT_PATCH_FORK_SLOT``: rebuild the phase 0
    state under the light-client patch fork, carrying over all existing fields and
    initializing the new participation and sync committee fields.
    """
    epoch = get_current_epoch(pre)
    post = BeaconState(
        genesis_time=pre.genesis_time,
        genesis_validators_root=pre.genesis_validators_root,
        slot=pre.slot,
        fork=Fork(
            previous_version=pre.fork.current_version,
            current_version=LIGHTCLIENT_PATCH_FORK_VERSION,
            epoch=epoch,
        ),
        # History
        latest_block_header=pre.latest_block_header,
        block_roots=pre.block_roots,
        state_roots=pre.state_roots,
        historical_roots=pre.historical_roots,
        # Eth1
        eth1_data=pre.eth1_data,
        eth1_data_votes=pre.eth1_data_votes,
        eth1_deposit_index=pre.eth1_deposit_index,
        # Registry
        validators=pre.validators,
        balances=pre.balances,
        # Randomness
        randao_mixes=pre.randao_mixes,
        # Slashings
        slashings=pre.slashings,
        # Participation: phase 0 pending attestations are dropped; flags start empty for everyone.
        previous_epoch_participation=[ParticipationFlags(0) for _ in range(len(pre.validators))],
        current_epoch_participation=[ParticipationFlags(0) for _ in range(len(pre.validators))],
        # Finality
        justification_bits=pre.justification_bits,
        previous_justified_checkpoint=pre.previous_justified_checkpoint,
        current_justified_checkpoint=pre.current_justified_checkpoint,
        finalized_checkpoint=pre.finalized_checkpoint,
    )
    # Fill in sync committees
    post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
    post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    return post
|
||||
```
|
184
specs/lightclient/sync-protocol.md
Normal file
184
specs/lightclient/sync-protocol.md
Normal file
@ -0,0 +1,184 @@
|
||||
# Minimal Light Client
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
- [Misc](#misc)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [Containers](#containers)
|
||||
- [`LightClientSnapshot`](#lightclientsnapshot)
|
||||
- [`LightClientUpdate`](#lightclientupdate)
|
||||
- [`LightClientStore`](#lightclientstore)
|
||||
- [Light client state updates](#light-client-state-updates)
|
||||
- [`is_valid_light_client_update`](#is_valid_light_client_update)
|
||||
- [`apply_light_client_update`](#apply_light_client_update)
|
||||
- [`process_light_client_update`](#process_light_client_update)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
Eth2 is designed to be light client friendly for constrained environments to
|
||||
access Eth2 with reasonable safety and liveness.
|
||||
Such environments include resource-constrained devices (e.g. phones for trust-minimised wallets)
|
||||
and metered VMs (e.g. blockchain VMs for cross-chain bridges).
|
||||
|
||||
This document suggests a minimal light client design for the beacon chain that
|
||||
uses sync committees introduced in [this beacon chain extension](./beacon-chain.md).
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `FINALIZED_ROOT_INDEX` | `Index(BeaconState, 'finalized_checkpoint', 'root')` |
|
||||
| `NEXT_SYNC_COMMITTEE_INDEX` | `Index(BeaconState, 'next_sync_committee')` |
|
||||
|
||||
## Configuration
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` |
|
||||
| `MAX_VALID_LIGHT_CLIENT_UPDATES` | `uint64(2**64 - 1)` |
|
||||
|
||||
### Time parameters
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | :-: | :-: |
|
||||
| `LIGHT_CLIENT_UPDATE_TIMEOUT` | `Slot(2**13)` | slots | ~27 hours |
|
||||
|
||||
## Containers
|
||||
|
||||
#### `LightClientSnapshot`
|
||||
|
||||
```python
|
||||
class LightClientSnapshot(Container):
    """The light client's latest verified view: a header and its two sync committees."""
    # Beacon block header
    header: BeaconBlockHeader
    # Sync committees corresponding to the header
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
|
||||
```
|
||||
|
||||
#### `LightClientUpdate`
|
||||
|
||||
```python
|
||||
class LightClientUpdate(Container):
    """A self-contained proof for advancing a light client snapshot to a newer header."""
    # Update beacon block header
    header: BeaconBlockHeader
    # Next sync committee corresponding to the header
    next_sync_committee: SyncCommittee
    next_sync_committee_branch: Vector[Bytes32, log2(NEXT_SYNC_COMMITTEE_INDEX)]
    # Finality proof for the update header
    finality_header: BeaconBlockHeader
    finality_branch: Vector[Bytes32, log2(FINALIZED_ROOT_INDEX)]
    # Sync committee aggregate signature
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
    sync_committee_signature: BLSSignature
    # Fork version for the aggregate signature
    fork_version: Version
|
||||
```
|
||||
|
||||
#### `LightClientStore`
|
||||
|
||||
```python
|
||||
class LightClientStore(Container):
    """Light client state: the current snapshot plus buffered, validated updates."""
    snapshot: LightClientSnapshot
    valid_updates: List[LightClientUpdate, MAX_VALID_LIGHT_CLIENT_UPDATES]
|
||||
```
|
||||
|
||||
## Light client state updates
|
||||
|
||||
A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot)` where `current_slot` is the current slot based on some local clock.
|
||||
|
||||
#### `is_valid_light_client_update`
|
||||
|
||||
```python
|
||||
def is_valid_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> bool:
    """
    Validate ``update`` against ``snapshot``: slot ordering, sync committee period
    continuity, the finality and next-sync-committee Merkle proofs, and the
    sync committee aggregate signature. Raises ``AssertionError`` on any failure.
    """
    # Verify update slot is larger than snapshot slot
    assert update.header.slot > snapshot.header.slot

    # Verify update does not skip a sync committee period
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert update_period in (snapshot_period, snapshot_period + 1)

    # Verify update header root is the finalized root of the finality header, if specified
    if update.finality_header == BeaconBlockHeader():
        # No finality proof supplied: branch must be all-zero and the header itself is what was signed.
        signed_header = update.header
        assert update.finality_branch == [ZERO_HASH for _ in range(log2(FINALIZED_ROOT_INDEX))]
    else:
        signed_header = update.finality_header
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.header),
            branch=update.finality_branch,
            depth=log2(FINALIZED_ROOT_INDEX),
            index=FINALIZED_ROOT_INDEX % 2**log2(FINALIZED_ROOT_INDEX),
            root=update.finality_header.state_root,
        )

    # Verify update next sync committee if the update period incremented
    if update_period == snapshot_period:
        # Same period: the snapshot's current committee signed; no committee proof expected.
        sync_committee = snapshot.current_sync_committee
        assert update.next_sync_committee_branch == [ZERO_HASH for _ in range(log2(NEXT_SYNC_COMMITTEE_INDEX))]
    else:
        sync_committee = snapshot.next_sync_committee
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.next_sync_committee),
            branch=update.next_sync_committee_branch,
            depth=log2(NEXT_SYNC_COMMITTEE_INDEX),
            index=NEXT_SYNC_COMMITTEE_INDEX % 2**log2(NEXT_SYNC_COMMITTEE_INDEX),
            root=update.header.state_root,
        )

    # Verify sync committee has sufficient participants
    assert sum(update.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS

    # Verify sync committee aggregate signature
    participant_pubkeys = [pubkey for (bit, pubkey) in zip(update.sync_committee_bits, sync_committee.pubkeys) if bit]
    domain = compute_domain(DOMAIN_SYNC_COMMITTEE, update.fork_version)
    signing_root = compute_signing_root(signed_header, domain)
    assert bls.FastAggregateVerify(participant_pubkeys, signing_root, update.sync_committee_signature)

    return True
|
||||
```
|
||||
|
||||
#### `apply_light_client_update`
|
||||
|
||||
```python
|
||||
def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> None:
    # Advance the snapshot to the update's header, rotating committees when a period boundary is crossed.
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if update_period == snapshot_period + 1:
        snapshot.current_sync_committee = snapshot.next_sync_committee
        snapshot.next_sync_committee = update.next_sync_committee
    snapshot.header = update.header
|
||||
```
|
||||
|
||||
#### `process_light_client_update`
|
||||
|
||||
```python
|
||||
def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot) -> None:
    """
    Ingest ``update``: validate it and buffer it in ``store.valid_updates``; apply it
    immediately when a 2/3 supermajority signed it, or force-apply the best buffered
    update once the timeout since the snapshot header has elapsed.
    """
    # Validate update
    assert is_valid_light_client_update(store.snapshot, update)
    store.valid_updates.append(update)

    # NOTE(review): `update.header != update.finality_header` is meant to express "a finality
    # proof is present", but it also holds when finality_header is the empty header — confirm intent.
    if sum(update.sync_committee_bits) * 3 > len(update.sync_committee_bits) * 2 and update.header != update.finality_header:
        # Apply update if 2/3 quorum is reached and we have a finality proof
        # Fix: apply_light_client_update takes a LightClientSnapshot, not the whole store.
        apply_light_client_update(store.snapshot, update)
        store.valid_updates = []
    elif current_slot > store.snapshot.header.slot + LIGHT_CLIENT_UPDATE_TIMEOUT:
        # Forced best update when the update timeout has elapsed
        # Fix: `snapshot` was an undefined name here; the slot lives on store.snapshot.
        apply_light_client_update(store.snapshot, max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
        store.valid_updates = []
|
||||
```
|
@ -1,13 +1,10 @@
|
||||
# Ethereum 2.0 Phase 0 -- The Beacon Chain
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Notation](#notation)
|
||||
- [Custom types](#custom-types)
|
||||
@ -16,6 +13,7 @@
|
||||
- [Misc](#misc)
|
||||
- [Gwei values](#gwei-values)
|
||||
- [Initial values](#initial-values)
|
||||
- [Withdrawal prefixes](#withdrawal-prefixes)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [State list lengths](#state-list-lengths)
|
||||
- [Rewards and penalties](#rewards-and-penalties)
|
||||
@ -60,7 +58,7 @@
|
||||
- [Crypto](#crypto)
|
||||
- [`hash`](#hash)
|
||||
- [`hash_tree_root`](#hash_tree_root)
|
||||
- [BLS Signatures](#bls-signatures)
|
||||
- [BLS signatures](#bls-signatures)
|
||||
- [Predicates](#predicates)
|
||||
- [`is_active_validator`](#is_active_validator)
|
||||
- [`is_eligible_for_activation_queue`](#is_eligible_for_activation_queue)
|
||||
@ -116,7 +114,12 @@
|
||||
- [`process_rewards_and_penalties`](#process_rewards_and_penalties)
|
||||
- [Registry updates](#registry-updates)
|
||||
- [Slashings](#slashings)
|
||||
- [Final updates](#final-updates)
|
||||
- [Eth1 data votes updates](#eth1-data-votes-updates)
|
||||
- [Effective balances updates](#effective-balances-updates)
|
||||
- [Slashings balances updates](#slashings-balances-updates)
|
||||
- [Randao mixes updates](#randao-mixes-updates)
|
||||
- [Historical roots updates](#historical-roots-updates)
|
||||
- [Participation records rotation](#participation-records-rotation)
|
||||
- [Block processing](#block-processing)
|
||||
- [Block header](#block-header)
|
||||
- [RANDAO](#randao)
|
||||
@ -177,7 +180,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
|
||||
## Configuration
|
||||
|
||||
*Note*: The default mainnet configuration values are included here for spec-design purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory. These configurations are updated for releases and may be out of sync during `dev` changes.
|
||||
*Note*: The default mainnet configuration values are included here for illustrative purposes. The different configurations for mainnet, testnets, and YAML-based testing can be found in the [`configs/constant_presets`](../../configs) directory.
|
||||
|
||||
### Misc
|
||||
|
||||
@ -191,7 +194,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
| `CHURN_LIMIT_QUOTIENT` | `uint64(2**16)` (= 65,536) |
|
||||
| `SHUFFLE_ROUND_COUNT` | `uint64(90)` |
|
||||
| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `uint64(2**14)` (= 16,384) |
|
||||
| `MIN_GENESIS_TIME` | `uint64(1578009600)` (Jan 3, 2020) |
|
||||
| `MIN_GENESIS_TIME` | `uint64(1606824000)` (Dec 1, 2020, 12pm UTC) |
|
||||
| `HYSTERESIS_QUOTIENT` | `uint64(4)` |
|
||||
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `uint64(1)` |
|
||||
| `HYSTERESIS_UPWARD_MULTIPLIER` | `uint64(5)` |
|
||||
@ -212,7 +215,13 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `GENESIS_FORK_VERSION` | `Version('0x00000000')` |
|
||||
|
||||
### Withdrawal prefixes
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `BLS_WITHDRAWAL_PREFIX` | `Bytes1('0x00')` |
|
||||
| `ETH1_ADDRESS_WITHDRAWAL_PREFIX` | `Bytes1('0x01')` |
|
||||
|
||||
### Time parameters
|
||||
|
||||
@ -253,7 +262,7 @@ The following values are (non-configurable) constants used throughout the specif
|
||||
|
||||
- The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.
|
||||
|
||||
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accoutable safety margin.
|
||||
- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.
|
||||
|
||||
### Max operations per block
|
||||
|
||||
@ -603,17 +612,17 @@ def bytes_to_uint64(data: bytes) -> uint64:
|
||||
|
||||
`def hash_tree_root(object: SSZSerializable) -> Root` is a function for hashing objects into a single root by utilizing a hash tree structure, as defined in the [SSZ spec](../../ssz/simple-serialize.md#merkleization).
|
||||
|
||||
#### BLS Signatures
|
||||
#### BLS signatures
|
||||
|
||||
Eth2 makes use of BLS signatures as specified in the [IETF draft BLS specification draft-irtf-cfrg-bls-signature-04](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04). Specifically, eth2 uses the `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` ciphersuite which implements the following interfaces:
|
||||
The [IETF BLS signature draft standard v4](https://tools.ietf.org/html/draft-irtf-cfrg-bls-signature-04) with ciphersuite `BLS_SIG_BLS12381G2_XMD:SHA-256_SSWU_RO_POP_` defines the following functions:
|
||||
|
||||
- `def Sign(SK: int, message: Bytes) -> BLSSignature`
|
||||
- `def Verify(PK: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def Sign(privkey: int, message: Bytes) -> BLSSignature`
|
||||
- `def Verify(pubkey: BLSPubkey, message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def Aggregate(signatures: Sequence[BLSSignature]) -> BLSSignature`
|
||||
- `def FastAggregateVerify(PKs: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def AggregateVerify(PKs: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
|
||||
- `def FastAggregateVerify(pubkeys: Sequence[BLSPubkey], message: Bytes, signature: BLSSignature) -> bool`
|
||||
- `def AggregateVerify(pubkeys: Sequence[BLSPubkey], messages: Sequence[Bytes], signature: BLSSignature) -> bool`
|
||||
|
||||
Within these specifications, BLS signatures are treated as a module for notational clarity, thus to verify a signature `bls.Verify(...)` is used.
|
||||
The above functions are accessed through the `bls` module, e.g. `bls.Verify`.
|
||||
|
||||
### Predicates
|
||||
|
||||
@ -1191,8 +1200,6 @@ def is_valid_genesis_state(state: BeaconState) -> bool:
|
||||
return True
|
||||
```
|
||||
|
||||
*Note*: The `is_valid_genesis_state` function (including `MIN_GENESIS_TIME` and `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT`) is a placeholder for testing. It has yet to be finalized by the community, and can be updated as necessary.
|
||||
|
||||
### Genesis block
|
||||
|
||||
Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
||||
@ -1202,7 +1209,7 @@ Let `genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))`.
|
||||
The post-state corresponding to a pre-state `state` and a signed block `signed_block` is defined as `state_transition(state, signed_block)`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
|
||||
|
||||
```python
|
||||
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:
|
||||
def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> None:
|
||||
block = signed_block.message
|
||||
# Process slots (including those with no blocks) since block
|
||||
process_slots(state, block.slot)
|
||||
@ -1214,8 +1221,6 @@ def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, valida
|
||||
# Verify state root
|
||||
if validate_result:
|
||||
assert block.state_root == hash_tree_root(state)
|
||||
# Return post-state
|
||||
return state
|
||||
```
|
||||
|
||||
```python
|
||||
@ -1257,7 +1262,12 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_slashings(state)
|
||||
process_final_updates(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_record_updates(state)
|
||||
```
|
||||
|
||||
#### Helper functions
|
||||
@ -1564,15 +1574,19 @@ def process_slashings(state: BeaconState) -> None:
|
||||
decrease_balance(state, ValidatorIndex(index), penalty)
|
||||
```
|
||||
|
||||
#### Final updates
|
||||
|
||||
#### Eth1 data votes updates
|
||||
```python
|
||||
def process_final_updates(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
next_epoch = Epoch(current_epoch + 1)
|
||||
def process_eth1_data_reset(state: BeaconState) -> None:
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
# Reset eth1 data votes
|
||||
if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
|
||||
state.eth1_data_votes = []
|
||||
```
|
||||
|
||||
#### Effective balances updates
|
||||
|
||||
```python
|
||||
def process_effective_balance_updates(state: BeaconState) -> None:
|
||||
# Update effective balances with hysteresis
|
||||
for index, validator in enumerate(state.validators):
|
||||
balance = state.balances[index]
|
||||
@ -1584,14 +1598,41 @@ def process_final_updates(state: BeaconState) -> None:
|
||||
or validator.effective_balance + UPWARD_THRESHOLD < balance
|
||||
):
|
||||
validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
|
||||
```
|
||||
|
||||
#### Slashings balances updates
|
||||
|
||||
```python
|
||||
def process_slashings_reset(state: BeaconState) -> None:
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
# Reset slashings
|
||||
state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
|
||||
```
|
||||
|
||||
#### Randao mixes updates
|
||||
|
||||
```python
|
||||
def process_randao_mixes_reset(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
next_epoch = Epoch(current_epoch + 1)
|
||||
# Set randao mix
|
||||
state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
|
||||
```
|
||||
|
||||
#### Historical roots updates
|
||||
```python
|
||||
def process_historical_roots_update(state: BeaconState) -> None:
|
||||
# Set historical root accumulator
|
||||
next_epoch = Epoch(get_current_epoch(state) + 1)
|
||||
if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
|
||||
historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
|
||||
state.historical_roots.append(hash_tree_root(historical_batch))
|
||||
```
|
||||
|
||||
#### Participation records rotation
|
||||
|
||||
```python
|
||||
def process_participation_record_updates(state: BeaconState) -> None:
|
||||
# Rotate current/previous epoch attestations
|
||||
state.previous_epoch_attestations = state.current_epoch_attestations
|
||||
state.current_epoch_attestations = []
|
||||
|
@ -1,13 +1,10 @@
|
||||
# Ethereum 2.0 Phase 0 -- Deposit Contract
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
@ -43,7 +40,7 @@ These configurations are updated for releases and may be out of sync during `dev
|
||||
| - | - |
|
||||
| `DEPOSIT_CHAIN_ID` | `1` |
|
||||
| `DEPOSIT_NETWORK_ID` | `1` |
|
||||
| `DEPOSIT_CONTRACT_ADDRESS` | **TBD** |
|
||||
| `DEPOSIT_CONTRACT_ADDRESS` | `0x00000000219ab540356cBB839Cbe05303d7705Fa` |
|
||||
|
||||
## Ethereum 1.0 deposit contract
|
||||
|
||||
@ -61,12 +58,13 @@ The amount of ETH (rounded down to the closest Gwei) sent to the deposit contrac
|
||||
|
||||
#### Withdrawal credentials
|
||||
|
||||
One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawing validator balance (e.g. to another validator, or to shards). The first byte of `withdrawal_credentials` is a version number. As of now, the only expected format is as follows:
|
||||
One of the `DepositData` fields is `withdrawal_credentials` which constrains validator withdrawals.
|
||||
The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
|
||||
The withdrawal prefixes currently supported are `BLS_WITHDRAWAL_PREFIX` and `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
|
||||
Read more in the [validator guide](./validator.md#withdrawal-credentials).
|
||||
|
||||
* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
|
||||
* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey
|
||||
|
||||
The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
|
||||
*Note*: The deposit contract does not validate the `withdrawal_credentials` field.
|
||||
Support for new withdrawal prefixes can be added without modifying the deposit contract.
|
||||
|
||||
#### `DepositEvent` log
|
||||
|
||||
|
@ -1,13 +1,10 @@
|
||||
# Ethereum 2.0 Phase 0 -- Beacon Chain Fork Choice
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Fork choice](#fork-choice)
|
||||
- [Configuration](#configuration)
|
||||
@ -226,11 +223,10 @@ def get_head(store: Store) -> Root:
|
||||
blocks = get_filtered_block_tree(store)
|
||||
# Execute the LMD-GHOST fork choice
|
||||
head = store.justified_checkpoint.root
|
||||
justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch)
|
||||
while True:
|
||||
children = [
|
||||
root for root in blocks.keys()
|
||||
if blocks[root].parent_root == head and blocks[root].slot > justified_slot
|
||||
if blocks[root].parent_root == head
|
||||
]
|
||||
if len(children) == 0:
|
||||
return head
|
||||
@ -355,7 +351,8 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
|
||||
assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root
|
||||
|
||||
# Check the block is valid and compute the post-state
|
||||
state = state_transition(pre_state, signed_block, True)
|
||||
state = pre_state.copy()
|
||||
state_transition(state, signed_block, True)
|
||||
# Add new block to the store
|
||||
store.blocks[hash_tree_root(block)] = block
|
||||
# Add new state for this block to the store
|
||||
|
@ -14,7 +14,6 @@ It consists of four main sections:
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Network fundamentals](#network-fundamentals)
|
||||
- [Transport](#transport)
|
||||
- [Encryption and identification](#encryption-and-identification)
|
||||
@ -102,7 +101,7 @@ It consists of four main sections:
|
||||
- [Compression/Encoding](#compressionencoding)
|
||||
- [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
|
||||
- [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
|
||||
- [Why are using Snappy for compression?](#why-are-using-snappy-for-compression)
|
||||
- [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression)
|
||||
- [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
|
||||
- [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds)
|
||||
- [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||
@ -213,8 +212,8 @@ including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsu
|
||||
|
||||
The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md#parameters) will be used:
|
||||
|
||||
- `D` (topic stable mesh target count): 6
|
||||
- `D_low` (topic stable mesh low watermark): 5
|
||||
- `D` (topic stable mesh target count): 8
|
||||
- `D_low` (topic stable mesh low watermark): 6
|
||||
- `D_high` (topic stable mesh high watermark): 12
|
||||
- `D_lazy` (gossip target): 6
|
||||
- `heartbeat_interval` (frequency of heartbeat, seconds): 0.7
|
||||
@ -293,7 +292,7 @@ If one or more validations fail while processing the items in order, return eith
|
||||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||
|
||||
There are three additional global topics are used to propagate lower frequency validator messages
|
||||
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||
|
||||
##### `beacon_block`
|
||||
@ -314,6 +313,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue blocks for processing once the parent block is retrieved).
|
||||
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
||||
- _[REJECT]_ The block is from a higher slot than its parent.
|
||||
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
||||
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||
== store.finalized_checkpoint.root`
|
||||
@ -333,6 +333,8 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
|
||||
- _[IGNORE]_ `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
|
||||
i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot`
|
||||
(a client MAY queue future aggregates for processing at the appropriate slot).
|
||||
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
|
||||
compute_epoch_at_slot(aggregate.data.slot)`
|
||||
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
|
||||
(via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
|
||||
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
|
||||
@ -418,7 +420,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
||||
- _[REJECT]_ The signature of `attestation` is valid.
|
||||
- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
|
||||
(via both gossip and non-gossip sources)
|
||||
(a client MAY queue aggregates for processing once block is retrieved).
|
||||
(a client MAY queue attestations for processing once block is retrieved).
|
||||
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
||||
@ -936,7 +938,7 @@ where the fields of `ENRForkID` are defined as
|
||||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
|
||||
If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
||||
|
||||
*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available.
|
||||
*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available.
|
||||
Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
|
||||
One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
|
||||
In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
|
||||
@ -1217,14 +1219,12 @@ For minimum and maximum allowable slot broadcast times,
|
||||
Although messages can at times be eagerly gossiped to the network,
|
||||
the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot.
|
||||
|
||||
The value of this constant is currently a placeholder and will be tuned based on data observed in testnets.
|
||||
|
||||
### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets?
|
||||
|
||||
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
|
||||
The exact grouping will be dependent on more involved network tests.
|
||||
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||
The value is currently set to to be equal `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||
|
||||
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||
|
||||
@ -1370,7 +1370,7 @@ Thus, it may happen that we need to transmit an empty list - there are several w
|
||||
|
||||
Semantically, it is not an error that a block is missing during a slot making option 2 unnatural.
|
||||
|
||||
Option 1 allows allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
Option 1 allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||
|
||||
Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response,
|
||||
but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen.
|
||||
@ -1496,7 +1496,7 @@ This looks different depending on the interaction layer:
|
||||
implementers are encouraged to encapsulate the encoding and compression logic behind
|
||||
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||
|
||||
### Why are using Snappy for compression?
|
||||
### Why are we using Snappy for compression?
|
||||
|
||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
||||
and can calculate the size of the uncompressed object without inflating it in memory.
|
||||
|
@ -1,6 +1,6 @@
|
||||
# Ethereum 2.0 Phase 0 -- Honest Validator
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
|
||||
This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.
|
||||
|
||||
## Table of contents
|
||||
|
||||
@ -8,7 +8,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Constants](#constants)
|
||||
@ -16,7 +15,9 @@
|
||||
- [Becoming a validator](#becoming-a-validator)
|
||||
- [Initialization](#initialization)
|
||||
- [BLS public key](#bls-public-key)
|
||||
- [BLS withdrawal key](#bls-withdrawal-key)
|
||||
- [Withdrawal credentials](#withdrawal-credentials)
|
||||
- [`BLS_WITHDRAWAL_PREFIX`](#bls_withdrawal_prefix)
|
||||
- [`ETH1_ADDRESS_WITHDRAWAL_PREFIX`](#eth1_address_withdrawal_prefix)
|
||||
- [Submit deposit](#submit-deposit)
|
||||
- [Process deposit](#process-deposit)
|
||||
- [Validator index](#validator-index)
|
||||
@ -65,6 +66,7 @@
|
||||
- [How to avoid slashing](#how-to-avoid-slashing)
|
||||
- [Proposer slashing](#proposer-slashing)
|
||||
- [Attester slashing](#attester-slashing)
|
||||
- [Protection best practices](#protection-best-practices)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
@ -100,14 +102,41 @@ A validator must initialize many parameters locally before submitting a deposit
|
||||
|
||||
Validator public keys are [G1 points](beacon-chain.md#bls-signatures) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator.
|
||||
|
||||
#### BLS withdrawal key
|
||||
#### Withdrawal credentials
|
||||
|
||||
A secondary withdrawal private key, `withdrawal_privkey`, must also be securely generated along with the resultant `withdrawal_pubkey`. This `withdrawal_privkey` does not have to be available for signing during the normal lifetime of a validator and can live in "cold storage".
|
||||
The `withdrawal_credentials` field constrains validator withdrawals.
|
||||
The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
|
||||
|
||||
The validator constructs their `withdrawal_credentials` via the following:
|
||||
The following withdrawal prefixes are currently supported.
|
||||
|
||||
* Set `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`.
|
||||
* Set `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]`.
|
||||
##### `BLS_WITHDRAWAL_PREFIX`
|
||||
|
||||
Withdrawal credentials with the BLS withdrawal prefix allow a BLS key pair
|
||||
`(bls_withdrawal_privkey, bls_withdrawal_pubkey)` to trigger withdrawals.
|
||||
The `withdrawal_credentials` field must be such that:
|
||||
|
||||
* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
|
||||
* `withdrawal_credentials[1:] == hash(bls_withdrawal_pubkey)[1:]`
|
||||
|
||||
*Note*: The `bls_withdrawal_privkey` is not required for validating and can be kept in cold storage.
|
||||
|
||||
##### `ETH1_ADDRESS_WITHDRAWAL_PREFIX`
|
||||
|
||||
Withdrawal credentials with the Eth1 address withdrawal prefix specify
|
||||
a 20-byte Eth1 address `eth1_withdrawal_address` as the recipient for all withdrawals.
|
||||
The `eth1_withdrawal_address` can be the address of either an externally owned account or of a contract.
|
||||
|
||||
The `withdrawal_credentials` field must be such that:
|
||||
|
||||
* `withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX`
|
||||
* `withdrawal_credentials[1:12] == b'\x00' * 11`
|
||||
* `withdrawal_credentials[12:] == eth1_withdrawal_address`
|
||||
|
||||
After the merge of the current Ethereum application layer (Eth1) into the Beacon Chain (Eth2),
|
||||
withdrawals to `eth1_withdrawal_address` will be normal ETH transfers (with no payload other than the validator's ETH)
|
||||
triggered by a user transaction that will set the gas price and gas limit as well pay fees.
|
||||
As long as the account or contract with address `eth1_withdrawal_address` can receive ETH transfers,
|
||||
the future withdrawal protocol is agnostic to all other implementation details.
|
||||
|
||||
### Submit deposit
|
||||
|
||||
@ -289,7 +318,7 @@ class Eth1Block(Container):
|
||||
|
||||
Let `get_eth1_data(block: Eth1Block) -> Eth1Data` be the function that returns the Eth1 data for a given Eth1 block.
|
||||
|
||||
An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state)` where:
|
||||
An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state, eth1_chain)` where:
|
||||
|
||||
```python
|
||||
def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:
|
||||
@ -327,7 +356,9 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Da
|
||||
valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]
|
||||
|
||||
# Default vote on latest eth1 block data in the period range unless eth1 chain is not live
|
||||
default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state.eth1_data
|
||||
# Non-substantive casting for linter
|
||||
state_eth1_data: Eth1Data = state.eth1_data
|
||||
default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data
|
||||
|
||||
return max(
|
||||
valid_votes,
|
||||
@ -358,6 +389,10 @@ The `proof` for each deposit must be constructed against the deposit root contai
|
||||
|
||||
Up to `MAX_VOLUNTARY_EXITS`, [`VoluntaryExit`](./beacon-chain.md#voluntaryexit) objects can be included in the `block`. The exits must satisfy the verification conditions found in [exits processing](./beacon-chain.md#voluntary-exits).
|
||||
|
||||
*Note*: If a slashing for a validator is included in the same block as a
|
||||
voluntary exit, the voluntary exit will fail and cause the block to be invalid
|
||||
due to the slashing being processed first. Implementers must take heed of this
|
||||
operation interaction when packing blocks.
|
||||
|
||||
#### Packaging into a `SignedBeaconBlock`
|
||||
|
||||
@ -372,7 +407,7 @@ It is useful to be able to run a state transition function (working on a copy of
|
||||
def compute_new_state_root(state: BeaconState, block: BeaconBlock) -> Root:
|
||||
temp_state: BeaconState = state.copy()
|
||||
signed_block = SignedBeaconBlock(message=block)
|
||||
temp_state = state_transition(temp_state, signed_block, validate_result=False)
|
||||
state_transition(temp_state, signed_block, validate_result=False)
|
||||
return hash_tree_root(temp_state)
|
||||
```
|
||||
|
||||
@ -604,3 +639,13 @@ Specifically, when signing an `Attestation`, a validator should perform the foll
|
||||
2. Generate and broadcast attestation.
|
||||
|
||||
If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast attestation and can effectively avoid slashing.
|
||||
|
||||
## Protection best practices
|
||||
|
||||
A validator client should be considered standalone and should consider the beacon node as untrusted. This means that the validator client should protect:
|
||||
|
||||
1) Private keys -- private keys should be protected from being exported accidentally or by an attacker.
|
||||
2) Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object.
|
||||
3) Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format.
|
||||
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity leak and potential ejection due to low balance.
|
||||
A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time server attacks and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database indicated a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event.
|
||||
|
@ -1,23 +1,24 @@
|
||||
# Ethereum 2.0 Phase 0 -- Weak Subjectivity Guide
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Custom Types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
- [Configuration](#configuration)
|
||||
- [Weak Subjectivity Checkpoint](#weak-subjectivity-checkpoint)
|
||||
- [Weak Subjectivity Period](#weak-subjectivity-period)
|
||||
- [Calculating the Weak Subjectivity Period](#calculating-the-weak-subjectivity-period)
|
||||
- [`compute_weak_subjectivity_period`](#compute_weak_subjectivity_period)
|
||||
- [Weak Subjectivity Sync](#weak-subjectivity-sync)
|
||||
- [Weak Subjectivity Sync Procedure](#weak-subjectivity-sync-procedure)
|
||||
- [Checking for Stale Weak Subjectivity Checkpoint](#checking-for-stale-weak-subjectivity-checkpoint)
|
||||
- [`is_within_weak_subjectivity_period`](#is_within_weak_subjectivity_period)
|
||||
- [Distributing Weak Subjectivity Checkpoints](#distributing-weak-subjectivity-checkpoints)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
@ -37,15 +38,27 @@ For more information about weak subjectivity and why it is required, please refe
|
||||
This document uses data structures, constants, functions, and terminology from
|
||||
[Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md).
|
||||
|
||||
## Custom Types
|
||||
|
||||
| Name | SSZ Equivalent | Description |
|
||||
|---|---|---|
|
||||
| `Ether` | `uint64` | an amount in Ether |
|
||||
|
||||
## Constants
|
||||
|
||||
| Name | Value |
|
||||
|----------------|--------------|
|
||||
| Name | Value |
|
||||
|---|---|
|
||||
| `ETH_TO_GWEI` | `uint64(10**9)` |
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value |
|
||||
|---|---|
|
||||
| `SAFETY_DECAY` | `uint64(10)` |
|
||||
|
||||
## Weak Subjectivity Checkpoint
|
||||
|
||||
Any `Checkpoint` can used be a Weak Subjectivity Checkpoint.
|
||||
Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint.
|
||||
These Weak Subjectivity Checkpoints are distributed by providers,
|
||||
downloaded by users and/or distributed as a part of clients, and used as input while syncing a client.
|
||||
|
||||
@ -62,38 +75,64 @@ a safety margin of at least `1/3 - SAFETY_DECAY/100`.
|
||||
|
||||
### Calculating the Weak Subjectivity Period
|
||||
|
||||
*Note*: `compute_weak_subjectivity_period()` is planned to be updated when a more accurate calculation is made.
|
||||
A detailed analysis of the calculation of the weak subjectivity period is made in [this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf).
|
||||
|
||||
*Note*: The expressions in the report use fractions, whereas eth2.0-specs uses only `uint64` arithmetic. The expressions have been simplified to avoid computing fractions, and more details can be found [here](https://www.overleaf.com/read/wgjzjdjpvpsd).
|
||||
|
||||
*Note*: The calculations here use `Ether` instead of `Gwei`, because the large magnitude of balances in `Gwei` can cause an overflow while computing using `uint64` arithmetic operations. Using `Ether` reduces the magnitude of the multiplicative factors by an order of `ETH_TO_GWEI` (`= 10**9`) and avoid the scope for overflows in `uint64`.
|
||||
|
||||
#### `compute_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
|
||||
weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
validator_count = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
|
||||
weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
|
||||
"""
|
||||
Returns the weak subjectivity period for the current ``state``.
|
||||
This computation takes into account the effect of:
|
||||
- validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
|
||||
- validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
|
||||
A detailed calculation can be found at:
|
||||
https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
|
||||
"""
|
||||
ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
|
||||
N = len(get_active_validator_indices(state, get_current_epoch(state)))
|
||||
t = get_total_active_balance(state) // N // ETH_TO_GWEI
|
||||
T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI
|
||||
delta = get_validator_churn_limit(state)
|
||||
Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
|
||||
D = SAFETY_DECAY
|
||||
|
||||
if T * (200 + 3 * D) < t * (200 + 12 * D):
|
||||
epochs_for_validator_set_churn = (
|
||||
N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
|
||||
)
|
||||
epochs_for_balance_top_ups = (
|
||||
N * (200 + 3 * D) // (600 * Delta)
|
||||
)
|
||||
ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
|
||||
else:
|
||||
weak_subjectivity_period += SAFETY_DECAY * validator_count // (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
|
||||
return weak_subjectivity_period
|
||||
ws_period += (
|
||||
3 * N * D * t // (200 * Delta * (T - t))
|
||||
)
|
||||
|
||||
return ws_period
|
||||
```
|
||||
|
||||
*Details about the calculation*:
|
||||
- `100` appears in the denominator to get the actual percentage ratio from `SAFETY_DECAY`
|
||||
- For more information about other terms in this equation, refer to
|
||||
[Weak Subjectivity in Eth2.0](https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2)
|
||||
A brief reference for what these values look like in practice ([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)):
|
||||
|
||||
A brief reference for what these values look like in practice:
|
||||
|
||||
| `validator_count` | `weak_subjectivity_period` |
|
||||
| ---- | ---- |
|
||||
| 1024 | 268 |
|
||||
| 2048 | 281 |
|
||||
| 4096 | 307 |
|
||||
| 8192 | 358 |
|
||||
| 16384 | 460 |
|
||||
| 32768 | 665 |
|
||||
| 65536 | 1075 |
|
||||
| 131072 | 1894 |
|
||||
| 262144 | 3532 |
|
||||
| 524288 | 3532 |
|
||||
| Safety Decay | Avg. Val. Balance (ETH) | Val. Count | Weak Sub. Period (Epochs) |
|
||||
| ---- | ---- | ---- | ---- |
|
||||
| 10 | 28 | 32768 | 504 |
|
||||
| 10 | 28 | 65536 | 752 |
|
||||
| 10 | 28 | 131072 | 1248 |
|
||||
| 10 | 28 | 262144 | 2241 |
|
||||
| 10 | 28 | 524288 | 2241 |
|
||||
| 10 | 28 | 1048576 | 2241 |
|
||||
| 10 | 32 | 32768 | 665 |
|
||||
| 10 | 32 | 65536 | 1075 |
|
||||
| 10 | 32 | 131072 | 1894 |
|
||||
| 10 | 32 | 262144 | 3532 |
|
||||
| 10 | 32 | 524288 | 3532 |
|
||||
| 10 | 32 | 1048576 | 3532 |
|
||||
|
||||
## Weak Subjectivity Sync
|
||||
|
||||
@ -104,22 +143,28 @@ Clients should allow users to input a Weak Subjectivity Checkpoint at startup, a
|
||||
1. Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format,
|
||||
where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`.
|
||||
Example of the format:
|
||||
|
||||
```
|
||||
0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544
|
||||
```
|
||||
2. - *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
|
||||
2. Check the weak subjectivity requirements:
|
||||
- *IF* `epoch_number > store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
- *IF* `epoch_number <= store.finalized_checkpoint.epoch`,
|
||||
then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`.
|
||||
Emit descriptive critical error if this assert fails, then exit client process.
|
||||
|
||||
### Checking for Stale Weak Subjectivity Checkpoint
|
||||
|
||||
Clients may choose to validate that the input Weak Subjectivity Checkpoint is not stale at the time of startup.
|
||||
To support this mechanism, the client needs to take the state at the Weak Subjectivity Checkpoint as
|
||||
a CLI parameter input (or fetch the state associated with the input Weak Subjectivity Checkpoint from some source).
|
||||
The check can be implemented in the following way:
|
||||
|
||||
#### `is_within_weak_subjectivity_period`
|
||||
|
||||
```python
|
||||
def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
|
||||
# Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
|
||||
@ -133,4 +178,5 @@ def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_c
|
||||
```
|
||||
|
||||
## Distributing Weak Subjectivity Checkpoints
|
||||
|
||||
This section will be updated soon.
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Configuration](#configuration)
|
||||
@ -1055,11 +1054,16 @@ def process_epoch(state: BeaconState) -> None:
|
||||
process_justification_and_finalization(state)
|
||||
process_rewards_and_penalties(state)
|
||||
process_registry_updates(state)
|
||||
process_reveal_deadlines(state)
|
||||
process_challenge_deadlines(state)
|
||||
process_reveal_deadlines(state) # Phase 1
|
||||
process_challenge_deadlines(state) # Phase 1
|
||||
process_slashings(state)
|
||||
process_final_updates(state) # phase 0 final updates
|
||||
process_phase_1_final_updates(state)
|
||||
process_eth1_data_reset(state)
|
||||
process_effective_balance_updates(state)
|
||||
process_slashings_reset(state)
|
||||
process_randao_mixes_reset(state)
|
||||
process_historical_roots_update(state)
|
||||
process_participation_record_updates(state)
|
||||
process_phase_1_final_updates(state) # Phase 1
|
||||
```
|
||||
|
||||
#### Phase 1 final updates
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Constants](#constants)
|
||||
- [Misc](#misc)
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Updated data structures](#updated-data-structures)
|
||||
- [Extended `Store`](#extended-store)
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Configuration](#configuration)
|
||||
- [Fork to Phase 1](#fork-to-phase-1)
|
@ -8,7 +8,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Constants](#constants)
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Fork choice](#fork-choice)
|
||||
- [Helpers](#helpers)
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Shard block verification functions](#shard-block-verification-functions)
|
||||
@ -72,14 +71,13 @@ The post-state corresponding to a pre-state `shard_state` and a signed block `si
|
||||
def shard_state_transition(shard_state: ShardState,
|
||||
signed_block: SignedShardBlock,
|
||||
beacon_parent_state: BeaconState,
|
||||
validate_result: bool = True) -> ShardState:
|
||||
validate_result: bool = True) -> None:
|
||||
assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message)
|
||||
|
||||
if validate_result:
|
||||
assert verify_shard_block_signature(beacon_parent_state, signed_block)
|
||||
|
||||
process_shard_block(shard_state, signed_block.message)
|
||||
return shard_state
|
||||
```
|
||||
|
||||
```python
|
||||
|
@ -8,7 +8,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Constants](#constants)
|
||||
@ -68,7 +67,7 @@ A validator is an entity that participates in the consensus of the Ethereum 2.0
|
||||
|
||||
This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
|
||||
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use as a reference throughout.
|
||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use them as a reference throughout.
|
||||
|
||||
## Constants
|
||||
|
||||
@ -353,7 +352,7 @@ Aggregation selection and the core of this duty are largely unchanged from Phase
|
||||
|
||||
Note the timing of when to broadcast aggregates is altered in Phase 1+.
|
||||
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot`-that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.
|
||||
|
||||
##### `AggregateAndProof`
|
||||
|
||||
|
@ -7,7 +7,6 @@
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Generalized Merkle tree index](#generalized-merkle-tree-index)
|
||||
- [SSZ object to index](#ssz-object-to-index)
|
||||
|
@ -1,13 +1,10 @@
|
||||
# SimpleSerialize (SSZ)
|
||||
|
||||
**Notice**: This document is a work-in-progress describing typing, serialization, and Merkleization of Eth2 objects.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Constants](#constants)
|
||||
- [Typing](#typing)
|
||||
- [Basic types](#basic-types)
|
||||
@ -249,16 +246,4 @@ We similarly define "summary types" and "expansion types". For example, [`Beacon
|
||||
|
||||
## Implementations
|
||||
|
||||
| Language | Project | Maintainer | Implementation |
|
||||
|-|-|-|-|
|
||||
| Python | Ethereum 2.0 | Ethereum Foundation | [https://github.com/ethereum/py-ssz](https://github.com/ethereum/py-ssz) |
|
||||
| Rust | Lighthouse | Sigma Prime | [https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz](https://github.com/sigp/lighthouse/tree/master/eth2/utils/ssz) |
|
||||
| Nim | Nimbus | Status | [https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim](https://github.com/status-im/nim-beacon-chain/blob/master/beacon_chain/ssz.nim) |
|
||||
| Rust | Shasper | ParityTech | [https://github.com/paritytech/shasper/tree/master/utils/ssz](https://github.com/paritytech/shasper/tree/master/utils/ssz) |
|
||||
| TypeScript | Lodestar | ChainSafe Systems | [https://github.com/ChainSafe/ssz-js](https://github.com/ChainSafe/ssz) |
|
||||
| Java | Cava | ConsenSys | [https://www.github.com/ConsenSys/cava/tree/master/ssz](https://www.github.com/ConsenSys/cava/tree/master/ssz) |
|
||||
| Go | Prysm | Prysmatic Labs | [https://github.com/prysmaticlabs/go-ssz](https://github.com/prysmaticlabs/go-ssz) |
|
||||
| Swift | Yeeth | Dean Eigenmann | [https://github.com/yeeth/SimpleSerialize.swift](https://github.com/yeeth/SimpleSerialize.swift) |
|
||||
| C# | | Jordan Andrews | [https://github.com/codingupastorm/csharp-ssz](https://github.com/codingupastorm/csharp-ssz) |
|
||||
| C# | Cortex | Sly Gryphon | [https://www.nuget.org/packages/Cortex.SimpleSerialize](https://www.nuget.org/packages/Cortex.SimpleSerialize) |
|
||||
| C++ | | Jiyun Kim | [https://github.com/NAKsir-melody/cpp_ssz](https://github.com/NAKsir-melody/cpp_ssz) |
|
||||
See https://github.com/ethereum/eth2.0-specs/issues/2138 for a list of current known implementations.
|
||||
|
@ -1,40 +0,0 @@
|
||||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Iterable
|
||||
|
||||
from gen_base.gen_typing import TestCase
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: str, bls_active: bool = True) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: to run tests against particular phase and/or fork.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=fork_name, bls_active=bls_active)
|
||||
)
|
@ -1,4 +0,0 @@
|
||||
ruamel.yaml==0.16.5
|
||||
eth-utils==1.6.0
|
||||
pytest>=4.4
|
||||
python-snappy==0.5.4
|
@ -1,12 +0,0 @@
|
||||
from distutils.core import setup
|
||||
|
||||
setup(
|
||||
name='gen_helpers',
|
||||
packages=['gen_base', 'gen_from_tests'],
|
||||
install_requires=[
|
||||
"ruamel.yaml==0.16.5",
|
||||
"eth-utils==1.6.0",
|
||||
"pytest>=4.4",
|
||||
"python-snappy==0.5.4",
|
||||
]
|
||||
)
|
@ -27,7 +27,7 @@ python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-ch
|
||||
|
||||
After installing, you can install the optional dependencies for testing and linting.
|
||||
With makefile: `make install_test`.
|
||||
Or manually: run `pip install .[testing]` and `pip install .[linting]`.
|
||||
Or manually: run `pip install .[test]` and `pip install .[lint]`.
|
||||
|
||||
These tests are not intended for client-consumption.
|
||||
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.
|
||||
|
@ -1 +1 @@
|
||||
0.12.3
|
||||
1.0.1
|
@ -4,7 +4,7 @@
|
||||
|
||||
A util to quickly write new test suite generators with.
|
||||
|
||||
See [Generators documentation](../../generators/README.md) for integration details.
|
||||
See [Generators documentation](../../../../generators/README.md) for integration details.
|
||||
|
||||
Options:
|
||||
|
@ -8,12 +8,13 @@ from ruamel.yaml import (
|
||||
YAML,
|
||||
)
|
||||
|
||||
from gen_base.gen_typing import TestProvider
|
||||
from snappy import compress
|
||||
|
||||
from eth2spec.test import context
|
||||
from eth2spec.test.exceptions import SkippedTest
|
||||
|
||||
from .gen_typing import TestProvider
|
||||
|
||||
|
||||
# Flag that the runner does NOT run test via pytest
|
||||
context.is_pytest = False
|
||||
@ -120,10 +121,11 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
|
||||
|
||||
print(f"generating tests with config '{config_name}' ...")
|
||||
for test_case in tprov.make_cases():
|
||||
case_dir = Path(output_dir) / Path(config_name) / Path(test_case.fork_name) \
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name) \
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
|
||||
case_dir = (
|
||||
Path(output_dir) / Path(config_name) / Path(test_case.fork_name)
|
||||
/ Path(test_case.runner_name) / Path(test_case.handler_name)
|
||||
/ Path(test_case.suite_name) / Path(test_case.case_name)
|
||||
)
|
||||
if case_dir.exists():
|
||||
if not args.force:
|
||||
print(f'Skipping already existing test: {case_dir}')
|
105
tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py
Normal file
105
tests/core/pyspec/eth2spec/gen_helpers/gen_from_tests/gen.py
Normal file
@ -0,0 +1,105 @@
|
||||
from importlib import reload, import_module
|
||||
from inspect import getmembers, isfunction
|
||||
from typing import Any, Callable, Dict, Iterable, Optional
|
||||
|
||||
from eth2spec.config import config_util
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.context import ALL_CONFIGS, TESTGEN_FORKS, SpecForkName, ConfigName
|
||||
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner
|
||||
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider
|
||||
|
||||
|
||||
def generate_from_tests(runner_name: str, handler_name: str, src: Any,
|
||||
fork_name: SpecForkName, bls_active: bool = True,
|
||||
phase: Optional[str]=None) -> Iterable[TestCase]:
|
||||
"""
|
||||
Generate a list of test cases by running tests from the given src in generator-mode.
|
||||
:param runner_name: to categorize the test in general as.
|
||||
:param handler_name: to categorize the test specialization as.
|
||||
:param src: to retrieve tests from (discovered using inspect.getmembers).
|
||||
:param fork_name: the folder name for these tests.
|
||||
(if multiple forks are applicable, indicate the last fork)
|
||||
:param bls_active: optional, to override BLS switch preference. Defaults to True.
|
||||
:param phase: optional, to run tests against a particular spec version. Default to `fork_name` value.
|
||||
:return: an iterable of test cases.
|
||||
"""
|
||||
fn_names = [
|
||||
name for (name, _) in getmembers(src, isfunction)
|
||||
if name.startswith('test_')
|
||||
]
|
||||
|
||||
if phase is None:
|
||||
phase = fork_name
|
||||
|
||||
print("generating test vectors from tests source: %s" % src.__name__)
|
||||
for name in fn_names:
|
||||
tfn = getattr(src, name)
|
||||
|
||||
# strip off the `test_`
|
||||
case_name = name
|
||||
if case_name.startswith('test_'):
|
||||
case_name = case_name[5:]
|
||||
|
||||
yield TestCase(
|
||||
fork_name=fork_name,
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
suite_name='pyspec_tests',
|
||||
case_name=case_name,
|
||||
# TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
|
||||
case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
|
||||
)
|
||||
|
||||
|
||||
def get_provider(create_provider_fn: Callable[[SpecForkName, str, str, ConfigName], TestProvider],
|
||||
config_name: ConfigName,
|
||||
fork_name: SpecForkName,
|
||||
all_mods: Dict[str, Dict[str, str]]) -> Iterable[TestProvider]:
|
||||
for key, mod_name in all_mods[fork_name].items():
|
||||
yield create_provider_fn(
|
||||
fork_name=fork_name,
|
||||
handler_name=key,
|
||||
tests_src_mod_name=mod_name,
|
||||
config_name=config_name,
|
||||
)
|
||||
|
||||
|
||||
def get_create_provider_fn(
|
||||
runner_name: str, config_name: ConfigName, specs: Iterable[Any]
|
||||
) -> Callable[[SpecForkName, str, str, ConfigName], TestProvider]:
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
config_util.prepare_config(configs_path, config_name)
|
||||
for spec in specs:
|
||||
reload(spec)
|
||||
bls.use_milagro()
|
||||
return config_name
|
||||
|
||||
def create_provider(fork_name: SpecForkName, handler_name: str,
|
||||
tests_src_mod_name: str, config_name: ConfigName) -> TestProvider:
|
||||
def cases_fn() -> Iterable[TestCase]:
|
||||
tests_src = import_module(tests_src_mod_name)
|
||||
return generate_from_tests(
|
||||
runner_name=runner_name,
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name=fork_name,
|
||||
)
|
||||
|
||||
return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
return create_provider
|
||||
|
||||
|
||||
def run_state_test_generators(runner_name: str, specs: Iterable[Any], all_mods: Dict[str, Dict[str, str]]) -> None:
|
||||
"""
|
||||
Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner.
|
||||
"""
|
||||
for config_name in ALL_CONFIGS:
|
||||
for fork_name in TESTGEN_FORKS:
|
||||
if fork_name in all_mods:
|
||||
gen_runner.run_generator(runner_name, get_provider(
|
||||
create_provider_fn=get_create_provider_fn(runner_name, config_name, specs),
|
||||
config_name=config_name,
|
||||
fork_name=fork_name,
|
||||
all_mods=all_mods,
|
||||
))
|
@ -2,6 +2,7 @@ import pytest
|
||||
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.phase1 import spec as spec_phase1
|
||||
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from .exceptions import SkippedTest
|
||||
@ -19,6 +20,7 @@ from importlib import reload
|
||||
def reload_specs():
|
||||
reload(spec_phase0)
|
||||
reload(spec_phase1)
|
||||
reload(spec_lightclient_patch)
|
||||
|
||||
|
||||
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
|
||||
@ -28,11 +30,17 @@ ConfigName = NewType("ConfigName", str)
|
||||
|
||||
PHASE0 = SpecForkName('phase0')
|
||||
PHASE1 = SpecForkName('phase1')
|
||||
ALL_PHASES = (PHASE0, PHASE1)
|
||||
LIGHTCLIENT_PATCH = SpecForkName('lightclient_patch')
|
||||
|
||||
ALL_PHASES = (PHASE0, PHASE1, LIGHTCLIENT_PATCH)
|
||||
|
||||
MAINNET = ConfigName('mainnet')
|
||||
MINIMAL = ConfigName('minimal')
|
||||
|
||||
ALL_CONFIGS = (MINIMAL, MAINNET)
|
||||
|
||||
# The forks that output to the test vectors.
|
||||
TESTGEN_FORKS = (PHASE0, LIGHTCLIENT_PATCH)
|
||||
|
||||
# TODO: currently phases are defined as python modules.
|
||||
# It would be better if they would be more well-defined interfaces for stronger typing.
|
||||
@ -47,14 +55,18 @@ class SpecPhase0(Spec):
|
||||
|
||||
|
||||
class SpecPhase1(Spec):
|
||||
def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
|
||||
...
|
||||
...
|
||||
|
||||
|
||||
class SpecLightclient(Spec):
|
||||
...
|
||||
|
||||
|
||||
# add transfer, bridge, etc. as the spec evolves
|
||||
class SpecForks(TypedDict, total=False):
|
||||
PHASE0: SpecPhase0
|
||||
PHASE1: SpecPhase1
|
||||
LIGHTCLIENT_PATCH: SpecLightclient
|
||||
|
||||
|
||||
def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
|
||||
@ -70,6 +82,8 @@ def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Ca
|
||||
# TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
|
||||
# Decide based on performance/consistency results later.
|
||||
state = phases[PHASE1].upgrade_to_phase1(state)
|
||||
elif spec.fork == LIGHTCLIENT_PATCH:
|
||||
state = phases[LIGHTCLIENT_PATCH].upgrade_to_lightclient_patch(state)
|
||||
|
||||
return state
|
||||
|
||||
@ -326,23 +340,28 @@ def with_phases(phases, other_phases=None):
|
||||
|
||||
available_phases = set(run_phases)
|
||||
if other_phases is not None:
|
||||
available_phases += set(other_phases)
|
||||
available_phases |= set(other_phases)
|
||||
|
||||
# TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
|
||||
# A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
|
||||
available_phases.add(PHASE0)
|
||||
|
||||
# Populate all phases for multi-phase tests
|
||||
phase_dir = {}
|
||||
if PHASE0 in available_phases:
|
||||
phase_dir[PHASE0] = spec_phase0
|
||||
if PHASE1 in available_phases:
|
||||
phase_dir[PHASE1] = spec_phase1
|
||||
if LIGHTCLIENT_PATCH in available_phases:
|
||||
phase_dir[LIGHTCLIENT_PATCH] = spec_lightclient_patch
|
||||
|
||||
# return is ignored whenever multiple phases are ran. If
|
||||
if PHASE0 in run_phases:
|
||||
ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
|
||||
if PHASE1 in run_phases:
|
||||
ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
|
||||
if LIGHTCLIENT_PATCH in run_phases:
|
||||
ret = fn(spec=spec_lightclient_patch, phases=phase_dir, *args, **kw)
|
||||
return ret
|
||||
return wrapper
|
||||
return decorator
|
||||
@ -376,3 +395,11 @@ def only_full_crosslink(fn):
|
||||
return None
|
||||
return fn(*args, spec=spec, state=state, **kw)
|
||||
return wrapper
|
||||
|
||||
|
||||
def is_post_lightclient_patch(spec):
|
||||
if spec.fork in [PHASE0, PHASE1]:
|
||||
# TODO: PHASE1 fork is temporarily parallel to LIGHTCLIENT_PATCH.
|
||||
# Will make PHASE1 fork inherit LIGHTCLIENT_PATCH later.
|
||||
return False
|
||||
return True
|
||||
|
@ -2,7 +2,7 @@ from lru import LRU
|
||||
|
||||
from typing import List
|
||||
|
||||
from eth2spec.test.context import expect_assertion_error, PHASE1
|
||||
from eth2spec.test.context import expect_assertion_error, PHASE1, is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
|
||||
@ -30,17 +30,22 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
current_epoch_count = len(state.current_epoch_attestations)
|
||||
previous_epoch_count = len(state.previous_epoch_attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
current_epoch_count = len(state.current_epoch_attestations)
|
||||
previous_epoch_count = len(state.previous_epoch_attestations)
|
||||
|
||||
# process attestation
|
||||
spec.process_attestation(state, attestation)
|
||||
|
||||
# Make sure the attestation has been processed
|
||||
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
||||
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
||||
if not is_post_lightclient_patch(spec):
|
||||
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
||||
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
||||
else:
|
||||
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
||||
else:
|
||||
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
||||
# After accounting reform, there are cases when processing an attestation does not result in any flag updates
|
||||
pass
|
||||
|
||||
# yield post-state
|
||||
yield 'post', state
|
||||
@ -315,7 +320,8 @@ def prepare_state_with_attestations(spec, state, participation_fn=None):
|
||||
next_slot(spec, state)
|
||||
|
||||
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
return attestations
|
||||
|
||||
|
@ -1,8 +1,11 @@
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation, sign_indexed_attestation
|
||||
|
||||
|
||||
def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
|
||||
attestation_1 = get_valid_attestation(spec, state, signed=signed_1)
|
||||
def get_valid_attester_slashing(spec, state, slot=None, signed_1=False, signed_2=False, filter_participant_set=None):
|
||||
attestation_1 = get_valid_attestation(
|
||||
spec, state,
|
||||
slot=slot, signed=signed_1, filter_participant_set=filter_participant_set
|
||||
)
|
||||
|
||||
attestation_2 = attestation_1.copy()
|
||||
attestation_2.data.target.root = b'\x01' * 32
|
||||
@ -16,14 +19,17 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
|
||||
)
|
||||
|
||||
|
||||
def get_valid_attester_slashing_by_indices(spec, state, indices_1, indices_2=None, signed_1=False, signed_2=False):
|
||||
def get_valid_attester_slashing_by_indices(spec, state,
|
||||
indices_1, indices_2=None,
|
||||
slot=None,
|
||||
signed_1=False, signed_2=False):
|
||||
if indices_2 is None:
|
||||
indices_2 = indices_1
|
||||
|
||||
assert indices_1 == sorted(indices_1)
|
||||
assert indices_2 == sorted(indices_2)
|
||||
|
||||
attester_slashing = get_valid_attester_slashing(spec, state)
|
||||
attester_slashing = get_valid_attester_slashing(spec, state, slot=slot)
|
||||
|
||||
attester_slashing.attestation_1.attesting_indices = indices_1
|
||||
attester_slashing.attestation_2.attesting_indices = indices_2
|
||||
|
@ -1,3 +1,4 @@
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.utils.bls import only_with_bls
|
||||
@ -89,6 +90,10 @@ def build_empty_block(spec, state, slot=None):
|
||||
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
||||
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
||||
empty_block.parent_root = parent_block_root
|
||||
|
||||
if is_post_lightclient_patch(spec):
|
||||
empty_block.body.sync_committee_signature = spec.G2_POINT_AT_INFINITY
|
||||
|
||||
apply_randao_reveal(spec, state, empty_block)
|
||||
return empty_block
|
||||
|
||||
|
@ -56,7 +56,10 @@ def deposit_from_context(spec, deposit_data_list, index):
|
||||
deposit_data = deposit_data_list[index]
|
||||
root = hash_tree_root(List[spec.DepositData, 2**spec.DEPOSIT_CONTRACT_TREE_DEPTH](*deposit_data_list))
|
||||
tree = calc_merkle_tree_from_leaves(tuple([d.hash_tree_root() for d in deposit_data_list]))
|
||||
proof = list(get_merkle_proof(tree, item_index=index, tree_len=32)) + [(index + 1).to_bytes(32, 'little')]
|
||||
proof = (
|
||||
list(get_merkle_proof(tree, item_index=index, tree_len=32))
|
||||
+ [len(deposit_data_list).to_bytes(32, 'little')]
|
||||
)
|
||||
leaf = deposit_data.hash_tree_root()
|
||||
assert spec.is_valid_merkle_branch(leaf, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH + 1, index, root)
|
||||
deposit = spec.Deposit(proof=proof, data=deposit_data)
|
||||
|
@ -1,14 +1,29 @@
|
||||
|
||||
process_calls = [
|
||||
'process_justification_and_finalization',
|
||||
'process_rewards_and_penalties',
|
||||
'process_registry_updates',
|
||||
'process_reveal_deadlines',
|
||||
'process_challenge_deadlines',
|
||||
'process_slashings',
|
||||
'process_final_updates',
|
||||
'after_process_final_updates',
|
||||
]
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
|
||||
|
||||
def get_process_calls(spec):
|
||||
return [
|
||||
# PHASE0
|
||||
'process_justification_and_finalization',
|
||||
'process_rewards_and_penalties',
|
||||
'process_registry_updates',
|
||||
'process_reveal_deadlines',
|
||||
'process_challenge_deadlines',
|
||||
'process_slashings',
|
||||
'process_eth1_data_reset',
|
||||
'process_effective_balance_updates',
|
||||
'process_slashings_reset',
|
||||
'process_randao_mixes_reset',
|
||||
'process_historical_roots_update',
|
||||
# HF1 replaced `process_participation_record_updates` with `process_participation_flag_updates`
|
||||
'process_participation_flag_updates' if is_post_lightclient_patch(spec) else (
|
||||
'process_participation_record_updates'
|
||||
),
|
||||
'process_sync_committee_updates',
|
||||
# PHASE1
|
||||
'process_phase_1_final_updates',
|
||||
]
|
||||
|
||||
|
||||
def run_epoch_processing_to(spec, state, process_name: str):
|
||||
@ -25,7 +40,7 @@ def run_epoch_processing_to(spec, state, process_name: str):
|
||||
spec.process_slot(state)
|
||||
|
||||
# process components of epoch transition before final-updates
|
||||
for name in process_calls:
|
||||
for name in get_process_calls(spec):
|
||||
if name == process_name:
|
||||
break
|
||||
# only run when present. Later phases introduce more to the epoch-processing.
|
156
tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
Normal file
156
tests/core/pyspec/eth2spec/test/helpers/multi_operations.py
Normal file
@ -0,0 +1,156 @@
|
||||
from random import Random
|
||||
|
||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
|
||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing_by_indices
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation
|
||||
from eth2spec.test.helpers.deposits import build_deposit, deposit_from_context
|
||||
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
|
||||
|
||||
|
||||
def run_slash_and_exit(spec, state, slash_index, exit_index, valid=True):
|
||||
"""
|
||||
Helper function to run a test that slashes and exits two validators
|
||||
"""
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
|
||||
proposer_slashing = get_valid_proposer_slashing(
|
||||
spec, state, slashed_index=slash_index, signed_1=True, signed_2=True)
|
||||
signed_exit = prepare_signed_exits(spec, state, [exit_index])[0]
|
||||
|
||||
block.body.proposer_slashings.append(proposer_slashing)
|
||||
block.body.voluntary_exits.append(signed_exit)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=(not valid))
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
|
||||
if not valid:
|
||||
yield 'post', None
|
||||
return
|
||||
|
||||
yield 'post', state
|
||||
|
||||
|
||||
def get_random_proposer_slashings(spec, state, rng):
|
||||
num_slashings = rng.randrange(spec.MAX_PROPOSER_SLASHINGS)
|
||||
indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
|
||||
slashings = [
|
||||
get_valid_proposer_slashing(
|
||||
spec, state,
|
||||
slashed_index=indices.pop(rng.randrange(len(indices))), signed_1=True, signed_2=True,
|
||||
)
|
||||
for _ in range(num_slashings)
|
||||
]
|
||||
return slashings
|
||||
|
||||
|
||||
def get_random_attester_slashings(spec, state, rng):
|
||||
num_slashings = rng.randrange(spec.MAX_ATTESTER_SLASHINGS)
|
||||
indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy()
|
||||
slot_range = list(range(state.slot - spec.SLOTS_PER_HISTORICAL_ROOT + 1, state.slot))
|
||||
slashings = [
|
||||
get_valid_attester_slashing_by_indices(
|
||||
spec, state,
|
||||
sorted([indices.pop(rng.randrange(len(indices))) for _ in range(rng.randrange(1, 4))]),
|
||||
slot=slot_range.pop(rng.randrange(len(slot_range))),
|
||||
signed_1=True, signed_2=True,
|
||||
)
|
||||
for _ in range(num_slashings)
|
||||
]
|
||||
return slashings
|
||||
|
||||
|
||||
def get_random_attestations(spec, state, rng):
|
||||
num_attestations = rng.randrange(spec.MAX_ATTESTATIONS)
|
||||
|
||||
attestations = [
|
||||
get_valid_attestation(
|
||||
spec, state,
|
||||
slot=rng.randrange(state.slot - spec.SLOTS_PER_EPOCH + 1, state.slot),
|
||||
signed=True,
|
||||
)
|
||||
for _ in range(num_attestations)
|
||||
]
|
||||
return attestations
|
||||
|
||||
|
||||
def prepare_state_and_get_random_deposits(spec, state, rng):
|
||||
num_deposits = rng.randrange(spec.MAX_DEPOSITS)
|
||||
|
||||
deposit_data_leaves = [spec.DepositData() for _ in range(len(state.validators))]
|
||||
deposits = []
|
||||
|
||||
# First build deposit data leaves
|
||||
for i in range(num_deposits):
|
||||
index = len(state.validators) + i
|
||||
_, root, deposit_data_leaves = build_deposit(
|
||||
spec,
|
||||
deposit_data_leaves,
|
||||
pubkeys[index],
|
||||
privkeys[index],
|
||||
spec.MAX_EFFECTIVE_BALANCE,
|
||||
withdrawal_credentials=b'\x00' * 32,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
state.eth1_data.deposit_root = root
|
||||
state.eth1_data.deposit_count += num_deposits
|
||||
|
||||
# Then for that context, build deposits/proofs
|
||||
for i in range(num_deposits):
|
||||
index = len(state.validators) + i
|
||||
deposit, _, _ = deposit_from_context(spec, deposit_data_leaves, index)
|
||||
deposits.append(deposit)
|
||||
|
||||
return deposits
|
||||
|
||||
|
||||
def get_random_voluntary_exits(spec, state, to_be_slashed_indices, rng):
|
||||
num_exits = rng.randrange(spec.MAX_VOLUNTARY_EXITS)
|
||||
indices = set(spec.get_active_validator_indices(state, spec.get_current_epoch(state)).copy())
|
||||
eligible_indices = indices - to_be_slashed_indices
|
||||
exit_indices = [eligible_indices.pop() for _ in range(num_exits)]
|
||||
return prepare_signed_exits(spec, state, exit_indices)
|
||||
|
||||
|
||||
def run_test_full_random_operations(spec, state, rng=Random(2080)):
|
||||
# move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit
|
||||
state.slot += spec.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
|
||||
|
||||
# prepare state for deposits before building block
|
||||
deposits = prepare_state_and_get_random_deposits(spec, state, rng)
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.proposer_slashings = get_random_proposer_slashings(spec, state, rng)
|
||||
block.body.attester_slashings = get_random_attester_slashings(spec, state, rng)
|
||||
block.body.attestations = get_random_attestations(spec, state, rng)
|
||||
block.body.deposits = deposits
|
||||
|
||||
# cannot include to be slashed indices as exits
|
||||
slashed_indices = set([
|
||||
slashing.signed_header_1.message.proposer_index
|
||||
for slashing in block.body.proposer_slashings
|
||||
])
|
||||
for attester_slashing in block.body.attester_slashings:
|
||||
slashed_indices = slashed_indices.union(attester_slashing.attestation_1.attesting_indices)
|
||||
slashed_indices = slashed_indices.union(attester_slashing.attestation_2.attesting_indices)
|
||||
block.body.voluntary_exits = get_random_voluntary_exits(spec, state, slashed_indices, rng)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
@ -1,8 +1,16 @@
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.block_header import sign_block_header
|
||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||
from eth2spec.test.helpers.state import get_balance
|
||||
|
||||
|
||||
def get_min_slashing_penalty_quotient(spec):
|
||||
if is_post_lightclient_patch(spec):
|
||||
return spec.HF1_MIN_SLASHING_PENALTY_QUOTIENT
|
||||
else:
|
||||
return spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
|
||||
|
||||
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||
slashed_validator = state.validators[slashed_index]
|
||||
assert slashed_validator.slashed
|
||||
@ -10,7 +18,7 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||
|
||||
proposer_index = spec.get_beacon_proposer_index(state)
|
||||
slash_penalty = state.validators[slashed_index].effective_balance // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
|
||||
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||
if proposer_index != slashed_index:
|
||||
# slashed validator lost initial slash penalty
|
||||
|
@ -2,6 +2,7 @@ from random import Random
|
||||
from lru import LRU
|
||||
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.test.context import is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
@ -37,24 +38,35 @@ def run_deltas(spec, state):
|
||||
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
||||
"""
|
||||
yield 'pre', state
|
||||
|
||||
if is_post_lightclient_patch(spec):
|
||||
def get_source_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_FLAG_NUMERATOR)
|
||||
|
||||
def get_head_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_FLAG_NUMERATOR)
|
||||
|
||||
def get_target_deltas(state):
|
||||
return spec.get_flag_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_FLAG_NUMERATOR)
|
||||
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_source_deltas,
|
||||
spec.get_source_deltas if not is_post_lightclient_patch(spec) else get_source_deltas,
|
||||
spec.get_matching_source_attestations,
|
||||
'source_deltas',
|
||||
)
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_target_deltas,
|
||||
spec.get_target_deltas if not is_post_lightclient_patch(spec) else get_target_deltas,
|
||||
spec.get_matching_target_attestations,
|
||||
'target_deltas',
|
||||
)
|
||||
yield from run_attestation_component_deltas(
|
||||
spec,
|
||||
state,
|
||||
spec.get_head_deltas,
|
||||
spec.get_head_deltas if not is_post_lightclient_patch(spec) else get_head_deltas,
|
||||
spec.get_matching_head_attestations,
|
||||
'head_deltas',
|
||||
)
|
||||
@ -62,6 +74,16 @@ def run_deltas(spec, state):
|
||||
yield from run_get_inactivity_penalty_deltas(spec, state)
|
||||
|
||||
|
||||
def deltas_name_to_flag_index(spec, deltas_name):
|
||||
if 'source' in deltas_name:
|
||||
return spec.TIMELY_SOURCE_FLAG_INDEX
|
||||
elif 'head' in deltas_name:
|
||||
return spec.TIMELY_HEAD_FLAG_INDEX
|
||||
elif 'target' in deltas_name:
|
||||
return spec.TIMELY_TARGET_FLAG_INDEX
|
||||
raise ValueError("Wrong deltas_name %s" % deltas_name)
|
||||
|
||||
|
||||
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
|
||||
"""
|
||||
Run ``component_delta_fn``, yielding:
|
||||
@ -71,8 +93,14 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
|
||||
|
||||
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
|
||||
|
||||
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
||||
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
||||
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
else:
|
||||
matching_indices = spec.get_unslashed_participating_indices(
|
||||
state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state)
|
||||
)
|
||||
|
||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||
for index in range(len(state.validators)):
|
||||
if index not in eligible_indices:
|
||||
@ -101,6 +129,12 @@ def run_get_inclusion_delay_deltas(spec, state):
|
||||
Run ``get_inclusion_delay_deltas``, yielding:
|
||||
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||
"""
|
||||
if is_post_lightclient_patch(spec):
|
||||
# No inclusion_delay_deltas
|
||||
yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
|
||||
penalties=[0] * len(state.validators))
|
||||
return
|
||||
|
||||
rewards, penalties = spec.get_inclusion_delay_deltas(state)
|
||||
|
||||
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||
@ -148,8 +182,14 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
||||
|
||||
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||
|
||||
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
||||
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
||||
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||
else:
|
||||
matching_attesting_indices = spec.get_unslashed_participating_indices(
|
||||
state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state)
|
||||
)
|
||||
reward_numerator_sum = sum(numerator for (_, numerator) in spec.get_flag_indices_and_numerators())
|
||||
|
||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||
for index in range(len(state.validators)):
|
||||
@ -159,8 +199,14 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
||||
continue
|
||||
|
||||
if spec.is_in_inactivity_leak(state):
|
||||
base_reward = spec.get_base_reward(state, index)
|
||||
base_penalty = spec.BASE_REWARDS_PER_EPOCH * base_reward - spec.get_proposer_reward(state, index)
|
||||
# Compute base_penalty
|
||||
if not is_post_lightclient_patch(spec):
|
||||
cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
|
||||
base_reward = spec.get_base_reward(state, index)
|
||||
base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
|
||||
else:
|
||||
base_penalty = spec.get_base_reward(state, index) * reward_numerator_sum // spec.FLAG_DENOMINATOR
|
||||
|
||||
if not has_enough_for_reward(spec, state, index):
|
||||
assert penalties[index] == 0
|
||||
elif index in matching_attesting_indices:
|
||||
@ -262,8 +308,13 @@ def run_test_full_all_correct(spec, state):
|
||||
def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
for a in state.previous_epoch_attestations:
|
||||
a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
|
||||
if not is_post_lightclient_patch(spec):
|
||||
for a in state.previous_epoch_attestations:
|
||||
a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
if rng.choice([True, False]):
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
@ -272,8 +323,12 @@ def run_test_partial(spec, state, fraction_filled):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
# Remove portion of attestations
|
||||
num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
|
||||
if not is_post_lightclient_patch(spec):
|
||||
num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
|
||||
else:
|
||||
for index in range(int(len(state.validators) * fraction_filled)):
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
@ -328,13 +383,18 @@ def run_test_some_very_low_effective_balances_that_attested(spec, state):
|
||||
def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
# Remove attestation
|
||||
attestation = state.previous_epoch_attestations[0]
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
|
||||
# Set removed indices effective balance to very low amount
|
||||
indices = spec.get_unslashed_attesting_indices(state, [attestation])
|
||||
for i, index in enumerate(indices):
|
||||
state.validators[index].effective_balance = i
|
||||
if not is_post_lightclient_patch(spec):
|
||||
# Remove attestation
|
||||
attestation = state.previous_epoch_attestations[0]
|
||||
state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
|
||||
# Set removed indices effective balance to very low amount
|
||||
indices = spec.get_unslashed_attesting_indices(state, [attestation])
|
||||
for i, index in enumerate(indices):
|
||||
state.validators[index].effective_balance = i
|
||||
else:
|
||||
index = 0
|
||||
state.validators[index].effective_balance = 1
|
||||
state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
|
||||
|
||||
yield from run_deltas(spec, state)
|
||||
|
||||
@ -442,16 +502,43 @@ def run_test_full_random(spec, state, rng=Random(8020)):
|
||||
|
||||
cached_prepare_state_with_attestations(spec, state)
|
||||
|
||||
for pending_attestation in state.previous_epoch_attestations:
|
||||
# ~1/3 have bad target
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.target.root = b'\x55' * 32
|
||||
# ~1/3 have bad head
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||
# ~50% participation
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False]) for _ in pending_attestation.aggregation_bits]
|
||||
# Random inclusion delay
|
||||
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
for pending_attestation in state.previous_epoch_attestations:
|
||||
# ~1/3 have bad target
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.target.root = b'\x55' * 32
|
||||
# ~1/3 have bad head
|
||||
if rng.randint(0, 2) == 0:
|
||||
pending_attestation.data.beacon_block_root = b'\x66' * 32
|
||||
# ~50% participation
|
||||
pending_attestation.aggregation_bits = [rng.choice([True, False])
|
||||
for _ in pending_attestation.aggregation_bits]
|
||||
# Random inclusion delay
|
||||
pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
# ~1/3 have bad head or bad target or not timely enough
|
||||
is_timely_correct_head = rng.randint(0, 2) != 0
|
||||
flags = state.previous_epoch_participation[index]
|
||||
|
||||
def set_flag(index, value):
|
||||
nonlocal flags
|
||||
flag = spec.ParticipationFlags(2**index)
|
||||
if value:
|
||||
flags |= flag
|
||||
else:
|
||||
flags &= 0xff ^ flag
|
||||
|
||||
set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
|
||||
if is_timely_correct_head:
|
||||
# If timely head, then must be timely target
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
|
||||
# If timely head, then must be timely source
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
|
||||
else:
|
||||
# ~50% of remaining have bad target or not timely enough
|
||||
set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
|
||||
# ~50% of remaining have bad source or not timely enough
|
||||
set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
|
||||
state.previous_epoch_participation[index] = flags
|
||||
yield from run_deltas(spec, state)
|
||||
|
33
tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
Normal file
33
tests/core/pyspec/eth2spec/test/helpers/sync_committee.py
Normal file
@ -0,0 +1,33 @@
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
|
||||
def compute_sync_committee_signature(spec, state, slot, privkey):
|
||||
domain = spec.get_domain(state, spec.DOMAIN_SYNC_COMMITTEE, spec.compute_epoch_at_slot(slot))
|
||||
if slot == state.slot:
|
||||
block_root = build_empty_block_for_next_slot(spec, state).parent_root
|
||||
else:
|
||||
block_root = spec.get_block_root_at_slot(state, slot)
|
||||
signing_root = spec.compute_signing_root(block_root, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
|
||||
|
||||
def compute_aggregate_sync_committee_signature(spec, state, slot, participants):
|
||||
if len(participants) == 0:
|
||||
return spec.G2_POINT_AT_INFINITY
|
||||
|
||||
signatures = []
|
||||
for validator_index in participants:
|
||||
privkey = privkeys[validator_index]
|
||||
signatures.append(
|
||||
compute_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
slot,
|
||||
privkey,
|
||||
)
|
||||
)
|
||||
return bls.Aggregate(signatures)
|
@ -1,4 +1,19 @@
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.helpers.keys import privkeys
|
||||
|
||||
|
||||
def prepare_signed_exits(spec, state, indices):
|
||||
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
|
||||
|
||||
def create_signed_exit(index):
|
||||
exit = spec.VoluntaryExit(
|
||||
epoch=spec.get_current_epoch(state),
|
||||
validator_index=index,
|
||||
)
|
||||
signing_root = spec.compute_signing_root(exit, domain)
|
||||
return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))
|
||||
|
||||
return [create_signed_exit(index) for index in indices]
|
||||
|
||||
|
||||
def sign_voluntary_exit(spec, state, voluntary_exit, privkey):
|
||||
|
@ -0,0 +1,319 @@
|
||||
from collections import Counter
|
||||
import random
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
transition_unsigned_block,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
transition_to,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1,
|
||||
MAINNET, MINIMAL,
|
||||
expect_assertion_error,
|
||||
with_all_phases_except,
|
||||
with_configs,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
)
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
|
||||
def get_committee_indices(spec, state, duplicates=False):
|
||||
'''
|
||||
This utility function allows the caller to ensure there are or are not
|
||||
duplicate validator indices in the returned committee based on
|
||||
the boolean ``duplicates``.
|
||||
'''
|
||||
state = state.copy()
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
|
||||
while True:
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
if duplicates:
|
||||
if len(committee) != len(set(committee)):
|
||||
return committee
|
||||
else:
|
||||
if len(committee) == len(set(committee)):
|
||||
return committee
|
||||
state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_invalid_signature_missing_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
random_participant = random.choice(committee)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one participant whose signature was included.
|
||||
block.body.sync_committee_bits = [index != random_participant for index in committee]
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee, # full committee signs
|
||||
)
|
||||
|
||||
yield 'blocks', [block]
|
||||
expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_invalid_signature_extra_participant(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
random_participant = random.choice(committee)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Exclude one signature even though the block claims the entire committee participated.
|
||||
block.body.sync_committee_bits = [True] * len(committee)
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
[index for index in committee if index != random_participant],
|
||||
)
|
||||
|
||||
yield 'blocks', [block]
|
||||
expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
|
||||
yield 'post', None
|
||||
|
||||
|
||||
def compute_sync_committee_participant_reward(spec, state, participant_index, active_validator_count, committee_size):
|
||||
base_reward = spec.get_base_reward(state, participant_index)
|
||||
proposer_reward = spec.get_proposer_reward(state, participant_index)
|
||||
max_participant_reward = base_reward - proposer_reward
|
||||
return max_participant_reward * active_validator_count // committee_size // spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MINIMAL], reason="to create nonduplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=False)
|
||||
committee_size = len(committee)
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size == len(set(committee))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
pre_balances = state.balances.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * committee_size
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
for index in range(len(state.validators)):
|
||||
expected_reward = 0
|
||||
|
||||
if index == block.proposer_index:
|
||||
expected_reward += sum([spec.get_proposer_reward(state, index) for index in committee])
|
||||
|
||||
if index in committee:
|
||||
expected_reward += compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
state,
|
||||
index,
|
||||
active_validator_count,
|
||||
committee_size
|
||||
)
|
||||
|
||||
assert state.balances[index] == pre_balances[index] + expected_reward
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MAINNET], reason="to create duplicate committee")
|
||||
@spec_state_test
|
||||
def test_sync_committee_rewards_duplicate_committee(spec, state):
|
||||
committee = get_committee_indices(spec, state, duplicates=True)
|
||||
committee_size = len(committee)
|
||||
active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))
|
||||
|
||||
# Preconditions of this test case
|
||||
assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
|
||||
assert committee_size > len(set(committee))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
pre_balances = state.balances.copy()
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * committee_size
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
multiplicities = Counter(committee)
|
||||
|
||||
for index in range(len(state.validators)):
|
||||
expected_reward = 0
|
||||
|
||||
if index == block.proposer_index:
|
||||
expected_reward += sum([spec.get_proposer_reward(state, index) for index in committee])
|
||||
|
||||
if index in committee:
|
||||
reward = compute_sync_committee_participant_reward(
|
||||
spec,
|
||||
state,
|
||||
index,
|
||||
active_validator_count,
|
||||
committee_size,
|
||||
)
|
||||
expected_reward += reward * multiplicities[index]
|
||||
|
||||
assert state.balances[index] == pre_balances[index] + expected_reward
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_past_block(spec, state):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
blocks = []
|
||||
for _ in range(2):
|
||||
# NOTE: need to transition twice to move beyond the degenerate case at genesis
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
# Valid sync committee signature here...
|
||||
block.body.sync_committee_bits = [True] * len(committee)
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
blocks.append(signed_block)
|
||||
|
||||
invalid_block = build_empty_block_for_next_slot(spec, state)
|
||||
# Invalid signature from a slot other than the previous
|
||||
invalid_block.body.sync_committee_bits = [True] * len(committee)
|
||||
invalid_block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
invalid_block.slot - 2,
|
||||
committee,
|
||||
)
|
||||
blocks.append(invalid_block)
|
||||
|
||||
expect_assertion_error(lambda: transition_unsigned_block(spec, state, invalid_block))
|
||||
|
||||
yield 'blocks', blocks
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@with_configs([MINIMAL], reason="to produce different committee sets")
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_invalid_signature_previous_committee(spec, state):
|
||||
# NOTE: the `state` provided is at genesis and the process to select
|
||||
# sync committees currently returns the same committee for the first and second
|
||||
# periods at genesis.
|
||||
# To get a distinct committee so we can generate an "old" signature, we need to advance
|
||||
# 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
old_sync_committee = state.next_sync_committee
|
||||
|
||||
epoch_in_future_sync_commitee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
slot_in_future_sync_committee_period = epoch_in_future_sync_commitee_period * spec.SLOTS_PER_EPOCH
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
# Use the previous sync committee to produce the signature.
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
# Ensure that the pubkey sets are different.
|
||||
assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
|
||||
committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * len(committee)
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee,
|
||||
)
|
||||
|
||||
yield 'blocks', [block]
|
||||
expect_assertion_error(lambda: spec.process_sync_committee(state, block.body))
|
||||
yield 'post', None
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_valid_signature_future_committee(spec, state):
|
||||
# NOTE: the `state` provided is at genesis and the process to select
|
||||
# sync committees currently returns the same committee for the first and second
|
||||
# periods at genesis.
|
||||
# To get a distinct committee so we can generate an "old" signature, we need to advance
|
||||
# 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
old_current_sync_committee = state.current_sync_committee
|
||||
old_next_sync_committee = state.next_sync_committee
|
||||
|
||||
epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
|
||||
slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
|
||||
transition_to(spec, state, slot_in_future_sync_committee_period)
|
||||
|
||||
sync_committee = state.current_sync_committee
|
||||
|
||||
expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)
|
||||
|
||||
assert sync_committee == expected_sync_committee
|
||||
assert sync_committee != old_current_sync_committee
|
||||
assert sync_committee != old_next_sync_committee
|
||||
|
||||
pubkeys = [validator.pubkey for validator in state.validators]
|
||||
committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [True] * len(committee_indices)
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
committee_indices,
|
||||
)
|
||||
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
@ -0,0 +1,37 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_sync_committees_progress(spec, state):
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
# NOTE: if not in the genesis epoch, period math below needs to be
|
||||
# adjusted relative to the current epoch
|
||||
assert current_epoch == 0
|
||||
|
||||
first_sync_committee = state.current_sync_committee
|
||||
second_sync_committee = state.next_sync_committee
|
||||
|
||||
slot_at_end_of_current_period = spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH - 1
|
||||
transition_to(spec, state, slot_at_end_of_current_period)
|
||||
|
||||
# Ensure assignments have not changed:
|
||||
assert state.current_sync_committee == first_sync_committee
|
||||
assert state.next_sync_committee == second_sync_committee
|
||||
|
||||
yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')
|
||||
|
||||
# Can compute the third committee having computed final balances in the last epoch
|
||||
# of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
|
||||
third_sync_committee = spec.get_sync_committee(state, 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||
|
||||
assert state.current_sync_committee == second_sync_committee
|
||||
assert state.next_sync_committee == third_sync_committee
|
@ -0,0 +1,113 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, LIGHTCLIENT_PATCH,
|
||||
with_phases,
|
||||
with_custom_state,
|
||||
spec_test, with_state,
|
||||
low_balances, misc_balances, large_validator_set,
|
||||
)
|
||||
from eth2spec.test.utils import with_meta_tags
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
next_epoch_via_block,
|
||||
)
|
||||
|
||||
|
||||
HF1_FORK_TEST_META_TAGS = {
|
||||
'fork': 'altair',
|
||||
}
|
||||
|
||||
|
||||
def run_fork_test(post_spec, pre_state):
|
||||
yield 'pre', pre_state
|
||||
|
||||
post_state = post_spec.upgrade_to_lightclient_patch(pre_state)
|
||||
|
||||
# Stable fields
|
||||
stable_fields = [
|
||||
'genesis_time', 'genesis_validators_root', 'slot',
|
||||
# History
|
||||
'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
|
||||
# Eth1
|
||||
'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
|
||||
# Registry
|
||||
'validators', 'balances',
|
||||
# Randomness
|
||||
'randao_mixes',
|
||||
# Slashings
|
||||
'slashings',
|
||||
# Finality
|
||||
'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
|
||||
]
|
||||
for field in stable_fields:
|
||||
assert getattr(pre_state, field) == getattr(post_state, field)
|
||||
|
||||
# Modified fields
|
||||
modified_fields = ['fork']
|
||||
for field in modified_fields:
|
||||
assert getattr(pre_state, field) != getattr(post_state, field)
|
||||
|
||||
assert pre_state.fork.current_version == post_state.fork.previous_version
|
||||
assert post_state.fork.current_version == post_spec.LIGHTCLIENT_PATCH_FORK_VERSION
|
||||
assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)
|
||||
|
||||
yield 'post', post_state
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_base_state(spec, phases, state):
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch(spec, phases, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_next_epoch_with_block(spec, phases, state):
|
||||
next_epoch_via_block(spec, state)
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@spec_test
|
||||
@with_state
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_many_next_epoch(spec, phases, state):
|
||||
for _ in range(3):
|
||||
next_epoch(spec, state)
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_low_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_misc_balances(spec, phases, state):
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
||||
|
||||
|
||||
@with_phases(phases=[PHASE0], other_phases=[LIGHTCLIENT_PATCH])
|
||||
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
@with_meta_tags(HF1_FORK_TEST_META_TAGS)
|
||||
def test_fork_random_large_validator_set(spec, phases, state):
|
||||
yield from run_fork_test(phases[LIGHTCLIENT_PATCH], state)
|
@ -0,0 +1,75 @@
|
||||
import random
|
||||
from eth2spec.test.helpers.state import (
|
||||
state_transition_and_sign_block,
|
||||
next_epoch,
|
||||
)
|
||||
from eth2spec.test.helpers.block import (
|
||||
build_empty_block_for_next_slot,
|
||||
)
|
||||
from eth2spec.test.helpers.sync_committee import (
|
||||
compute_aggregate_sync_committee_signature,
|
||||
)
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
)
|
||||
|
||||
|
||||
def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
|
||||
committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
participants = random.sample(committee, int(len(committee) * fraction_full))
|
||||
|
||||
yield 'pre', state
|
||||
|
||||
block = build_empty_block_for_next_slot(spec, state)
|
||||
block.body.sync_committee_bits = [index in participants for index in committee]
|
||||
block.body.sync_committee_signature = compute_aggregate_sync_committee_signature(
|
||||
spec,
|
||||
state,
|
||||
block.slot - 1,
|
||||
participants,
|
||||
)
|
||||
signed_block = state_transition_and_sign_block(spec, state, block)
|
||||
|
||||
yield 'blocks', [signed_block]
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee(spec, state):
|
||||
next_epoch(spec, state)
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_half_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_empty_sync_committee_committee_genesis(spec, state):
|
||||
yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)
|
@ -2,10 +2,13 @@ from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
always_bls, never_bls,
|
||||
with_all_phases,
|
||||
with_all_phases_except,
|
||||
spec_test,
|
||||
low_balances,
|
||||
with_custom_state,
|
||||
single_phase)
|
||||
single_phase,
|
||||
PHASE1,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
run_attestation_processing,
|
||||
get_valid_attestation,
|
||||
@ -135,20 +138,44 @@ def test_wrong_index_for_committee_signature(spec, state):
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot(spec, state):
|
||||
def reduce_state_committee_count_from_max(spec, state):
|
||||
"""
|
||||
Modified ``state`` to ensure that it has fewer committees at each slot than ``MAX_COMMITTEES_PER_SLOT``
|
||||
"""
|
||||
while spec.get_committee_count_per_slot(state, spec.get_current_epoch(state)) >= spec.MAX_COMMITTEES_PER_SLOT:
|
||||
state.validators = state.validators[:len(state.validators) // 2]
|
||||
state.balances = state.balances[:len(state.balances) // 2]
|
||||
|
||||
index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_0(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.index = index
|
||||
# Invalid index: current committees per slot is less than the max
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT - 1
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_wrong_index_for_slot_1(spec, state):
|
||||
reduce_state_committee_count_from_max(spec, state)
|
||||
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
committee_count = spec.get_committee_count_per_slot(state, current_epoch)
|
||||
|
||||
attestation = get_valid_attestation(spec, state, index=0)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# Invalid index: off by one
|
||||
attestation.data.index = committee_count
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
@ -160,7 +187,7 @@ def test_invalid_index(spec, state):
|
||||
attestation = get_valid_attestation(spec, state)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
# off by one (with respect to valid range) on purpose
|
||||
# Invalid index: off by one (with respect to valid range) on purpose
|
||||
attestation.data.index = spec.MAX_COMMITTEES_PER_SLOT
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
@ -305,3 +332,212 @@ def test_too_few_aggregation_bits(spec, state):
|
||||
attestation.aggregation_bits = attestation.aggregation_bits[:-1]
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Full correct atttestation contents at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_correct_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=True, on_time=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Incorrect head but correct source/target at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_head_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Incorrect head and target but correct source at different slot inclusions
|
||||
#
|
||||
|
||||
# Note: current phase 1 spec checks
|
||||
# `assert data.beacon_block_root == get_block_root_at_slot(state, compute_previous_slot(state.slot))`
|
||||
# so this test can't pass that until phase 1 refactor is merged
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_head_and_target_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.beacon_block_root = b'\x42' * 32
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
||||
|
||||
#
|
||||
# Correct head and source but incorrect target at different slot inclusions
|
||||
#
|
||||
|
||||
@with_all_phases_except([PHASE1])
|
||||
@spec_state_test
|
||||
def test_incorrect_target_min_inclusion_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False)
|
||||
next_slots(spec, state, spec.MIN_ATTESTATION_INCLUSION_DELAY)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_sqrt_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.integer_squareroot(spec.SLOTS_PER_EPOCH))
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_incorrect_target_after_epoch_delay(spec, state):
|
||||
attestation = get_valid_attestation(spec, state, signed=False, on_time=False)
|
||||
# increment past latest inclusion slot
|
||||
next_slots(spec, state, spec.SLOTS_PER_EPOCH + 1)
|
||||
|
||||
attestation.data.target.root = b'\x42' * 32
|
||||
sign_attestation(spec, state, attestation)
|
||||
|
||||
yield from run_attestation_processing(spec, state, attestation, False)
|
||||
|
@ -4,6 +4,7 @@ from eth2spec.test.context import (
|
||||
from eth2spec.test.helpers.attestations import sign_indexed_attestation
|
||||
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
|
||||
get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
|
||||
from eth2spec.test.helpers.proposer_slashings import get_min_slashing_penalty_quotient
|
||||
from eth2spec.test.helpers.state import (
|
||||
get_balance,
|
||||
next_epoch_via_block,
|
||||
@ -70,7 +71,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
|
||||
expected_balance = (
|
||||
pre_proposer_balance
|
||||
+ total_proposer_rewards
|
||||
- pre_slashings[proposer_index] // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||
- pre_slashings[proposer_index] // get_min_slashing_penalty_quotient(spec)
|
||||
)
|
||||
|
||||
assert get_balance(state, proposer_index) == expected_balance
|
||||
|
@ -94,6 +94,49 @@ def test_new_deposit_over_max(spec, state):
|
||||
yield from run_deposit_processing(spec, state, deposit, validator_index)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_new_deposit_eth1_withdrawal_credentials(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
withdrawal_credentials = (
|
||||
spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX
|
||||
+ b'\x00' * 11 # specified 0s
|
||||
+ b'\x59' * 20 # a 20-byte eth1 address
|
||||
)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit = prepare_state_and_deposit(
|
||||
spec, state,
|
||||
validator_index,
|
||||
amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
yield from run_deposit_processing(spec, state, deposit, validator_index)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_new_deposit_non_versioned_withdrawal_credentials(spec, state):
|
||||
# fresh deposit = next validator index = validator appended to registry
|
||||
validator_index = len(state.validators)
|
||||
withdrawal_credentials = (
|
||||
b'\xFF' # Non specified withdrawal credentials version
|
||||
+ b'\x02' * 31 # Garabage bytes
|
||||
)
|
||||
amount = spec.MAX_EFFECTIVE_BALANCE
|
||||
deposit = prepare_state_and_deposit(
|
||||
spec, state,
|
||||
validator_index,
|
||||
amount,
|
||||
withdrawal_credentials=withdrawal_credentials,
|
||||
signed=True,
|
||||
)
|
||||
|
||||
yield from run_deposit_processing(spec, state, deposit, validator_index)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
|
@ -1,46 +1,11 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with, run_epoch_processing_to
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
|
||||
def run_process_final_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_final_updates')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_no_reset(spec, state):
|
||||
assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
|
||||
# skip ahead to the end of the epoch
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_reset(spec, state):
|
||||
# skip ahead to the end of the voting period
|
||||
state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == 0
|
||||
def run_process_effective_balance_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_effective_balance_updates')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@ -48,7 +13,7 @@ def test_eth1_vote_reset(spec, state):
|
||||
def test_effective_balance_hysteresis(spec, state):
|
||||
# Prepare state up to the final-updates.
|
||||
# Then overwrite the balances, we only want to focus to be on the hysteresis based changes.
|
||||
run_epoch_processing_to(spec, state, 'process_final_updates')
|
||||
run_epoch_processing_to(spec, state, 'process_effective_balance_updates')
|
||||
# Set some edge cases for balances
|
||||
max = spec.MAX_EFFECTIVE_BALANCE
|
||||
min = spec.EJECTION_BALANCE
|
||||
@ -79,21 +44,7 @@ def test_effective_balance_hysteresis(spec, state):
|
||||
state.validators[i].effective_balance = pre_eff
|
||||
state.balances[i] = bal
|
||||
|
||||
yield 'pre', state
|
||||
spec.process_final_updates(state)
|
||||
yield 'post', state
|
||||
yield from run_process_effective_balance_updates(spec, state)
|
||||
|
||||
for i, (_, _, post_eff, name) in enumerate(cases):
|
||||
assert state.validators[i].effective_balance == post_eff, name
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_historical_root_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
|
||||
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
|
||||
history_len = len(state.historical_roots)
|
||||
|
||||
yield from run_process_final_updates(spec, state)
|
||||
|
||||
assert len(state.historical_roots) == history_len + 1
|
@ -0,0 +1,43 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
|
||||
def run_process_eth1_data_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_eth1_data_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_no_reset(spec, state):
|
||||
assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
|
||||
# skip ahead to the end of the epoch
|
||||
transition_to(spec, state, spec.SLOTS_PER_EPOCH - 1)
|
||||
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_eth1_data_reset(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == spec.SLOTS_PER_EPOCH
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_reset(spec, state):
|
||||
# skip ahead to the end of the voting period
|
||||
state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
state.eth1_data_votes.append(
|
||||
spec.Eth1Data(deposit_root=b'\xaa' * 32,
|
||||
deposit_count=state.eth1_deposit_index,
|
||||
block_hash=b'\xbb' * 32))
|
||||
|
||||
yield from run_process_eth1_data_reset(spec, state)
|
||||
|
||||
assert len(state.eth1_data_votes) == 0
|
@ -0,0 +1,20 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_historical_roots_update(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_historical_roots_update')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_historical_root_accumulator(spec, state):
|
||||
# skip ahead to near the end of the historical roots period (excl block before epoch processing)
|
||||
state.slot = spec.SLOTS_PER_HISTORICAL_ROOT - 1
|
||||
history_len = len(state.historical_roots)
|
||||
|
||||
yield from run_process_historical_roots_update(spec, state)
|
||||
|
||||
assert len(state.historical_roots) == history_len + 1
|
@ -1,6 +1,6 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
run_epoch_processing_with
|
||||
from eth2spec.test.context import is_post_lightclient_patch, spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with,
|
||||
)
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
|
||||
@ -16,12 +16,20 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||
previous_epoch = spec.get_previous_epoch(state)
|
||||
current_epoch = spec.get_current_epoch(state)
|
||||
|
||||
if current_epoch == epoch:
|
||||
attestations = state.current_epoch_attestations
|
||||
elif previous_epoch == epoch:
|
||||
attestations = state.previous_epoch_attestations
|
||||
if not is_post_lightclient_patch(spec):
|
||||
if current_epoch == epoch:
|
||||
attestations = state.current_epoch_attestations
|
||||
elif previous_epoch == epoch:
|
||||
attestations = state.previous_epoch_attestations
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
if current_epoch == epoch:
|
||||
epoch_participation = state.current_epoch_participation
|
||||
elif previous_epoch == epoch:
|
||||
epoch_participation = state.previous_epoch_participation
|
||||
else:
|
||||
raise Exception(f"cannot include attestations in epoch ${epoch} from epoch ${current_epoch}")
|
||||
|
||||
total_balance = spec.get_total_active_balance(state)
|
||||
remaining_balance = int(total_balance * 2 // 3) # can become negative
|
||||
@ -52,19 +60,28 @@ def add_mock_attestations(spec, state, epoch, source, target, sufficient_support
|
||||
for i in range(max(len(committee) // 5, 1)):
|
||||
aggregation_bits[i] = 0
|
||||
|
||||
attestations.append(spec.PendingAttestation(
|
||||
aggregation_bits=aggregation_bits,
|
||||
data=spec.AttestationData(
|
||||
slot=slot,
|
||||
beacon_block_root=b'\xff' * 32, # irrelevant to testing
|
||||
source=source,
|
||||
target=target,
|
||||
index=index,
|
||||
),
|
||||
inclusion_delay=1,
|
||||
))
|
||||
if messed_up_target:
|
||||
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
|
||||
# Update state
|
||||
if not is_post_lightclient_patch(spec):
|
||||
attestations.append(spec.PendingAttestation(
|
||||
aggregation_bits=aggregation_bits,
|
||||
data=spec.AttestationData(
|
||||
slot=slot,
|
||||
beacon_block_root=b'\xff' * 32, # irrelevant to testing
|
||||
source=source,
|
||||
target=target,
|
||||
index=index,
|
||||
),
|
||||
inclusion_delay=1,
|
||||
))
|
||||
if messed_up_target:
|
||||
attestations[len(attestations) - 1].data.target.root = b'\x99' * 32
|
||||
else:
|
||||
for i, index in enumerate(committee):
|
||||
if aggregation_bits[i]:
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_HEAD_FLAG_INDEX)
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_SOURCE_FLAG_INDEX)
|
||||
if not messed_up_target:
|
||||
epoch_participation[index] |= spec.ParticipationFlags(2**spec.TIMELY_TARGET_FLAG_INDEX)
|
||||
|
||||
|
||||
def get_checkpoints(spec, epoch):
|
||||
|
@ -0,0 +1,21 @@
|
||||
from eth2spec.test.context import PHASE0, spec_state_test, with_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_participation_record_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_participation_record_updates')
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_updated_participation_record(spec, state):
|
||||
state.previous_epoch_attestations = [spec.PendingAttestation(proposer_index=100)]
|
||||
current_epoch_attestations = [spec.PendingAttestation(proposer_index=200)]
|
||||
state.current_epoch_attestations = current_epoch_attestations
|
||||
|
||||
yield from run_process_participation_record_updates(spec, state)
|
||||
|
||||
assert state.previous_epoch_attestations == current_epoch_attestations
|
||||
assert state.current_epoch_attestations == []
|
@ -0,0 +1,21 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_randao_mixes_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_randao_mixes_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_updated_randao_mixes(spec, state):
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] = b'\x56' * 32
|
||||
|
||||
yield from run_process_randao_mixes_reset(spec, state)
|
||||
|
||||
assert state.randao_mixes[next_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR] == spec.get_randao_mix(
|
||||
state, spec.get_current_epoch(state)
|
||||
)
|
@ -1,7 +1,7 @@
|
||||
from eth2spec.test.helpers.deposits import mock_deposit
|
||||
from eth2spec.test.helpers.state import next_epoch, next_slots
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
|
||||
|
||||
def run_process_registry_updates(spec, state):
|
||||
|
@ -1,9 +1,11 @@
|
||||
from eth2spec.test.context import (
|
||||
spec_state_test, spec_test,
|
||||
with_all_phases, single_phase,
|
||||
with_phases, PHASE0, PHASE1,
|
||||
with_custom_state,
|
||||
zero_activation_threshold,
|
||||
misc_balances, low_single_balance,
|
||||
is_post_lightclient_patch,
|
||||
)
|
||||
from eth2spec.test.helpers.state import (
|
||||
next_epoch,
|
||||
@ -12,11 +14,12 @@ from eth2spec.test.helpers.state import (
|
||||
from eth2spec.test.helpers.attestations import (
|
||||
add_attestations_to_state,
|
||||
get_valid_attestation,
|
||||
sign_attestation,
|
||||
prepare_state_with_attestations,
|
||||
)
|
||||
from eth2spec.test.helpers.rewards import leaking
|
||||
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
from random import Random
|
||||
|
||||
|
||||
@ -63,7 +66,7 @@ def test_genesis_epoch_full_attestations_no_rewards(spec, state):
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_attestations_random_incorrect_fields(spec, state):
|
||||
attestations = prepare_state_with_attestations(spec, state)
|
||||
@ -156,10 +159,13 @@ def run_with_participation(spec, state, participation_fn):
|
||||
return att_participants
|
||||
|
||||
attestations = prepare_state_with_attestations(spec, state, participation_fn=participation_tracker)
|
||||
proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
proposer_indices = [a.proposer_index for a in state.previous_epoch_attestations]
|
||||
else:
|
||||
sync_committee_indices = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
|
||||
|
||||
yield from run_process_rewards_and_penalties(spec, state)
|
||||
|
||||
attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
|
||||
@ -167,12 +173,16 @@ def run_with_participation(spec, state, participation_fn):
|
||||
|
||||
for index in range(len(pre_state.validators)):
|
||||
if spec.is_in_inactivity_leak(state):
|
||||
# Proposers can still make money during a leak
|
||||
if index in proposer_indices and index in participated:
|
||||
# Proposers can still make money during a leak before LIGHTCLIENT_PATCH
|
||||
if not is_post_lightclient_patch(spec) and index in proposer_indices and index in participated:
|
||||
assert state.balances[index] > pre_state.balances[index]
|
||||
# If not proposer but participated optimally, should have exactly neutral balance
|
||||
elif index in attesting_indices:
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
if is_post_lightclient_patch(spec) and index in sync_committee_indices:
|
||||
# The sync committee reward has not been canceled, so the sync committee participants still earn it
|
||||
assert state.balances[index] >= pre_state.balances[index]
|
||||
else:
|
||||
# If not proposer but participated optimally, should have exactly neutral balance
|
||||
assert state.balances[index] == pre_state.balances[index]
|
||||
else:
|
||||
assert state.balances[index] < pre_state.balances[index]
|
||||
else:
|
||||
@ -278,6 +288,135 @@ def test_duplicate_attestation(spec, state):
|
||||
assert single_state.balances[index] == dup_state.balances[index]
|
||||
|
||||
|
||||
# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
|
||||
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_duplicate_participants_different_attestation_1(spec, state):
|
||||
"""
|
||||
Same attesters get two different attestations on chain for the same inclusion delay
|
||||
Earlier attestation (by list order) is correct, later has incorrect head
|
||||
Note: although these are slashable, they can validly be included
|
||||
"""
|
||||
correct_attestation = get_valid_attestation(spec, state, signed=True)
|
||||
incorrect_attestation = correct_attestation.copy()
|
||||
incorrect_attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, incorrect_attestation)
|
||||
|
||||
indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
|
||||
participants = get_indexed_attestation_participants(spec, indexed_attestation)
|
||||
|
||||
assert len(participants) > 0
|
||||
|
||||
single_correct_state = state.copy()
|
||||
dup_state = state.copy()
|
||||
|
||||
inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
|
||||
add_attestations_to_state(spec, dup_state, [correct_attestation, incorrect_attestation], inclusion_slot)
|
||||
|
||||
next_epoch(spec, single_correct_state)
|
||||
next_epoch(spec, dup_state)
|
||||
|
||||
# Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
|
||||
for _ in run_process_rewards_and_penalties(spec, single_correct_state):
|
||||
pass
|
||||
|
||||
# Output duplicate inclusion to test vectors
|
||||
yield from run_process_rewards_and_penalties(spec, dup_state)
|
||||
|
||||
for index in participants:
|
||||
assert state.balances[index] < single_correct_state.balances[index]
|
||||
assert single_correct_state.balances[index] == dup_state.balances[index]
|
||||
|
||||
|
||||
# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
|
||||
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_duplicate_participants_different_attestation_2(spec, state):
|
||||
"""
|
||||
Same attesters get two different attestations on chain for the same inclusion delay
|
||||
Earlier attestation (by list order) has incorrect head, later is correct
|
||||
Note: although these are slashable, they can validly be included
|
||||
"""
|
||||
correct_attestation = get_valid_attestation(spec, state, signed=True)
|
||||
incorrect_attestation = correct_attestation.copy()
|
||||
incorrect_attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, incorrect_attestation)
|
||||
|
||||
indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
|
||||
participants = get_indexed_attestation_participants(spec, indexed_attestation)
|
||||
|
||||
assert len(participants) > 0
|
||||
|
||||
single_correct_state = state.copy()
|
||||
dup_state = state.copy()
|
||||
|
||||
inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
|
||||
add_attestations_to_state(spec, dup_state, [incorrect_attestation, correct_attestation], inclusion_slot)
|
||||
|
||||
next_epoch(spec, single_correct_state)
|
||||
next_epoch(spec, dup_state)
|
||||
|
||||
# Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
|
||||
for _ in run_process_rewards_and_penalties(spec, single_correct_state):
|
||||
pass
|
||||
|
||||
# Output duplicate inclusion to test vectors
|
||||
yield from run_process_rewards_and_penalties(spec, dup_state)
|
||||
|
||||
for index in participants:
|
||||
assert state.balances[index] < single_correct_state.balances[index]
|
||||
# Inclusion delay does not take into account correctness so equal reward
|
||||
assert single_correct_state.balances[index] == dup_state.balances[index]
|
||||
|
||||
|
||||
# TODO: update to all phases when https://github.com/ethereum/eth2.0-specs/pull/2024 is merged
|
||||
# Currently disabled for Phase 1+ due to the mechanics of on-time-attestations complicating what should be a simple test
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_duplicate_participants_different_attestation_3(spec, state):
|
||||
"""
|
||||
Same attesters get two different attestations on chain for *different* inclusion delay
|
||||
Earlier attestation (by list order) has incorrect head, later is correct
|
||||
Note: although these are slashable, they can validly be included
|
||||
"""
|
||||
correct_attestation = get_valid_attestation(spec, state, signed=True)
|
||||
incorrect_attestation = correct_attestation.copy()
|
||||
incorrect_attestation.data.beacon_block_root = b'\x42' * 32
|
||||
sign_attestation(spec, state, incorrect_attestation)
|
||||
|
||||
indexed_attestation = spec.get_indexed_attestation(state, correct_attestation)
|
||||
participants = get_indexed_attestation_participants(spec, indexed_attestation)
|
||||
|
||||
assert len(participants) > 0
|
||||
|
||||
single_correct_state = state.copy()
|
||||
dup_state = state.copy()
|
||||
|
||||
inclusion_slot = state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||
add_attestations_to_state(spec, single_correct_state, [correct_attestation], inclusion_slot)
|
||||
add_attestations_to_state(spec, dup_state, [incorrect_attestation], inclusion_slot)
|
||||
add_attestations_to_state(spec, dup_state, [correct_attestation], inclusion_slot + 1)
|
||||
|
||||
next_epoch(spec, single_correct_state)
|
||||
next_epoch(spec, dup_state)
|
||||
|
||||
# Run non-duplicate inclusion rewards for comparison. Do not yield test vectors
|
||||
for _ in run_process_rewards_and_penalties(spec, single_correct_state):
|
||||
pass
|
||||
|
||||
# Output duplicate inclusion to test vectors
|
||||
yield from run_process_rewards_and_penalties(spec, dup_state)
|
||||
|
||||
for index in participants:
|
||||
assert state.balances[index] < single_correct_state.balances[index]
|
||||
# Inclusion delay does not take into account correctness so equal reward
|
||||
assert single_correct_state.balances[index] == dup_state.balances[index]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
# Case when some eligible attestations are slashed. Modifies attesting_balance and consequently rewards/penalties.
|
||||
@ -289,7 +428,8 @@ def test_attestations_some_slashed(spec, state):
|
||||
for i in range(spec.MIN_PER_EPOCH_CHURN_LIMIT):
|
||||
spec.slash_validator(state, attesting_indices_before_slashings[i])
|
||||
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import (
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases, is_post_lightclient_patch
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with, run_epoch_processing_to
|
||||
)
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
@ -23,12 +23,19 @@ def slash_validators(spec, state, indices, out_epochs):
|
||||
] = total_slashed_balance
|
||||
|
||||
|
||||
def get_slashing_multiplier(spec):
|
||||
if is_post_lightclient_patch(spec):
|
||||
return spec.HF1_PROPORTIONAL_SLASHING_MULTIPLIER
|
||||
else:
|
||||
return spec.PROPORTIONAL_SLASHING_MULTIPLIER
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_max_penalties(spec, state):
|
||||
# Slashed count to ensure that enough validators are slashed to induce maximum penalties
|
||||
slashed_count = min(
|
||||
(len(state.validators) // spec.PROPORTIONAL_SLASHING_MULTIPLIER) + 1,
|
||||
(len(state.validators) // get_slashing_multiplier(spec)) + 1,
|
||||
# Can't slash more than validator count!
|
||||
len(state.validators)
|
||||
)
|
||||
@ -40,7 +47,7 @@ def test_max_penalties(spec, state):
|
||||
total_balance = spec.get_total_active_balance(state)
|
||||
total_penalties = sum(state.slashings)
|
||||
|
||||
assert total_balance // spec.PROPORTIONAL_SLASHING_MULTIPLIER <= total_penalties
|
||||
assert total_balance // get_slashing_multiplier(spec) <= total_penalties
|
||||
|
||||
yield from run_process_slashings(spec, state)
|
||||
|
||||
@ -50,7 +57,30 @@ def test_max_penalties(spec, state):
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_small_penalty(spec, state):
|
||||
def test_low_penalty(spec, state):
|
||||
# Slashed count is one tenth of validator set
|
||||
slashed_count = (len(state.validators) // 10) + 1
|
||||
out_epoch = spec.get_current_epoch(state) + (spec.EPOCHS_PER_SLASHINGS_VECTOR // 2)
|
||||
|
||||
slashed_indices = list(range(slashed_count))
|
||||
slash_validators(spec, state, slashed_indices, [out_epoch] * slashed_count)
|
||||
|
||||
pre_state = state.copy()
|
||||
|
||||
yield from run_process_slashings(spec, state)
|
||||
|
||||
for i in slashed_indices:
|
||||
assert 0 < state.balances[i] < pre_state.balances[i]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_minimal_penalty(spec, state):
|
||||
#
|
||||
# When very few slashings, the resulting slashing penalty gets rounded down
|
||||
# to zero so the result of `process_slashings` is null
|
||||
#
|
||||
|
||||
# Just the bare minimum for this one validator
|
||||
state.balances[0] = state.validators[0].effective_balance = spec.EJECTION_BALANCE
|
||||
# All the other validators get the maximum.
|
||||
@ -74,11 +104,13 @@ def test_small_penalty(spec, state):
|
||||
|
||||
expected_penalty = (
|
||||
state.validators[0].effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
* (3 * total_penalties)
|
||||
* (get_slashing_multiplier(spec) * total_penalties)
|
||||
// total_balance
|
||||
* spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
)
|
||||
assert state.balances[0] == pre_slash_balances[0] - expected_penalty
|
||||
|
||||
assert expected_penalty == 0
|
||||
assert state.balances[0] == pre_slash_balances[0]
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@ -96,7 +128,7 @@ def test_scaled_penalties(spec, state):
|
||||
state.slashings[5] = base + (incr * 6)
|
||||
state.slashings[spec.EPOCHS_PER_SLASHINGS_VECTOR - 1] = base + (incr * 7)
|
||||
|
||||
slashed_count = len(state.validators) // (spec.PROPORTIONAL_SLASHING_MULTIPLIER + 1)
|
||||
slashed_count = len(state.validators) // (get_slashing_multiplier(spec) + 1)
|
||||
|
||||
assert slashed_count > 10
|
||||
|
||||
@ -134,7 +166,7 @@ def test_scaled_penalties(spec, state):
|
||||
v = state.validators[i]
|
||||
expected_penalty = (
|
||||
v.effective_balance // spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
* (spec.PROPORTIONAL_SLASHING_MULTIPLIER * total_penalties)
|
||||
* (get_slashing_multiplier(spec) * total_penalties)
|
||||
// (total_balance)
|
||||
* spec.EFFECTIVE_BALANCE_INCREMENT
|
||||
)
|
||||
|
@ -0,0 +1,20 @@
|
||||
from eth2spec.test.context import spec_state_test, with_all_phases
|
||||
from eth2spec.test.helpers.epoch_processing import (
|
||||
run_epoch_processing_with
|
||||
)
|
||||
|
||||
|
||||
def run_process_slashings_reset(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_slashings_reset')
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_flush_slashings(spec, state):
|
||||
next_epoch = spec.get_current_epoch(state) + 1
|
||||
state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] = 100
|
||||
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] != 0
|
||||
|
||||
yield from run_process_slashings_reset(spec, state)
|
||||
|
||||
assert state.slashings[next_epoch % spec.EPOCHS_PER_SLASHINGS_VECTOR] == 0
|
@ -59,16 +59,6 @@ def test_is_valid_genesis_state_true_more_balance(spec):
|
||||
yield from run_is_valid_genesis_state(spec, state, valid=True)
|
||||
|
||||
|
||||
# TODO: not part of the genesis function yet. Erroneously merged.
|
||||
# @with_phases([PHASE0])
|
||||
# @spec_test
|
||||
# def test_is_valid_genesis_state_false_not_enough_balance(spec):
|
||||
# state = create_valid_beacon_state(spec)
|
||||
# state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE - 1
|
||||
#
|
||||
# yield from run_is_valid_genesis_state(spec, state, valid=False)
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_test
|
||||
@single_phase
|
||||
|
@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
|
||||
@ -32,7 +32,7 @@ def test_full_but_partial_participation(spec, state):
|
||||
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_one_attestation_one_correct(spec, state):
|
||||
yield from rewards_helpers.run_test_one_attestation_one_correct(spec, state)
|
||||
@ -75,7 +75,7 @@ def test_some_very_low_effective_balances_that_did_not_attest(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_correct_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@ -86,7 +86,7 @@ def test_full_half_correct_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_correct_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@ -97,7 +97,7 @@ def test_full_correct_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_incorrect_target_incorrect_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@ -108,7 +108,7 @@ def test_full_half_incorrect_target_incorrect_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_half_incorrect_target_correct_head(spec, state):
|
||||
yield from rewards_helpers.run_test_full_fraction_incorrect(
|
||||
@ -119,31 +119,31 @@ def test_full_half_incorrect_target_correct_head(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_delay_one_slot(spec, state):
|
||||
yield from rewards_helpers.run_test_full_delay_one_slot(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_delay_max_slots(spec, state):
|
||||
yield from rewards_helpers.run_test_full_delay_max_slots(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_full_mixed_delay(spec, state):
|
||||
yield from rewards_helpers.run_test_full_mixed_delay(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_proposer_not_in_attestations(spec, state):
|
||||
yield from rewards_helpers.run_test_proposer_not_in_attestations(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
def test_duplicate_attestations_at_later_slots(spec, state):
|
||||
yield from rewards_helpers.run_test_duplicate_attestations_at_later_slots(spec, state)
|
||||
|
@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import with_all_phases, spec_state_test
|
||||
from eth2spec.test.context import PHASE0, PHASE1, with_all_phases, with_phases, spec_state_test
|
||||
from eth2spec.test.helpers.rewards import leaking
|
||||
import eth2spec.test.helpers.rewards as rewards_helpers
|
||||
|
||||
@ -38,7 +38,7 @@ def test_full_but_partial_participation_leak(spec, state):
|
||||
yield from rewards_helpers.run_test_full_but_partial_participation(spec, state)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_one_attestation_one_correct_leak(spec, state):
|
||||
@ -87,7 +87,7 @@ def test_some_very_low_effective_balances_that_did_not_attest_leak(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_full_half_correct_target_incorrect_head_leak(spec, state):
|
||||
@ -99,7 +99,7 @@ def test_full_half_correct_target_incorrect_head_leak(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_full_correct_target_incorrect_head_leak(spec, state):
|
||||
@ -111,7 +111,7 @@ def test_full_correct_target_incorrect_head_leak(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
|
||||
@ -123,7 +123,7 @@ def test_full_half_incorrect_target_incorrect_head_leak(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_phases([PHASE0, PHASE1])
|
||||
@spec_state_test
|
||||
@leaking()
|
||||
def test_full_half_incorrect_target_correct_head_leak(spec, state):
|
||||
|
@ -29,6 +29,12 @@ def test_full_random_2(spec, state):
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(3030))
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_full_random_3(spec, state):
|
||||
yield from rewards_helpers.run_test_full_random(spec, state, rng=Random(4040))
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
|
||||
@spec_test
|
||||
|
@ -1,3 +1,4 @@
|
||||
from random import Random
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from eth2spec.test.helpers.state import (
|
||||
@ -9,7 +10,7 @@ from eth2spec.test.helpers.block import (
|
||||
sign_block,
|
||||
transition_unsigned_block,
|
||||
)
|
||||
from eth2spec.test.helpers.keys import privkeys, pubkeys
|
||||
from eth2spec.test.helpers.keys import pubkeys
|
||||
from eth2spec.test.helpers.attester_slashings import (
|
||||
get_valid_attester_slashing_by_indices,
|
||||
get_valid_attester_slashing,
|
||||
@ -18,7 +19,12 @@ from eth2spec.test.helpers.attester_slashings import (
|
||||
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing, check_proposer_slashing_effect
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation
|
||||
from eth2spec.test.helpers.deposits import prepare_state_and_deposit
|
||||
from eth2spec.test.helpers.voluntary_exits import prepare_signed_exits
|
||||
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
|
||||
from eth2spec.test.helpers.multi_operations import (
|
||||
run_slash_and_exit,
|
||||
run_test_full_random_operations,
|
||||
)
|
||||
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, PHASE1, MINIMAL,
|
||||
@ -29,6 +35,7 @@ from eth2spec.test.context import (
|
||||
with_configs,
|
||||
with_custom_state,
|
||||
large_validator_set,
|
||||
is_post_lightclient_patch,
|
||||
)
|
||||
|
||||
|
||||
@ -774,15 +781,19 @@ def test_attestation(spec, state):
|
||||
spec, state, shard_transition=shard_transition, index=index, signed=True, on_time=True
|
||||
)
|
||||
|
||||
if not is_post_lightclient_patch(spec):
|
||||
pre_current_attestations_len = len(state.current_epoch_attestations)
|
||||
|
||||
# Add to state via block transition
|
||||
pre_current_attestations_len = len(state.current_epoch_attestations)
|
||||
attestation_block.body.attestations.append(attestation)
|
||||
signed_attestation_block = state_transition_and_sign_block(spec, state, attestation_block)
|
||||
|
||||
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
|
||||
|
||||
# Epoch transition should move to previous_epoch_attestations
|
||||
pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
|
||||
if not is_post_lightclient_patch(spec):
|
||||
assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
|
||||
# Epoch transition should move to previous_epoch_attestations
|
||||
pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
|
||||
else:
|
||||
pre_current_epoch_participation_root = spec.hash_tree_root(state.current_epoch_participation)
|
||||
|
||||
epoch_block = build_empty_block(spec, state, state.slot + spec.SLOTS_PER_EPOCH)
|
||||
signed_epoch_block = state_transition_and_sign_block(spec, state, epoch_block)
|
||||
@ -790,22 +801,13 @@ def test_attestation(spec, state):
|
||||
yield 'blocks', [signed_attestation_block, signed_epoch_block]
|
||||
yield 'post', state
|
||||
|
||||
assert len(state.current_epoch_attestations) == 0
|
||||
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
|
||||
|
||||
|
||||
def prepare_signed_exits(spec, state, indices):
|
||||
domain = spec.get_domain(state, spec.DOMAIN_VOLUNTARY_EXIT)
|
||||
|
||||
def create_signed_exit(index):
|
||||
exit = spec.VoluntaryExit(
|
||||
epoch=spec.get_current_epoch(state),
|
||||
validator_index=index,
|
||||
)
|
||||
signing_root = spec.compute_signing_root(exit, domain)
|
||||
return spec.SignedVoluntaryExit(message=exit, signature=bls.Sign(privkeys[index], signing_root))
|
||||
|
||||
return [create_signed_exit(index) for index in indices]
|
||||
if not is_post_lightclient_patch(spec):
|
||||
assert len(state.current_epoch_attestations) == 0
|
||||
assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
|
||||
else:
|
||||
for index in range(len(state.validators)):
|
||||
assert state.current_epoch_participation[index] == spec.ParticipationFlags(0b0000_0000)
|
||||
assert spec.hash_tree_root(state.previous_epoch_participation) == pre_current_epoch_participation_root
|
||||
|
||||
|
||||
# In phase1 a committee is computed for SHARD_COMMITTEE_PERIOD slots ago,
|
||||
@ -895,6 +897,23 @@ def test_multiple_different_validator_exits_same_block(spec, state):
|
||||
assert state.validators[index].exit_epoch < spec.FAR_FUTURE_EPOCH
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
def test_slash_and_exit_same_index(spec, state):
|
||||
validator_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
|
||||
yield from run_slash_and_exit(spec, state, validator_index, validator_index, valid=False)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
def test_slash_and_exit_diff_index(spec, state):
|
||||
slash_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-1]
|
||||
exit_index = spec.get_active_validator_indices(state, spec.get_current_epoch(state))[-2]
|
||||
yield from run_slash_and_exit(spec, state, slash_index, exit_index)
|
||||
|
||||
|
||||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_balance_driven_status_transitions(spec, state):
|
||||
@ -1013,3 +1032,27 @@ def test_eth1_data_votes_no_consensus(spec, state):
|
||||
|
||||
yield 'blocks', blocks
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_full_random_operations_0(spec, state):
|
||||
yield from run_test_full_random_operations(spec, state, rng=Random(2020))
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_full_random_operations_1(spec, state):
|
||||
yield from run_test_full_random_operations(spec, state, rng=Random(2021))
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_full_random_operations_2(spec, state):
|
||||
yield from run_test_full_random_operations(spec, state, rng=Random(2022))
|
||||
|
||||
|
||||
@with_phases([PHASE0])
|
||||
@spec_state_test
|
||||
def test_full_random_operations_3(spec, state):
|
||||
yield from run_test_full_random_operations(spec, state, rng=Random(2023))
|
||||
|
@ -1,4 +1,4 @@
|
||||
from eth2spec.test.context import PHASE0, with_all_phases, spec_state_test
|
||||
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH, with_all_phases, spec_state_test
|
||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||
from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
|
||||
from eth2spec.test.helpers.state import transition_to, state_transition_and_sign_block, next_epoch, next_slot
|
||||
@ -18,12 +18,12 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
|
||||
spec.on_attestation(store, attestation)
|
||||
|
||||
sample_index = indexed_attestation.attesting_indices[0]
|
||||
if spec.fork == PHASE0:
|
||||
if spec.fork in (PHASE0, LIGHTCLIENT_PATCH):
|
||||
latest_message = spec.LatestMessage(
|
||||
epoch=attestation.data.target.epoch,
|
||||
root=attestation.data.beacon_block_root,
|
||||
)
|
||||
else:
|
||||
elif spec.fork == PHASE1:
|
||||
latest_message = spec.LatestMessage(
|
||||
epoch=attestation.data.target.epoch,
|
||||
root=attestation.data.beacon_block_root,
|
||||
|
@ -1,5 +1,6 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
always_bls,
|
||||
@ -12,7 +13,7 @@ from eth2spec.test.helpers.attestations import (
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_on_time_success(spec, state):
|
||||
@ -23,7 +24,7 @@ def test_on_time_success(spec, state):
|
||||
yield from run_attestation_processing(spec, state, attestation)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_late_success(spec, state):
|
||||
|
@ -9,6 +9,7 @@ from eth2spec.test.helpers.attestations import (
|
||||
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
MINIMAL,
|
||||
expect_assertion_error,
|
||||
disable_process_reveal_deadlines,
|
||||
@ -68,7 +69,7 @@ def run_custody_chunk_response_processing(spec, state, custody_response, valid=T
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@disable_process_reveal_deadlines
|
||||
@ -92,7 +93,7 @@ def test_challenge_appended(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -118,7 +119,7 @@ def test_challenge_empty_element_replaced(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -144,7 +145,7 @@ def test_duplicate_challenge(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -172,7 +173,7 @@ def test_second_challenge(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge1)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -197,7 +198,7 @@ def test_multiple_epochs_custody(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -222,7 +223,7 @@ def test_many_epochs_custody(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -243,7 +244,7 @@ def test_off_chain_attestation(spec, state):
|
||||
yield from run_chunk_challenge_processing(spec, state, challenge)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -275,7 +276,7 @@ def test_custody_response(spec, state):
|
||||
yield from run_custody_chunk_response_processing(spec, state, custody_response)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -306,7 +307,7 @@ def test_custody_response_chunk_index_2(spec, state):
|
||||
yield from run_custody_chunk_response_processing(spec, state, custody_response)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -338,7 +339,7 @@ def test_custody_response_multiple_epochs(spec, state):
|
||||
yield from run_custody_chunk_response_processing(spec, state, custody_response)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
|
@ -1,6 +1,7 @@
|
||||
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
@ -39,7 +40,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success(spec, state):
|
||||
@ -49,7 +50,7 @@ def test_success(spec, state):
|
||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_reveal_too_early(spec, state):
|
||||
@ -58,7 +59,7 @@ def test_reveal_too_early(spec, state):
|
||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_wrong_period(spec, state):
|
||||
@ -67,7 +68,7 @@ def test_wrong_period(spec, state):
|
||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_late_reveal(spec, state):
|
||||
@ -77,7 +78,7 @@ def test_late_reveal(spec, state):
|
||||
yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_double_reveal(spec, state):
|
||||
|
@ -11,6 +11,7 @@ from eth2spec.test.helpers.state import get_balance, transition_to
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
MINIMAL,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
@ -112,7 +113,7 @@ def run_standard_custody_slashing_test(spec,
|
||||
yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -120,7 +121,7 @@ def test_custody_slashing(spec, state):
|
||||
yield from run_standard_custody_slashing_test(spec, state)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -128,7 +129,7 @@ def test_incorrect_custody_slashing(spec, state):
|
||||
yield from run_standard_custody_slashing_test(spec, state, correct=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -136,7 +137,7 @@ def test_multiple_epochs_custody(spec, state):
|
||||
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
@ -144,7 +145,7 @@ def test_many_epochs_custody(spec, state):
|
||||
yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@disable_process_reveal_deadlines
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
|
@ -2,6 +2,7 @@ from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
|
||||
from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
expect_assertion_error,
|
||||
@ -41,7 +42,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, v
|
||||
yield 'post', state
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_success(spec, state):
|
||||
@ -50,7 +51,7 @@ def test_success(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_reveal_from_current_epoch(spec, state):
|
||||
@ -59,7 +60,7 @@ def test_reveal_from_current_epoch(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_reveal_from_past_epoch(spec, state):
|
||||
@ -69,7 +70,7 @@ def test_reveal_from_past_epoch(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_reveal_with_custody_padding(spec, state):
|
||||
@ -81,7 +82,7 @@ def test_reveal_with_custody_padding(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||
@ -93,7 +94,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_double_reveal(spec, state):
|
||||
@ -114,7 +115,7 @@ def test_double_reveal(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_revealer_is_slashed(spec, state):
|
||||
@ -124,7 +125,7 @@ def test_revealer_is_slashed(spec, state):
|
||||
yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls
|
||||
def test_far_future_epoch(spec, state):
|
||||
|
@ -1,5 +1,6 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
only_full_crosslink,
|
||||
spec_state_test,
|
||||
@ -90,21 +91,21 @@ def run_successful_crosslink_tests(spec, state, target_len_offset_slot):
|
||||
assert bool(pending_attestation.crosslink_success) is True
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_basic_crosslinks(spec, state):
|
||||
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=1)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_multiple_offset_slots(spec, state):
|
||||
yield from run_successful_crosslink_tests(spec, state, target_len_offset_slot=2)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_no_winning_root(spec, state):
|
||||
@ -152,7 +153,7 @@ def test_no_winning_root(spec, state):
|
||||
assert state.shard_states == pre_shard_states
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_wrong_shard_transition_root(spec, state):
|
||||
|
@ -8,13 +8,14 @@ from eth2spec.test.helpers.attestations import (
|
||||
from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
MINIMAL,
|
||||
spec_state_test,
|
||||
with_all_phases_except,
|
||||
with_configs,
|
||||
)
|
||||
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
|
||||
from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
|
||||
run_chunk_challenge_processing,
|
||||
@ -25,7 +26,7 @@ def run_process_challenge_deadlines(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_validator_slashed_after_chunk_challenge(spec, state):
|
||||
|
@ -1,5 +1,6 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
)
|
||||
from eth2spec.test.helpers.custody import (
|
||||
get_valid_chunk_challenge,
|
||||
@ -16,7 +17,7 @@ from eth2spec.test.context import (
|
||||
spec_state_test,
|
||||
)
|
||||
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
|
||||
from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
|
||||
run_chunk_challenge_processing,
|
||||
@ -29,7 +30,7 @@ def run_process_custody_final_updates(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates')
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_validator_withdrawal_delay(spec, state):
|
||||
transition_to_valid_shard_slot(spec, state)
|
||||
@ -42,7 +43,7 @@ def test_validator_withdrawal_delay(spec, state):
|
||||
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
|
||||
transition_to_valid_shard_slot(spec, state)
|
||||
@ -67,7 +68,7 @@ def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
|
||||
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
|
||||
transition_to_valid_shard_slot(spec, state)
|
||||
@ -116,7 +117,7 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
|
||||
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state):
|
||||
transition_to_valid_shard_slot(spec, state)
|
||||
|
@ -4,12 +4,13 @@ from eth2spec.test.helpers.custody import (
|
||||
from eth2spec.test.helpers.state import transition_to
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
MINIMAL,
|
||||
with_all_phases_except,
|
||||
with_configs,
|
||||
spec_state_test,
|
||||
)
|
||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
||||
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
|
||||
|
||||
|
||||
@ -17,7 +18,7 @@ def run_process_challenge_deadlines(spec, state):
|
||||
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_validator_slashed_after_reveal_deadline(spec, state):
|
||||
@ -37,7 +38,7 @@ def test_validator_slashed_after_reveal_deadline(spec, state):
|
||||
assert state.validators[0].slashed == 1
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL], reason="too slow")
|
||||
def test_validator_not_slashed_after_reveal(spec, state):
|
||||
|
@ -1,7 +1,9 @@
|
||||
from typing import Dict, Sequence
|
||||
|
||||
from eth2spec.test.context import (
|
||||
PHASE0, MINIMAL,
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
MINIMAL,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
only_full_crosslink,
|
||||
@ -98,7 +100,7 @@ def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, comm
|
||||
assert post_shard_state.gasprice > pre_gasprice
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_process_beacon_block_with_normal_shard_transition(spec, state):
|
||||
@ -112,7 +114,7 @@ def test_process_beacon_block_with_normal_shard_transition(spec, state):
|
||||
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
|
||||
@ -131,7 +133,7 @@ def test_process_beacon_block_with_empty_proposal_transition(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
|
||||
@ -165,7 +167,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
|
||||
yield from run_beacon_block(spec, state, block)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@with_configs([MINIMAL])
|
||||
def test_custody_key_reveal(spec, state):
|
||||
@ -179,7 +181,7 @@ def test_custody_key_reveal(spec, state):
|
||||
yield from run_beacon_block(spec, state, block)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_early_derived_secret_reveal(spec, state):
|
||||
transition_to_valid_shard_slot(spec, state)
|
||||
@ -190,7 +192,7 @@ def test_early_derived_secret_reveal(spec, state):
|
||||
yield from run_beacon_block(spec, state, block)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_custody_slashing(spec, state):
|
||||
|
@ -1,5 +1,6 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
always_bls,
|
||||
expect_assertion_error,
|
||||
spec_state_test,
|
||||
@ -43,7 +44,7 @@ def run_shard_blocks(spec, shard_state, signed_shard_block, beacon_parent_state,
|
||||
shard_state.latest_block_root == pre_shard_state.latest_block_root
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -63,7 +64,7 @@ def test_valid_shard_block(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_invalid_shard_parent_root(spec, state):
|
||||
@ -79,7 +80,7 @@ def test_invalid_shard_parent_root(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_invalid_beacon_parent_root(spec, state):
|
||||
@ -94,7 +95,7 @@ def test_invalid_beacon_parent_root(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_invalid_slot(spec, state):
|
||||
@ -110,7 +111,7 @@ def test_invalid_slot(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_invalid_proposer_index(spec, state):
|
||||
@ -130,7 +131,7 @@ def test_invalid_proposer_index(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -151,7 +152,7 @@ def test_out_of_bound_offset(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -170,7 +171,7 @@ def test_invalid_offset(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state, valid=False)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -189,7 +190,7 @@ def test_empty_block_body(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -208,7 +209,7 @@ def test_invalid_signature(spec, state):
|
||||
#
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
@ -225,7 +226,7 @@ def test_max_offset(spec, state):
|
||||
yield from run_shard_blocks(spec, shard_state, signed_shard_block, beacon_state)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@always_bls
|
||||
@only_full_crosslink
|
||||
|
@ -1,6 +1,13 @@
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
|
||||
from eth2spec.test.context import PHASE0, spec_state_test, with_all_phases_except, never_bls, only_full_crosslink
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
spec_state_test,
|
||||
with_all_phases_except,
|
||||
never_bls,
|
||||
only_full_crosslink,
|
||||
)
|
||||
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
|
||||
from eth2spec.test.helpers.shard_block import (
|
||||
build_shard_block,
|
||||
@ -145,7 +152,7 @@ def create_and_apply_beacon_and_shard_blocks(spec, state, store, shard, shard_bl
|
||||
return has_shard_committee
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@never_bls # Set to never_bls for testing `check_pending_shard_blocks`
|
||||
def test_basic(spec, state):
|
||||
@ -206,7 +213,7 @@ def create_simple_fork(spec, state, store, shard):
|
||||
return head_block, forking_block
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_shard_simple_fork(spec, state):
|
||||
@ -231,7 +238,7 @@ def test_shard_simple_fork(spec, state):
|
||||
assert spec.get_shard_head(store, shard) == forking_block.message.hash_tree_root()
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
@only_full_crosslink
|
||||
def test_shard_latest_messages_for_different_shards(spec, state):
|
||||
|
@ -1,12 +1,13 @@
|
||||
from eth2spec.test.context import (
|
||||
PHASE0,
|
||||
LIGHTCLIENT_PATCH,
|
||||
with_all_phases_except,
|
||||
spec_state_test,
|
||||
)
|
||||
from eth2spec.test.helpers.state import next_epoch
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_get_committee_count_delta(spec, state):
|
||||
assert spec.get_committee_count_delta(state, 0, 0) == 0
|
||||
@ -23,7 +24,7 @@ def test_get_committee_count_delta(spec, state):
|
||||
)
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_get_start_shard_current_epoch_start(spec, state):
|
||||
assert state.current_epoch_start_shard == 0
|
||||
@ -39,7 +40,7 @@ def test_get_start_shard_current_epoch_start(spec, state):
|
||||
assert start_shard == state.current_epoch_start_shard
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_get_start_shard_next_slot(spec, state):
|
||||
next_epoch(spec, state)
|
||||
@ -57,7 +58,7 @@ def test_get_start_shard_next_slot(spec, state):
|
||||
assert start_shard == expected_start_shard
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_get_start_shard_previous_slot(spec, state):
|
||||
next_epoch(spec, state)
|
||||
@ -76,7 +77,7 @@ def test_get_start_shard_previous_slot(spec, state):
|
||||
assert start_shard == expected_start_shard
|
||||
|
||||
|
||||
@with_all_phases_except([PHASE0])
|
||||
@with_all_phases_except([PHASE0, LIGHTCLIENT_PATCH])
|
||||
@spec_state_test
|
||||
def test_get_start_shard_far_past_epoch(spec, state):
|
||||
initial_epoch = spec.get_current_epoch(state)
|
||||
|
@ -32,10 +32,17 @@ The provided pre-state is already transitioned to just before the specific sub-t
|
||||
|
||||
Sub-transitions:
|
||||
|
||||
Sub-transitions:
|
||||
|
||||
- `justification_and_finalization`
|
||||
- `rewards_and_penalties` (limited to `minimal` config)
|
||||
- `rewards_and_penalties`
|
||||
- `registry_updates`
|
||||
- `slashings`
|
||||
- `final_updates`
|
||||
- `eth1_data_reset`
|
||||
- `effective_balance_updates`
|
||||
- `slashings_reset`
|
||||
- `randao_mixes_reset`
|
||||
- `historical_roots_update`
|
||||
- `participation_record_updates`
|
||||
|
||||
The resulting state should match the expected `post` state.
|
||||
|
47
tests/formats/forks/README.md
Normal file
47
tests/formats/forks/README.md
Normal file
@ -0,0 +1,47 @@
|
||||
# Forks
|
||||
|
||||
The aim of the fork tests is to ensure that a pre-fork state can be transformed
|
||||
into a valid post-fork state, utilizing the `upgrade` function found in the relevant `fork.md` spec.
|
||||
|
||||
There is only one handler: `fork`. Each fork (after genesis) is handled with the same format,
|
||||
and the particular fork boundary being tested is noted in `meta.yaml`.
|
||||
|
||||
## Test case format
|
||||
|
||||
### `meta.yaml`
|
||||
|
||||
A yaml file to signify which fork boundary is being tested.
|
||||
|
||||
```yaml
|
||||
fork: str -- Fork being transitioned to
|
||||
```
|
||||
|
||||
#### Fork strings
|
||||
|
||||
Key of valid `fork` strings that might be found in `meta.yaml`
|
||||
|
||||
| String ID | Pre-fork | Post-fork | Function |
|
||||
| - | - | - | - |
|
||||
| `altair` | Phase 0 | Altair | `upgrade_to_lightclient_patch` |
|
||||
|
||||
### `pre.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state before running the fork transition.
|
||||
|
||||
Also available as `pre.ssz`.
|
||||
|
||||
### `post.yaml`
|
||||
|
||||
A YAML-encoded `BeaconState`, the state after applying the fork transition.
|
||||
|
||||
Also available as `post.ssz`.
|
||||
|
||||
*Note*: This type is the `BeaconState` after the fork and is *not* the same type as `pre`.
|
||||
|
||||
## Processing
|
||||
|
||||
To process this test, pass `pre` into the upgrade function defined by the `fork` in `meta.yaml`.
|
||||
|
||||
## Condition
|
||||
|
||||
The resulting state should match the expected `post`.
|
@ -36,7 +36,7 @@ Prerequisites:
|
||||
|
||||
### Cleaning
|
||||
|
||||
This removes the existing virtual environments (`/test_generators/<generator>/venv`) and generated tests (`/yaml_tests/`).
|
||||
This removes the existing virtual environments (`/tests/generators/<generator>/venv`) and generated tests (`../eth2.0-spec-tests/tests`).
|
||||
|
||||
```bash
|
||||
make clean
|
||||
@ -47,7 +47,7 @@ make clean
|
||||
This runs all of the generators.
|
||||
|
||||
```bash
|
||||
make -j 4 gen_yaml_tests
|
||||
make -j 4 generate_tests
|
||||
```
|
||||
|
||||
The `-j N` flag makes the generators run in parallel, with `N` being the amount of cores.
|
||||
@ -55,10 +55,10 @@ The `-j N` flag makes the generators run in parallel, with `N` being the amount
|
||||
|
||||
### Running a single generator
|
||||
|
||||
The makefile auto-detects generators in the `test_generators` directory and provides a tests-gen target for each generator. See example:
|
||||
The makefile auto-detects generators in the `tests/generators` directory and provides a tests-gen target (gen_<generator_name>) for each generator. See example:
|
||||
|
||||
```bash
|
||||
make ./eth2.0-spec-tests/tests/shuffling/
|
||||
make gen_ssz_static
|
||||
```
|
||||
|
||||
## Developing a generator
|
||||
@ -78,9 +78,8 @@ It's recommended to extend the base-generator.
|
||||
|
||||
Create a `requirements.txt` in the root of your generator directory:
|
||||
```
|
||||
../../core/gen_helpers
|
||||
../../core/config_helpers
|
||||
../../core/pyspec
|
||||
pytest>=4.4
|
||||
../../../
|
||||
```
|
||||
|
||||
The config helper and pyspec is optional, but preferred. We encourage generators to derive tests from the spec itself in order to prevent code duplication and outdated tests.
|
||||
@ -103,7 +102,7 @@ Write a `main.py` file. The shuffling test generator is a good minimal starting
|
||||
```python
|
||||
from eth2spec.phase0 import spec as spec
|
||||
from eth_utils import to_tuple
|
||||
from gen_base import gen_runner, gen_typing
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
|
||||
from preset_loader import loader
|
||||
from typing import Iterable
|
||||
|
||||
@ -163,35 +162,40 @@ To extend this, one could decide to parametrize the `shuffling_test_cases` funct
|
||||
Another example, to generate tests from pytests:
|
||||
|
||||
```python
|
||||
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
|
||||
from eth2spec.phase1 import spec as spec_phase1
|
||||
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH
|
||||
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
presets = loader.load_presets(configs_path, config_name)
|
||||
spec_phase0.apply_constants_preset(presets)
|
||||
spec_phase1.apply_constants_preset(presets)
|
||||
return config_name
|
||||
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
|
||||
|
||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||
return generate_from_tests(
|
||||
runner_name='epoch_processing',
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name='phase0'
|
||||
)
|
||||
|
||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
gen_runner.run_generator("epoch_processing", [
|
||||
create_provider('final_updates', test_process_final_updates, 'minimal'),
|
||||
...
|
||||
])
|
||||
phase_0_mods = {key: 'eth2spec.test.phase0.sanity.test_' + key for key in [
|
||||
'blocks',
|
||||
'slots',
|
||||
]}
|
||||
lightclient_patch_mods = {**{key: 'eth2spec.test.lightclient_patch.sanity.test_' + key for key in [
|
||||
'blocks',
|
||||
]}, **phase_0_mods} # also run the previous phase 0 tests
|
||||
phase_1_mods = {**{key: 'eth2spec.test.phase1.sanity.test_' + key for key in [
|
||||
'blocks', # more phase 1 specific block tests
|
||||
'shard_blocks',
|
||||
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)
|
||||
|
||||
all_mods = {
|
||||
PHASE0: phase_0_mods,
|
||||
LIGHTCLIENT_PATCH: lightclient_patch_mods,
|
||||
PHASE1: phase_1_mods,
|
||||
}
|
||||
|
||||
run_state_test_generators(runner_name="sanity", specs=specs, all_mods=all_mods)
|
||||
```
|
||||
|
||||
Here multiple phases load the configuration, and the stream of test cases is derived from a pytest file using the `generate_from_tests` utility.
|
||||
|
||||
Here multiple phases load the configuration, and the stream of test cases is derived from a pytest file using the `eth2spec.gen_helpers.gen_from_tests.gen.run_state_test_generators` utility. Note that this helper generates all available tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner.
|
||||
|
||||
Recommendations:
|
||||
- You can have more than just one test provider.
|
||||
@ -200,14 +204,13 @@ Recommendations:
|
||||
- Use config `minimal` for performance and simplicity, but also implement a suite with the `mainnet` config where necessary.
|
||||
- You may be able to write your test case provider in a way where it does not make assumptions on constants.
|
||||
If so, you can generate test cases with different configurations for the same scenario (see example).
|
||||
- See [`tests/core/gen_helpers/README.md`](../core/gen_helpers/README.md) for command line options for generators.
|
||||
|
||||
- See [`tests/core/gen_helpers/README.md`](../core/pyspec/eth2spec/gen_helpers/README.md) for command line options for generators.
|
||||
|
||||
## How to add a new test generator
|
||||
|
||||
To add a new test generator that builds `New Tests`:
|
||||
|
||||
1. Create a new directory `new_tests` within the `test_generators` directory.
|
||||
1. Create a new directory `new_tests` within the `tests/generators` directory.
|
||||
Note that `new_tests` is also the name of the directory in which the tests will appear in the tests repository later.
|
||||
2. Your generator is assumed to have a `requirements.txt` file,
|
||||
with any dependencies it may need. Leave it empty if your generator has none.
|
||||
@ -216,8 +219,8 @@ To add a new test generator that builds `New Tests`:
|
||||
4. Your generator is called with `-o some/file/path/for_testing/can/be_anything -c some/other/path/to_configs/`.
|
||||
The base generator helps you handle this; you only have to define test case providers.
|
||||
5. Finally, add any linting or testing commands to the
|
||||
[circleci config file](../.circleci/config.yml) if desired to increase code quality.
|
||||
Or add it to the [`Makefile`](../Makefile), if it can be run locally.
|
||||
[circleci config file](../../.circleci/config.yml) if desired to increase code quality.
|
||||
Or add it to the [`Makefile`](../../Makefile), if it can be run locally.
|
||||
|
||||
*Note*: You do not have to change the makefile.
|
||||
However, if necessary (e.g. not using Python, or mixing in other languages), submit an issue, and it can be a special case.
|
||||
|
@ -13,7 +13,7 @@ import milagro_bls_binding as milagro_bls
|
||||
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.context import PHASE0
|
||||
from gen_base import gen_runner, gen_typing
|
||||
from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing
|
||||
|
||||
|
||||
def to_bytes(i):
|
||||
@ -270,7 +270,7 @@ def case04_fast_aggregate_verify():
|
||||
yield f'fast_aggregate_verify_infinity_pubkey', {
|
||||
'input': {
|
||||
'pubkeys': [encode_hex(pubkey) for pubkey in pubkeys_with_infinity],
|
||||
'messages': encode_hex(SAMPLE_MESSAGE),
|
||||
'message': encode_hex(SAMPLE_MESSAGE),
|
||||
'signature': encode_hex(aggregate_signature),
|
||||
},
|
||||
'output': False,
|
||||
|
@ -1,4 +1,2 @@
|
||||
py_ecc==5.0.0
|
||||
eth-utils==1.6.0
|
||||
../../core/gen_helpers
|
||||
pytest>=4.4
|
||||
../../../
|
||||
|
@ -1,59 +1,42 @@
|
||||
from typing import Iterable
|
||||
|
||||
from gen_base import gen_runner, gen_typing
|
||||
from gen_from_tests.gen import generate_from_tests
|
||||
from importlib import reload, import_module
|
||||
from eth2spec.config import config_util
|
||||
from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators
|
||||
from eth2spec.phase0 import spec as spec_phase0
|
||||
from eth2spec.lightclient_patch import spec as spec_lightclient_patch
|
||||
from eth2spec.phase1 import spec as spec_phase1
|
||||
from eth2spec.test.context import PHASE0, PHASE1
|
||||
from eth2spec.utils import bls
|
||||
from eth2spec.test.context import PHASE0, PHASE1, LIGHTCLIENT_PATCH
|
||||
|
||||
|
||||
def create_provider(fork_name: str, handler_name: str,
|
||||
tests_src_mod_name: str, config_name: str) -> gen_typing.TestProvider:
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
config_util.prepare_config(configs_path, config_name)
|
||||
reload(spec_phase0)
|
||||
reload(spec_phase1)
|
||||
bls.use_milagro()
|
||||
return config_name
|
||||
|
||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||
tests_src = import_module(tests_src_mod_name)
|
||||
return generate_from_tests(
|
||||
runner_name='epoch_processing',
|
||||
handler_name=handler_name,
|
||||
src=tests_src,
|
||||
fork_name=fork_name,
|
||||
)
|
||||
|
||||
return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn)
|
||||
specs = (spec_phase0, spec_lightclient_patch, spec_phase1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
phase_0_mods = {key: 'eth2spec.test.phase0.epoch_processing.test_process_' + key for key in [
|
||||
'final_updates',
|
||||
'justification_and_finalization',
|
||||
'registry_updates',
|
||||
'rewards_and_penalties',
|
||||
'registry_updates',
|
||||
'slashings',
|
||||
'eth1_data_reset',
|
||||
'effective_balance_updates',
|
||||
'slashings_reset',
|
||||
'randao_mixes_reset',
|
||||
'historical_roots_update',
|
||||
'participation_record_updates',
|
||||
]}
|
||||
lightclient_patch_mods = {
|
||||
**{key: 'eth2spec.test.lightclient_patch.epoch_processing.test_process_' + key for key in [
|
||||
'sync_committee_updates',
|
||||
]},
|
||||
**phase_0_mods,
|
||||
} # also run the previous phase 0 tests
|
||||
phase_1_mods = {**{key: 'eth2spec.test.phase1.epoch_processing.test_process_' + key for key in [
|
||||
'reveal_deadlines',
|
||||
'challenge_deadlines',
|
||||
'custody_final_updates',
|
||||
'reveal_deadlines',
|
||||
]}, **phase_0_mods} # also run the previous phase 0 tests (but against phase 1 spec)
|
||||
|
||||
gen_runner.run_generator(f"epoch_processing", [
|
||||
create_provider(PHASE0, key, mod_name, 'minimal') for key, mod_name in phase_0_mods.items()
|
||||
])
|
||||
gen_runner.run_generator(f"epoch_processing", [
|
||||
create_provider(PHASE0, key, mod_name, 'mainnet') for key, mod_name in phase_0_mods.items()
|
||||
])
|
||||
gen_runner.run_generator(f"epoch_processing", [
|
||||
create_provider(PHASE1, key, mod_name, 'minimal') for key, mod_name in phase_1_mods.items()
|
||||
])
|
||||
gen_runner.run_generator(f"epoch_processing", [
|
||||
create_provider(PHASE1, key, mod_name, 'mainnet') for key, mod_name in phase_1_mods.items()
|
||||
])
|
||||
all_mods = {
|
||||
PHASE0: phase_0_mods,
|
||||
LIGHTCLIENT_PATCH: lightclient_patch_mods,
|
||||
PHASE1: phase_1_mods,
|
||||
}
|
||||
|
||||
run_state_test_generators(runner_name="epoch_processing", specs=specs, all_mods=all_mods)
|
||||
|
@ -1,2 +1,2 @@
|
||||
../../core/gen_helpers
|
||||
../../../
|
||||
pytest>=4.4
|
||||
../../../
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user