commit 5792afca46
Merge branch 'dev' into bbr-ws

.circleci/config.yml
@@ -35,13 +35,13 @@ commands:
description: "Restore the cache with pyspec keys"
|
description: "Restore the cache with pyspec keys"
|
||||||
steps:
|
steps:
|
||||||
- restore_cached_venv:
|
- restore_cached_venv:
|
||||||
venv_name: v22-pyspec
|
venv_name: v24-pyspec
|
||||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||||
save_pyspec_cached_venv:
|
save_pyspec_cached_venv:
|
||||||
description: Save a venv into a cache with pyspec keys"
|
description: Save a venv into a cache with pyspec keys"
|
||||||
steps:
|
steps:
|
||||||
- save_cached_venv:
|
- save_cached_venv:
|
||||||
venv_name: v22-pyspec
|
venv_name: v24-pyspec
|
||||||
reqs_checksum: cache-{{ checksum "setup.py" }}
|
reqs_checksum: cache-{{ checksum "setup.py" }}
|
||||||
venv_path: ./venv
|
venv_path: ./venv
|
||||||
restore_deposit_contract_tester_cached_venv:
|
restore_deposit_contract_tester_cached_venv:
|
||||||

.gitignore
@@ -16,7 +16,8 @@ eth2.0-spec-tests/

 # Dynamically built from Markdown spec
 tests/core/pyspec/eth2spec/phase0/
-tests/core/pyspec/eth2spec/phase1/
+tests/core/pyspec/eth2spec/altair/
+tests/core/pyspec/eth2spec/merge/

 # coverage reports
 .htmlcov

Makefile (48 changes)
@@ -2,7 +2,10 @@ SPEC_DIR = ./specs
 SSZ_DIR = ./ssz
 TEST_LIBS_DIR = ./tests/core
 TEST_GENERATORS_DIR = ./tests/generators
+# The working dir during testing
 PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
+ETH2SPEC_MODULE_DIR = $(PY_SPEC_DIR)/eth2spec
+TEST_REPORT_DIR = $(PY_SPEC_DIR)/test-reports
 TEST_VECTOR_DIR = ../eth2.0-spec-tests/tests
 GENERATOR_DIR = ./tests/generators
 SOLIDITY_DEPOSIT_CONTRACT_DIR = ./solidity_deposit_contract
@@ -20,13 +23,19 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
 # To check generator matching:
 #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])

-MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
+MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SSZ_DIR)/*.md) \
+    $(wildcard $(SPEC_DIR)/merge/*.md) \
+    $(wildcard $(SPEC_DIR)/custody/*.md) \
+    $(wildcard $(SPEC_DIR)/das/*.md) \
+    $(wildcard $(SPEC_DIR)/sharding/*.md)

 COV_HTML_OUT=.htmlcov
-COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
+COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT)
+COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html

 CURRENT_DIR = ${CURDIR}
 LINTER_CONFIG_FILE = $(CURRENT_DIR)/linter.ini
+GENERATOR_ERROR_LOG_FILE = $(CURRENT_DIR)/$(TEST_VECTOR_DIR)/testgen_error_log.txt

 export DAPP_SKIP_BUILD:=1
 export DAPP_SRC:=$(SOLIDITY_DEPOSIT_CONTRACT_DIR)
@@ -35,7 +44,8 @@ export DAPP_JSON:=build/combined.json

 .PHONY: clean partial_clean all test citest lint generate_tests pyspec install_test open_cov \
         install_deposit_contract_tester test_deposit_contract install_deposit_contract_compiler \
-        compile_deposit_contract test_compile_deposit_contract check_toc
+        compile_deposit_contract test_compile_deposit_contract check_toc \
+        detect_generator_incomplete detect_generator_error_log

 all: $(PY_SPEC_ALL_TARGETS)

@@ -47,16 +57,17 @@ partial_clean:
    rm -f .coverage
    rm -rf $(PY_SPEC_DIR)/.pytest_cache
    rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
-   rm -rf $(PY_SPEC_DIR)/phase0
-   rm -rf $(PY_SPEC_DIR)/phase1
-   rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
-   rm -rf $(PY_SPEC_DIR)/.coverage
-   rm -rf $(PY_SPEC_DIR)/test-reports
+   rm -rf $(ETH2SPEC_MODULE_DIR)/phase0
+   rm -rf $(ETH2SPEC_MODULE_DIR)/altair
+   rm -rf $(ETH2SPEC_MODULE_DIR)/merge
+   rm -rf $(COV_HTML_OUT_DIR)
+   rm -rf $(TEST_REPORT_DIR)
    rm -rf eth2spec.egg-info dist build
    rm -rf build

 clean: partial_clean
    rm -rf venv
+   # legacy cleanup. The pyspec venv should be located at the repository root
    rm -rf $(PY_SPEC_DIR)/venv
    rm -rf $(DEPOSIT_CONTRACT_COMPILER_DIR)/venv
    rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/venv
@@ -81,19 +92,19 @@ pyspec:

 # installs the packages to run pyspec tests
 install_test:
-   python3.8 -m venv venv; . venv/bin/activate; pip3 install .[lint]; pip3 install -e .[test]
+   python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[lint]; python3 -m pip install -e .[test]

 test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
-   python -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+   python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

 find_test: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
-   python -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
+   python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.spec --cov=eth2spec.altair.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec

 citest: pyspec
    mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
-   python -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec
+   python3 -m pytest -n 4 --bls-type=milagro --junitxml=eth2spec/test_results.xml eth2spec

 open_cov:
    ((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
@@ -109,10 +120,11 @@ check_toc: $(MARKDOWN_FILES:=.toc)
 codespell:
    codespell . --skip ./.git -I .codespell-whitelist

+# TODO: add future merge, sharding, etc. packages to linting.
 lint: pyspec
    . venv/bin/activate; cd $(PY_SPEC_DIR); \
    flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \
-   && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.phase1
+   && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.merge

 lint_generators: pyspec
    . venv/bin/activate; cd $(TEST_GENERATORS_DIR); \
@@ -132,11 +144,11 @@ test_deposit_contract:
    dapp test -v --fuzz-runs 5

 install_deposit_contract_web3_tester:
-   cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
+   cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; python3 -m pip install -r requirements.txt

 test_deposit_contract_web3_tests:
    cd $(DEPOSIT_CONTRACT_TESTER_DIR); . venv/bin/activate; \
-   python -m pytest .
+   python3 -m pytest .

 # Runs a generator, identified by param 1
 define run_generator
@@ -170,3 +182,9 @@ $(TEST_VECTOR_DIR)/:
 # (creation of output dir is a dependency)
 gen_%: $(TEST_VECTOR_DIR)
    $(call run_generator,$*)
+
+detect_generator_incomplete: $(TEST_VECTOR_DIR)
+   find $(TEST_VECTOR_DIR) -name "INCOMPLETE"
+
+detect_generator_error_log: $(TEST_VECTOR_DIR)
+   [ -f $(GENERATOR_ERROR_LOG_FILE) ] && echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists" || echo "[PASSED] error log file does not exist"
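
Note: the new detect_generator_error_log target is a plain existence check on the error log that test generators write. The same check in standalone Python (illustrative only, not part of the commit; the path mirrors GENERATOR_ERROR_LOG_FILE and assumes the repository root as the working directory):

    import os

    # Mirrors $(TEST_VECTOR_DIR)/testgen_error_log.txt with
    # TEST_VECTOR_DIR = ../eth2.0-spec-tests/tests (assumed layout).
    error_log = os.path.join("..", "eth2.0-spec-tests", "tests", "testgen_error_log.txt")

    if os.path.isfile(error_log):
        print(f"[ERROR] {error_log} file exists")
    else:
        print("[PASSED] error log file does not exist")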

README.md (51 changes)
@@ -11,10 +11,13 @@ This repository hosts the current Eth2 specifications. Discussions about design

 [![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec)

-Core specifications for Eth2 clients be found in [specs](specs/). These are divided into phases. Each subsequent phase depends upon the prior. The current phases specified are:
+Core specifications for Eth2 clients be found in [specs](specs/). These are divided into features.
+Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready.
+
+The current features are:

 ### Phase 0

 * [The Beacon Chain](specs/phase0/beacon-chain.md)
 * [Beacon Chain Fork Choice](specs/phase0/fork-choice.md)
 * [Deposit Contract](specs/phase0/deposit-contract.md)
@@ -22,19 +25,43 @@ Core specifications for Eth2 clients be found in [specs](specs/). These are divi
 * [P2P Networking](specs/phase0/p2p-interface.md)
 * [Weak Subjectivity](specs/phase0/weak-subjectivity.md.md)

-### Phase 1
-* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
-* [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
-* [Custody Game](specs/phase1/custody-game.md)
-* [Shard Transition and Fraud Proofs](specs/phase1/shard-transition.md)
-* [Light client syncing protocol](specs/phase1/light-client-sync.md)
-* [Beacon Chain Fork Choice for Shards](specs/phase1/fork-choice.md)
-
-### Phase 2
-
-Phase 2 is still actively in R&D and does not yet have any formal specifications.
-
-See the [Eth2 Phase 2 Wiki](https://hackmd.io/UzysWse1Th240HELswKqVA?view) for current progress, discussions, and definitions regarding this work.
+### Altair
+
+* [Beacon chain changes](specs/altair/beacon-chain.md)
+* [Altair fork](specs/altair/fork.md)
+* [Light client sync protocol](specs/altair/sync-protocol.md)
+* [Honest Validator guide changes](specs/altair/validator.md)
+* [P2P Networking](specs/altair/p2p-interface.md)
+
+### Merge
+
+The merge is still actively in R&D. The specifications outline a general direction for engineering work,
+while the details are in review and may change.
+
+* Background material:
+  * An [ethresear.ch](https://ethresear.ch) post [describing the basic mechanism](https://ethresear.ch/t/the-eth1-eth2-transition/6265)
+  * [ethereum.org](https://ethereum.org) high-level description of the merge [here](https://ethereum.org/en/eth2/docking/)
+* Specifications:
+  * [Beacon Chain changes](specs/merge/beacon-chain.md)
+  * [Fork Choice changes](specs/merge/fork-choice.md)
+  * [Validator additions](specs/merge/validator.md)
+
+### Sharding
+
+Sharding follows the merge, and is divided into three parts:
+
+* Sharding base functionality - In early engineering phase
+  * [Beacon Chain changes](specs/sharding/beacon-chain.md)
+  * [P2P Network changes](specs/sharding/p2p-interface.md)
+* Custody Game - Ready, dependent on sharding
+  * [Beacon Chain changes](specs/custody_game/beacon-chain.md)
+  * [Validator custody work](specs/custody_game/validator.md)
+* Data Availability Sampling - In active R&D
+  * Technical details [here](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD).
+  * [Core types and functions](specs/das/das-core.md)
+  * [P2P Networking](specs/das/p2p-interface.md)
+  * [Fork Choice](specs/das/fork-choice.md)
+  * [Sampling process](specs/das/sampling.md)

 ### Accompanying documents can be found in [specs](specs) and include:
@@ -59,14 +86,12 @@ The following are the broad design goals for Ethereum 2.0:
 * to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time
 * to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain)


 ## Useful external resources

 * [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#)
 * [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB)
 * [Combining GHOST and Casper paper](https://arxiv.org/abs/2003.03052)


 ## For spec contributors

 Documentation on the different components used during spec writing can be found here:

configs/README.md
@@ -3,7 +3,7 @@
 This directory contains a set of constants presets used for testing, testnets, and mainnet.

 A preset file contains all the constants known for its target.
-Later-fork constants can be ignored, e.g. ignore Phase 1 constants as a client that only supports Phase 0 currently.
+Later-fork constants can be ignored, e.g. ignore Sharding constants as a client that only supports Phase 0 currently.


 ## Forking
@@ -15,11 +15,10 @@ Over time, the need to sync an older state may be deprecated.
 In this case, the prefix on the new constant may be removed, and the old constant will keep a special name before completely being removed.

 A previous iteration of forking made use of "timelines", but this collides with the definitions used in the spec (constants for special forking slots, etc.), and was not integrated sufficiently in any of the spec tools or implementations.
-Instead, the config essentially doubles as fork definition now, e.g. changing the value for `PHASE_1_FORK_SLOT` changes the fork.
+Instead, the config essentially doubles as fork definition now, e.g. changing the value for `ALTAIR_FORK_EPOCH` changes the fork.

 Another reason to prefer forking through constants is the ability to program a forking moment based on context, instead of being limited to a static slot number.
-

 ## Format

 Each preset is a key-value mapping.
@@ -32,4 +31,4 @@ Each preset is a key-value mapping.

 Presets may contain comments to describe the values.

-See [`mainnet_phase0.yaml`](./mainnet_phase0.yaml) for a complete example.
+See [`mainnet/phase0.yaml`](./mainnet/phase0.yaml) for a complete example.
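
Note: since the config now doubles as the fork definition, a client can schedule a fork straight from the preset value. A minimal Python sketch (assumes PyYAML and the mainnet/ layout introduced below; load_preset and is_altair_active are illustrative names, not part of the repo):

    import yaml  # assumption: PyYAML is installed

    def load_preset(path: str) -> dict:
        # One preset file is a flat key-value mapping; comments are ignored by YAML.
        with open(path) as f:
            return yaml.safe_load(f)

    preset = load_preset("configs/mainnet/altair.yaml")

    def is_altair_active(epoch: int) -> bool:
        # The fork moment is just a config value, not a hardcoded slot.
        return epoch >= preset["ALTAIR_FORK_EPOCH"]

    print(is_altair_active(0))  # False while ALTAIR_FORK_EPOCH is still the max-uint64 stub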

configs/mainnet/altair.yaml (new file)
@@ -0,0 +1,51 @@
+# Mainnet preset - Altair
+
+# Updated penalty values
+# ---------------------------------------------------------------
+# 3 * 2**24 (= 50,331,648)
+INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
+# 2**6 (= 64)
+MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
+# 2
+PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
+
+
+# Sync committee
+# ---------------------------------------------------------------
+# 2**9 (= 512)
+SYNC_COMMITTEE_SIZE: 512
+# 2**9 (= 512)
+EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 512
+
+
+# Misc
+# ---------------------------------------------------------------
+# 2**2 (= 4)
+INACTIVITY_SCORE_BIAS: 4
+
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_SYNC_COMMITTEE: 0x07000000
+DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000
+DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000
+
+
+# Fork
+# ---------------------------------------------------------------
+# 0x01000000
+ALTAIR_FORK_VERSION: 0x01000000
+# TBD
+ALTAIR_FORK_EPOCH: 18446744073709551615
+
+
+# Sync protocol
+# ---------------------------------------------------------------
+# 1
+MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
+
+
+# Validator
+# ---------------------------------------------------------------
+# 2**2 (= 4)
+TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: 4
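
Note: each value's comment states its derivation as a power of two; the arithmetic checks out (standalone Python, illustrative only):

    assert 3 * 2**24 == 50331648              # INACTIVITY_PENALTY_QUOTIENT_ALTAIR
    assert 2**6 == 64                         # MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
    assert 2**9 == 512                        # SYNC_COMMITTEE_SIZE, EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert 2**64 - 1 == 18446744073709551615  # the "TBD" max-uint64 stub for ALTAIR_FORK_EPOCH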

configs/mainnet/custody_game.yaml (new file)
@@ -0,0 +1,48 @@
+# Mainnet preset - Custody Game
+
+# Time parameters
+# ---------------------------------------------------------------
+# 2**1 (= 2) epochs, 12.8 minutes
+RANDAO_PENALTY_EPOCHS: 2
+# 2**15 (= 32,768) epochs, ~146 days
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 32768
+# 2**14 (= 16,384) epochs ~73 days
+EPOCHS_PER_CUSTODY_PERIOD: 16384
+# 2**11 (= 2,048) epochs, ~9 days
+CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
+# 2**15 (= 32,768) epochs, ~146 days
+MAX_CHUNK_CHALLENGE_DELAY: 32768
+
+# Misc parameters
+# ---------------------------------------------------------------
+# 2**256 - 189
+CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
+# 3
+CUSTODY_SECRETS: 3
+# 1/1024 chance of custody bit 1
+CUSTODY_PROBABILITY_EXPONENT: 10
+
+# Max operations
+# ---------------------------------------------------------------
+# 2**8 (= 256)
+MAX_CUSTODY_KEY_REVEALS: 256
+# 2**0 (= 1)
+MAX_EARLY_DERIVED_SECRET_REVEALS: 1
+# 2**2 (= 2)
+MAX_CUSTODY_CHUNK_CHALLENGES: 4
+# 2** 4 (= 16)
+MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 16
+# 2**0 (= 1)
+MAX_CUSTODY_SLASHINGS: 1
+
+# Reward and penalty quotients
+# ---------------------------------------------------------------
+EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
+# 2**8 (= 256)
+MINOR_REWARD_QUOTIENT: 256
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
+DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
+DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
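
Note: CUSTODY_PROBABILITY_EXPONENT encodes the custody-bit probability as a power of two, i.e. a 1-in-2**exponent chance. A quick check of the stated odds and of the prime's comment (standalone Python, illustrative only):

    from fractions import Fraction

    CUSTODY_PROBABILITY_EXPONENT = 10
    # "1/1024 chance of custody bit 1", per the comment above
    assert Fraction(1, 2**CUSTODY_PROBABILITY_EXPONENT) == Fraction(1, 1024)
    # CUSTODY_PRIME is 2**256 - 189, as its comment states
    assert 2**256 - 189 == 115792089237316195423570985008687907853269984665640564039457584007913129639747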

configs/mainnet/merge.yaml (new file)
@@ -0,0 +1,7 @@
+# Mainnet preset - The Merge
+
+# Fork
+# ---------------------------------------------------------------
+MERGE_FORK_VERSION: 0x02000000
+# TBD, temporarily max uint64 value: 2**64 - 1
+MERGE_FORK_EPOCH: 18446744073709551615

configs/mainnet/phase0.yaml
@@ -1,7 +1,5 @@
 # Mainnet preset

-CONFIG_NAME: "mainnet"
-
 # Misc
 # ---------------------------------------------------------------
 # 2**6 (= 64)

configs/mainnet/phase1.yaml (deleted)
@@ -1,101 +0,0 @@
-# Mainnet preset - phase 1
-
-CONFIG_NAME: "mainnet"
-
-# phase1-fork
-# ---------------------------------------------------------------
-PHASE_1_FORK_VERSION: 0x01000000
-# [STUB]
-PHASE_1_FORK_SLOT: 0
-INITIAL_ACTIVE_SHARDS: 64
-
-
-# beacon-chain
-# ---------------------------------------------------------------
-# Misc
-# 2**10 (= 1,024)
-MAX_SHARDS: 1024
-# 2**7 (= 128)
-LIGHT_CLIENT_COMMITTEE_SIZE: 128
-# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
-
-# Shard block configs
-# 2**20 (= 1048,576) bytes
-MAX_SHARD_BLOCK_SIZE: 1048576
-# 2**18 (= 262,144) bytes
-TARGET_SHARD_BLOCK_SIZE: 262144
-# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
-SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
-# len(SHARD_BLOCK_OFFSETS)
-MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
-# 2**12 (= 4,096)
-BYTES_PER_CUSTODY_CHUNK: 4096
-# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)
-CUSTODY_RESPONSE_DEPTH: 8
-
-# Gwei values
-# 2**14 (= 16,384) Gwei
-MAX_GASPRICE: 16384
-# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
-
-# Time parameters
-# 2**3 (= 8) | online epochs
-ONLINE_PERIOD: 8
-# 2**8 (= 256) | epochs
-LIGHT_CLIENT_COMMITTEE_PERIOD: 256
-
-# Max operations per block
-# 2**20 (= 1,048,576)
-MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576
-
-# Domain types
-DOMAIN_SHARD_PROPOSAL: 0x80000000
-DOMAIN_SHARD_COMMITTEE: 0x81000000
-DOMAIN_LIGHT_CLIENT: 0x82000000
-# custody-game spec
-DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
-DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
-DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
-
-# custody-game
-# ---------------------------------------------------------------
-# Time parameters
-# 2**1 (= 2) epochs, 12.8 minutes
-RANDAO_PENALTY_EPOCHS: 2
-# 2**15 (= 32,768) epochs, ~146 days
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 32768
-# 2**14 (= 16,384) epochs ~73 days
-EPOCHS_PER_CUSTODY_PERIOD: 16384
-# 2**11 (= 2,048) epochs, ~9 days
-CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
-# 2**15 (= 32,768) epochs, ~146 days
-MAX_CHUNK_CHALLENGE_DELAY: 32768
-
-# Misc parameters
-# 2**256 - 189
-CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
-# 3
-CUSTODY_SECRETS: 3
-# 2**5 (= 32) bytes
-BYTES_PER_CUSTODY_ATOM: 32
-# 1/1024 chance of custody bit 1
-CUSTODY_PROBABILITY_EXPONENT: 10
-
-# Max operations
-# 2**8 (= 256)
-MAX_CUSTODY_KEY_REVEALS: 256
-# 2**0 (= 1)
-MAX_EARLY_DERIVED_SECRET_REVEALS: 1
-# 2**2 (= 2)
-MAX_CUSTODY_CHUNK_CHALLENGES: 4
-# 2** 4 (= 16)
-MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 16
-# 2**0 (= 1)
-MAX_CUSTODY_SLASHINGS: 1
-
-# Reward and penalty quotients
-EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
-# 2**8 (= 256)
-MINOR_REWARD_QUOTIENT: 256

configs/mainnet/sharding.yaml (new file)
@@ -0,0 +1,45 @@
+# Mainnet preset - Sharding
+
+# Fork
+# ---------------------------------------------------------------
+SHARDING_FORK_VERSION: 0x03000000
+# TBD, temporarily max uint64 value: 2**64 - 1
+SHARDING_FORK_EPOCH: 18446744073709551615
+
+
+# Beacon-chain
+# ---------------------------------------------------------------
+# Misc
+# 2**10 (= 1,024)
+MAX_SHARDS: 1024
+# 2**6 = 64
+INITIAL_ACTIVE_SHARDS: 64
+# 2**3 (= 8)
+GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+# 2**4 (= 16)
+MAX_SHARD_PROPOSER_SLASHINGS: 16
+
+# Shard block configs
+# ---------------------------------------------------------------
+MAX_SHARD_HEADERS_PER_SHARD: 4
+# 2**11 (= 2,048)
+MAX_SAMPLES_PER_BLOCK: 2048
+# 2**10 (= 1,1024)
+TARGET_SAMPLES_PER_BLOCK: 1024
+
+# Gwei values
+# ---------------------------------------------------------------
+# 2**33 (= 8,589,934,592) Gwei
+MAX_GASPRICE: 8589934592
+# 2**3 (= 8) Gwei
+MIN_GASPRICE: 8
+
+# Time parameters
+# ---------------------------------------------------------------
+# 2**8 (= 256) | epochs
+SHARD_COMMITTEE_PERIOD: 256
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_SHARD_PROPOSER: 0x80000000
+DOMAIN_SHARD_COMMITTEE: 0x81000000

configs/minimal/altair.yaml (new file)
@@ -0,0 +1,52 @@
+# Minimal preset - Altair
+
+# Updated penalty values
+# ---------------------------------------------------------------
+# 3 * 2**24 (= 50,331,648)
+INACTIVITY_PENALTY_QUOTIENT_ALTAIR: 50331648
+# 2**6 (= 64)
+MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR: 64
+# 2
+PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR: 2
+
+
+# Sync committee
+# ---------------------------------------------------------------
+# [customized]
+SYNC_COMMITTEE_SIZE: 32
+# [customized]
+EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8
+
+
+# Misc
+# ---------------------------------------------------------------
+# 2**2 (= 4)
+INACTIVITY_SCORE_BIAS: 4
+
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_SYNC_COMMITTEE: 0x07000000
+DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF: 0x08000000
+DOMAIN_CONTRIBUTION_AND_PROOF: 0x09000000
+
+
+# Fork
+# ---------------------------------------------------------------
+# [customized] Highest byte set to 0x01 to avoid collisions with mainnet versioning
+ALTAIR_FORK_VERSION: 0x01000001
+# [customized]
+ALTAIR_FORK_EPOCH: 18446744073709551615
+
+
+# Sync protocol
+# ---------------------------------------------------------------
+# 1
+MIN_SYNC_COMMITTEE_PARTICIPANTS: 1
+
+
+
+# Validator
+# ---------------------------------------------------------------
+# 2**2 (= 4)
+TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE: 4

configs/minimal/custody_game.yaml (new file)
@@ -0,0 +1,48 @@
+# Minimal preset - Custody Game
+
+# Time parameters
+# ---------------------------------------------------------------
+# 2**1 (= 2) epochs, 12.8 minutes
+RANDAO_PENALTY_EPOCHS: 2
+# [customized] quicker for testing
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 64
+# [customized] quicker for testing
+EPOCHS_PER_CUSTODY_PERIOD: 32
+# [customized] quicker for testing
+CUSTODY_PERIOD_TO_RANDAO_PADDING: 8
+# [customize for faster testing]
+MAX_CHUNK_CHALLENGE_DELAY: 64
+
+# Misc parameters
+# ---------------------------------------------------------------
+# 2**256 - 189
+CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
+# 3
+CUSTODY_SECRETS: 3
+# 1/4 chance of custody bit 1 [customized for faster testing]
+CUSTODY_PROBABILITY_EXPONENT: 2
+
+# Max operations
+# ---------------------------------------------------------------
+# 2**8 (= 256)
+MAX_CUSTODY_KEY_REVEALS: 256
+# 2**0 (= 1)
+MAX_EARLY_DERIVED_SECRET_REVEALS: 1
+# [customized]
+MAX_CUSTODY_CHUNK_CHALLENGES: 2
+# [customized]
+MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 8
+# 2**0 (= 1)
+MAX_CUSTODY_SLASHINGS: 1
+
+# Reward and penalty quotients
+# ---------------------------------------------------------------
+EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
+# 2**8 (= 256)
+MINOR_REWARD_QUOTIENT: 256
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
+DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
+DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
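
Note: the minimal presets keep mainnet's schema but shrink the [customized] values for faster testing. A Python sketch that surfaces exactly which keys differ between the two presets (assumes PyYAML and this commit's file layout; compare_presets is an illustrative helper, not part of the repo):

    import yaml  # assumption: PyYAML is installed

    def compare_presets(mainnet_path: str, minimal_path: str) -> dict:
        # Returns {key: (mainnet_value, minimal_value)} for every shared key whose value differs.
        with open(mainnet_path) as f:
            mainnet = yaml.safe_load(f)
        with open(minimal_path) as f:
            minimal = yaml.safe_load(f)
        return {k: (v, minimal[k]) for k, v in mainnet.items() if k in minimal and minimal[k] != v}

    diffs = compare_presets("configs/mainnet/custody_game.yaml", "configs/minimal/custody_game.yaml")
    # Expect the [customized] keys, e.g. EPOCHS_PER_CUSTODY_PERIOD: (16384, 32)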

configs/minimal/merge.yaml (new file)
@@ -0,0 +1,7 @@
+# Minimal preset - The Merge
+
+# Fork
+# ---------------------------------------------------------------
+MERGE_FORK_VERSION: 0x02000001
+# TBD, temporarily max uint64 value: 2**64 - 1
+MERGE_FORK_EPOCH: 18446744073709551615

configs/minimal/phase0.yaml
@@ -1,7 +1,5 @@
 # Minimal preset

-CONFIG_NAME: "minimal"
-
 # Misc
 # ---------------------------------------------------------------

configs/minimal/phase1.yaml (deleted)
@@ -1,105 +0,0 @@
-# Minimal preset - phase 1
-
-CONFIG_NAME: "minimal"
-
-# phase1-fork
-# ---------------------------------------------------------------
-# [customized] for testnet distinction
-PHASE_1_FORK_VERSION: 0x01000001
-# [STUB]
-PHASE_1_FORK_SLOT: 0
-# [customized] reduced for testing
-INITIAL_ACTIVE_SHARDS: 2
-
-
-# beacon-chain
-# ---------------------------------------------------------------
-# Misc
-# [customized] reduced for testing
-MAX_SHARDS: 8
-# 2**7 (= 128)
-LIGHT_CLIENT_COMMITTEE_SIZE: 128
-# 2**3 (= 8)
-GASPRICE_ADJUSTMENT_COEFFICIENT: 8
-
-# Shard block configs
-# 2**20 (= 1048,576) bytes
-MAX_SHARD_BLOCK_SIZE: 1048576
-# 2**18 (= 262,144) bytes
-TARGET_SHARD_BLOCK_SIZE: 262144
-# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
-SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
-# len(SHARD_BLOCK_OFFSETS)
-MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
-# 2**12 (= 4,096)
-BYTES_PER_CUSTODY_CHUNK: 4096
-# ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)
-CUSTODY_RESPONSE_DEPTH: 8
-
-# Gwei values
-# 2**14 (= 16,384) Gwei
-MAX_GASPRICE: 16384
-# 2**3 (= 8) Gwei
-MIN_GASPRICE: 8
-
-# Time parameters
-# 2**3 (= 8) | online epochs
-ONLINE_PERIOD: 8
-# 2**8 (= 256) | epochs
-LIGHT_CLIENT_COMMITTEE_PERIOD: 256
-
-# Max operations per block
-# 2**20 (= 1,048,576)
-MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS: 1048576
-
-# Domain types
-DOMAIN_SHARD_PROPOSAL: 0x80000000
-DOMAIN_SHARD_COMMITTEE: 0x81000000
-DOMAIN_LIGHT_CLIENT: 0x82000000
-# custody-game spec
-DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
-DOMAIN_LIGHT_SELECTION_PROOF: 0x84000000
-DOMAIN_LIGHT_AGGREGATE_AND_PROOF: 0x85000000
-
-# custody-game
-# ---------------------------------------------------------------
-# Time parameters
-# 2**1 (= 2) epochs
-RANDAO_PENALTY_EPOCHS: 2
-# [customized] quicker for testing
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 64
-# [customized] quicker for testing
-EPOCHS_PER_CUSTODY_PERIOD: 32
-# [customized] quicker for testing
-CUSTODY_PERIOD_TO_RANDAO_PADDING: 8
-# [customize for faster testing]
-MAX_CHUNK_CHALLENGE_DELAY: 64
-
-
-# Misc parameters
-# 2**256 - 189
-CUSTODY_PRIME: 115792089237316195423570985008687907853269984665640564039457584007913129639747
-# 3
-CUSTODY_SECRETS: 3
-# 2**5 (= 32) bytes
-BYTES_PER_CUSTODY_ATOM: 32
-# 1/4 chance of custody bit 1 [customized for faster testing]
-CUSTODY_PROBABILITY_EXPONENT: 2
-
-
-# Max operations
-# 2**8 (= 256)
-MAX_CUSTODY_KEY_REVEALS: 256
-# 2**0 (= 1)
-MAX_EARLY_DERIVED_SECRET_REVEALS: 1
-# [customized]
-MAX_CUSTODY_CHUNK_CHALLENGES: 2
-# [customized]
-MAX_CUSTODY_CHUNK_CHALLENGE_RESP: 8
-# 2**0 (= 1)
-MAX_CUSTODY_SLASHINGS: 1
-
-# Reward and penalty quotients
-EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
-# 2**8 (= 256)
-MINOR_REWARD_QUOTIENT: 256

configs/minimal/sharding.yaml (new file)
@@ -0,0 +1,45 @@
+# Minimal preset - Sharding
+
+# Fork
+# ---------------------------------------------------------------
+SHARDING_FORK_VERSION: 0x03000001
+# TBD, temporarily max uint64 value: 2**64 - 1
+MERGE_FORK_EPOCH: 18446744073709551615
+
+
+# Beacon-chain
+# ---------------------------------------------------------------
+# Misc
+# [customized] reduced for testing
+MAX_SHARDS: 8
+# [customized] reduced for testing
+INITIAL_ACTIVE_SHARDS: 2
+# 2**3 (= 8)
+GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+# [customized] reduced for testing
+MAX_SHARD_PROPOSER_SLASHINGS: 4
+
+# Shard block configs
+# ---------------------------------------------------------------
+MAX_SHARD_HEADERS_PER_SHARD: 4
+# 2**11 (= 2,048)
+MAX_SAMPLES_PER_BLOCK: 2048
+# 2**10 (= 1,1024)
+TARGET_SAMPLES_PER_BLOCK: 1024
+
+# Gwei values
+# ---------------------------------------------------------------
+# 2**33 (= 8,589,934,592) Gwei
+MAX_GASPRICE: 8589934592
+# 2**3 (= 8) Gwei
+MIN_GASPRICE: 8
+
+# Time parameters
+# ---------------------------------------------------------------
+# 2**8 (= 256) | epochs
+SHARD_COMMITTEE_PERIOD: 256
+
+# Signature domains
+# ---------------------------------------------------------------
+DOMAIN_SHARD_PROPOSER: 0x80000000
+DOMAIN_SHARD_COMMITTEE: 0x81000000

setup.py (602 changes)
@@ -1,168 +1,280 @@
-from enum import Enum, auto
 from setuptools import setup, find_packages, Command
 from setuptools.command.build_py import build_py
 from distutils import dir_util
 from distutils.util import convert_path
 import os
 import re
-from typing import Dict, NamedTuple, List
+import string
+from typing import Dict, NamedTuple, List, Sequence, Optional
+from abc import ABC, abstractmethod
+import ast

-FUNCTION_REGEX = r'^def [\w_]*'
+# NOTE: have to programmatically include third-party dependencies in `setup.py`.
+MARKO_VERSION = "marko==1.0.2"
+try:
+    import marko
+except ImportError:
+    import pip
+    pip.main(["install", MARKO_VERSION])
+
+from marko.block import Heading, FencedCode, LinkRefDef, BlankLine
+from marko.inline import CodeSpan
+from marko.ext.gfm import gfm
+from marko.ext.gfm.elements import Table, Paragraph
+
+
+# Definitions in context.py
+PHASE0 = 'phase0'
+ALTAIR = 'altair'
+MERGE = 'merge'
+
+CONFIG_LOADER = '''
+apply_constants_config(globals())
+'''
+
+# The helper functions that are used when defining constants
+CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS = '''
+def ceillog2(x: int) -> uint64:
+    if x < 1:
+        raise ValueError(f"ceillog2 accepts only positive values, x={x}")
+    return uint64((x - 1).bit_length())
+
+
+def floorlog2(x: int) -> uint64:
+    if x < 1:
+        raise ValueError(f"floorlog2 accepts only positive values, x={x}")
+    return uint64(x.bit_length() - 1)
+'''


 class SpecObject(NamedTuple):
     functions: Dict[str, str]
     custom_types: Dict[str, str]
     constants: Dict[str, str]
+    ssz_dep_constants: Dict[str, str]  # the constants that depend on ssz_objects
     ssz_objects: Dict[str, str]
     dataclasses: Dict[str, str]


-class CodeBlockType(Enum):
-    SSZ = auto()
-    DATACLASS = auto()
-    FUNCTION = auto()
+def _get_name_from_heading(heading: Heading) -> Optional[str]:
+    last_child = heading.children[-1]
+    if isinstance(last_child, CodeSpan):
+        return last_child.children
+    return None
+
+
+def _get_source_from_code_block(block: FencedCode) -> str:
+    return block.children[0].children.strip()
+
+
+def _get_function_name_from_source(source: str) -> str:
+    fn = ast.parse(source).body[0]
+    return fn.name
+
+
+def _get_class_info_from_source(source: str) -> (str, Optional[str]):
+    class_def = ast.parse(source).body[0]
+    base = class_def.bases[0]
+    if isinstance(base, ast.Name):
+        parent_class = base.id
+    else:
+        # NOTE: SSZ definition derives from earlier phase...
+        # e.g. `phase0.SignedBeaconBlock`
+        # TODO: check for consistency with other phases
+        parent_class = None
+    return class_def.name, parent_class
+
+
+def _is_constant_id(name: str) -> bool:
+    if name[0] not in string.ascii_uppercase + '_':
+        return False
+    return all(map(lambda c: c in string.ascii_uppercase + '_' + string.digits, name[1:]))
+
+
+ETH2_SPEC_COMMENT_PREFIX = "eth2spec:"
+
+
+def _get_eth2_spec_comment(child: LinkRefDef) -> Optional[str]:
+    _, _, title = child._parse_info
+    if not (title[0] == "(" and title[len(title)-1] == ")"):
+        return None
+    title = title[1:len(title)-1]
+    if not title.startswith(ETH2_SPEC_COMMENT_PREFIX):
+        return None
+    return title[len(ETH2_SPEC_COMMENT_PREFIX):].strip()


 def get_spec(file_name: str) -> SpecObject:
-    """
-    Takes in the file name of a spec.md file, opens it and returns a parsed spec object.
-
-    Note: This function makes heavy use of the inherent ordering of dicts,
-    if this is not supported by your python version, it will not work.
-    """
-    pulling_from = None  # line number of start of latest object
-    current_name = None  # most recent section title
     functions: Dict[str, str] = {}
     constants: Dict[str, str] = {}
+    ssz_dep_constants: Dict[str, str] = {}
     ssz_objects: Dict[str, str] = {}
     dataclasses: Dict[str, str] = {}
-    function_matcher = re.compile(FUNCTION_REGEX)
-    block_type = CodeBlockType.FUNCTION
     custom_types: Dict[str, str] = {}
-    for linenum, line in enumerate(open(file_name).readlines()):
-        line = line.rstrip()
-        if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
-            current_name = line[line[:-1].rfind('`') + 1: -1]
-        if line[:9] == '```python':
-            assert pulling_from is None
-            pulling_from = linenum + 1
-        elif line[:3] == '```':
-            pulling_from = None
-        else:
-            # Handle function definitions & ssz_objects
-            if pulling_from is not None:
-                if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
-                    name = line[6:-12]
-                    # Check consistency with markdown header
-                    assert name == current_name
-                    block_type = CodeBlockType.SSZ
-                elif line[:10] == '@dataclass':
-                    block_type = CodeBlockType.DATACLASS
-                elif function_matcher.match(line) is not None:
-                    current_name = function_matcher.match(line).group(0)
-                    block_type = CodeBlockType.FUNCTION
-
-                if block_type == CodeBlockType.SSZ:
-                    ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
-                elif block_type == CodeBlockType.DATACLASS:
-                    dataclasses[current_name] = dataclasses.get(current_name, '') + line + '\n'
-                elif block_type == CodeBlockType.FUNCTION:
-                    functions[current_name] = functions.get(current_name, '') + line + '\n'
-                else:
-                    pass
-
-            # Handle constant and custom types table entries
-            elif pulling_from is None and len(line) > 0 and line[0] == '|':
-                row = line[1:].split('|')
-                if len(row) >= 2:
-                    for i in range(2):
-                        row[i] = row[i].strip().strip('`')
-                        if '`' in row[i]:
-                            row[i] = row[i][:row[i].find('`')]
-                    is_constant_def = True
-                    if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
-                        is_constant_def = False
-                    for c in row[0]:
-                        if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
-                            is_constant_def = False
-                    if is_constant_def:
-                        constants[row[0]] = row[1].replace('**TBD**', '2**32')
-                    elif row[1].startswith('uint') or row[1].startswith('Bytes'):
-                        custom_types[row[0]] = row[1]
-    return SpecObject(functions, custom_types, constants, ssz_objects, dataclasses)
+    with open(file_name) as source_file:
+        document = gfm.parse(source_file.read())
+
+    current_name = None
+    should_skip = False
+    for child in document.children:
+        if isinstance(child, BlankLine):
+            continue
+        if should_skip:
+            should_skip = False
+            continue
+        if isinstance(child, Heading):
+            current_name = _get_name_from_heading(child)
+        elif isinstance(child, FencedCode):
+            if child.lang != "python":
+                continue
+            source = _get_source_from_code_block(child)
+            if source.startswith("def"):
+                current_name = _get_function_name_from_source(source)
+                functions[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
+            elif source.startswith("@dataclass"):
+                dataclasses[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
+            elif source.startswith("class"):
+                class_name, parent_class = _get_class_info_from_source(source)
+                # check consistency with spec
+                assert class_name == current_name
+                if parent_class:
+                    assert parent_class == "Container"
+                # NOTE: trim whitespace from spec
+                ssz_objects[current_name] = "\n".join(line.rstrip() for line in source.splitlines())
+            else:
+                raise Exception("unrecognized python code element")
+        elif isinstance(child, Table):
+            for row in child.children:
+                cells = row.children
+                if len(cells) >= 2:
+                    name_cell = cells[0]
+                    name = name_cell.children[0].children
+                    value_cell = cells[1]
+                    value = value_cell.children[0].children
+                    if isinstance(value, list):
+                        # marko parses `**X**` as a list containing a X
+                        value = value[0].children
+                    if _is_constant_id(name):
+                        if value.startswith("get_generalized_index"):
+                            ssz_dep_constants[name] = value
+                        else:
+                            constants[name] = value.replace("TBD", "2**32")
+                    elif value.startswith("uint") or value.startswith("Bytes") or value.startswith("ByteList"):
+                        custom_types[name] = value
+        elif isinstance(child, LinkRefDef):
+            comment = _get_eth2_spec_comment(child)
+            if comment == "skip":
+                should_skip = True
+
+    return SpecObject(
+        functions=functions,
+        custom_types=custom_types,
+        constants=constants,
+        ssz_dep_constants=ssz_dep_constants,
+        ssz_objects=ssz_objects,
+        dataclasses=dataclasses,
+    )
|
class SpecBuilder(ABC):
|
||||||
apply_constants_config(globals())
|
@property
|
||||||
'''
|
@abstractmethod
|
||||||
|
def fork(self) -> str:
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
PHASE0_IMPORTS = '''from eth2spec.config.config_util import apply_constants_config
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def imports(cls) -> str:
|
||||||
|
"""
|
||||||
|
Import objects from other libraries.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def preparations(cls) -> str:
|
||||||
|
"""
|
||||||
|
Define special types/constants for building pyspec or call functions.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def sundry_functions(cls) -> str:
|
||||||
|
"""
|
||||||
|
The functions that are (1) defined abstractly in specs or (2) adjusted for getting better performance.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
|
||||||
|
"""
|
||||||
|
The constants that are required for SSZ objects.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
|
||||||
|
"""
|
||||||
|
The constants that are required for custom types.
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def invariant_checks(cls) -> str:
|
||||||
|
"""
|
||||||
|
The invariant checks
|
||||||
|
"""
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def build_spec(cls, source_files: List[str]) -> str:
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# Phase0SpecBuilder
|
||||||
|
#
|
||||||
|
class Phase0SpecBuilder(SpecBuilder):
|
||||||
|
fork: str = PHASE0
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def imports(cls) -> str:
|
||||||
|
return '''from lru import LRU
|
||||||
|
from dataclasses import (
|
||||||
|
dataclass,
|
||||||
|
field,
|
||||||
|
)
|
||||||
from typing import (
|
from typing import (
|
||||||
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
|
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
|
||||||
)
|
)
|
||||||
|
|
||||||
from dataclasses import (
|
from eth2spec.config.config_util import apply_constants_config
|
||||||
dataclass,
|
|
||||||
field,
|
|
||||||
)
|
|
||||||
|
|
||||||
from lru import LRU
|
|
||||||
|
|
||||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
||||||
from eth2spec.utils.ssz.ssz_typing import (
|
from eth2spec.utils.ssz.ssz_typing import (
|
||||||
View, boolean, Container, List, Vector, uint8, uint32, uint64,
|
View, boolean, Container, List, Vector, uint8, uint32, uint64,
|
||||||
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist)
|
||||||
)
|
from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
|
|
||||||
from eth2spec.utils.hash_function import hash
|
from eth2spec.utils.hash_function import hash
|
||||||
|
'''
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def preparations(cls) -> str:
|
||||||
|
return '''
|
||||||
SSZObject = TypeVar('SSZObject', bound=View)
|
SSZObject = TypeVar('SSZObject', bound=View)
|
||||||
|
|
||||||
CONFIG_NAME = 'mainnet'
|
CONFIG_NAME = 'mainnet'
|
||||||
'''
|
'''
|
||||||
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
|
|
||||||
from eth2spec.config.config_util import apply_constants_config
|
|
||||||
from typing import (
|
|
||||||
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable, Optional
|
|
||||||
)
|
|
||||||
from typing import List as PyList
|
|
||||||
|
|
||||||
from dataclasses import (
|
@classmethod
|
||||||
dataclass,
|
def sundry_functions(cls) -> str:
|
||||||
field,
|
return '''
|
||||||
)
|
|
||||||
|
|
||||||
from lru import LRU
|
|
||||||
|
|
||||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes
|
|
||||||
from eth2spec.utils.ssz.ssz_typing import (
|
|
||||||
View, boolean, Container, List, Vector, uint8, uint32, uint64, bit,
|
|
||||||
ByteList, ByteVector, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
|
||||||
)
|
|
||||||
from eth2spec.utils import bls
|
|
||||||
|
|
||||||
from eth2spec.utils.hash_function import hash
|
|
||||||
|
|
||||||
# Whenever phase 1 is loaded, make sure we have the latest phase0
|
|
||||||
from importlib import reload
|
|
||||||
reload(phase0)
|
|
||||||
|
|
||||||
|
|
||||||
SSZVariableName = str
|
|
||||||
GeneralizedIndex = NewType('GeneralizedIndex', int)
|
|
||||||
SSZObject = TypeVar('SSZObject', bound=View)
|
|
||||||
|
|
||||||
CONFIG_NAME = 'mainnet'
|
|
||||||
'''
|
|
||||||
SUNDRY_CONSTANTS_FUNCTIONS = '''
|
|
||||||
def ceillog2(x: int) -> uint64:
|
|
||||||
if x < 1:
|
|
||||||
raise ValueError(f"ceillog2 accepts only positive values, x={x}")
|
|
||||||
return uint64((x - 1).bit_length())
|
|
||||||
'''
|
|
||||||
PHASE0_SUNDRY_FUNCTIONS = '''
|
|
||||||
def get_eth1_data(block: Eth1Block) -> Eth1Data:
|
def get_eth1_data(block: Eth1Block) -> Eth1Data:
|
||||||
"""
|
"""
|
||||||
A stub function return mocking Eth1Data.
|
A stub function return mocking Eth1Data.
|
||||||
|
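
Note: the builder classes above compose generated source by concatenating each override with super()'s output, so Altair and Merge reuse every phase0 fragment. A reduced Python sketch of the pattern (illustrative class names, not part of the commit):

    class BaseBuilder:
        @classmethod
        def imports(cls) -> str:
            return "from eth2spec.utils import bls"

    class ForkBuilder(BaseBuilder):
        @classmethod
        def imports(cls) -> str:
            # Extend, never replace: the parent's fragment stays first.
            return super().imports() + "\nfrom importlib import reload"

    print(ForkBuilder.imports())
    # from eth2spec.utils import bls
    # from importlib import reload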
@@ -233,16 +345,136 @@ get_attesting_indices = cache_this(
     ),
     _get_attesting_indices, lru_size=SLOTS_PER_EPOCH * MAX_COMMITTEES_PER_SLOT * 3)'''

-
-PHASE1_SUNDRY_FUNCTIONS = '''
-
-_get_start_shard = get_start_shard
-get_start_shard = cache_this(
-    lambda state, slot: (state.validators.hash_tree_root(), slot),
-    _get_start_shard, lru_size=SLOTS_PER_EPOCH * 3)'''
-
-
-def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_class_objects: Dict[str, str]) -> str:
+    @classmethod
+    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
+        return {}
+
+    @classmethod
+    def hardcoded_custom_type_dep_constants(cls) -> Dict[str, str]:
+        return {}
+
+    @classmethod
+    def invariant_checks(cls) -> str:
+        return ''
+
+    @classmethod
+    def build_spec(cls, source_files: Sequence[str]) -> str:
+        return _build_spec(cls.fork, source_files)
+
+
+#
+# AltairSpecBuilder
+#
+class AltairSpecBuilder(Phase0SpecBuilder):
+    fork: str = ALTAIR
+
+    @classmethod
+    def imports(cls) -> str:
+        return super().imports() + '\n' + '''
+from typing import NewType, Union
+from importlib import reload
+
+from eth2spec.phase0 import spec as phase0
+from eth2spec.utils.ssz.ssz_typing import Path
+'''
+
+    @classmethod
+    def preparations(cls):
+        return super().preparations() + '\n' + '''
+# Whenever this spec version is loaded, make sure we have the latest phase0
+reload(phase0)
+
+SSZVariableName = str
+GeneralizedIndex = NewType('GeneralizedIndex', int)
+'''
+
+    @classmethod
+    def sundry_functions(cls) -> str:
+        return super().sundry_functions() + '\n\n' + '''
+def get_generalized_index(ssz_class: Any, *path: Sequence[Union[int, SSZVariableName]]) -> GeneralizedIndex:
+    ssz_path = Path(ssz_class)
+    for item in path:
+        ssz_path = ssz_path / item
+    return GeneralizedIndex(ssz_path.gindex())'''
+
+    @classmethod
+    def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]:
+        constants = {
+            'FINALIZED_ROOT_INDEX': 'GeneralizedIndex(105)',
+            'NEXT_SYNC_COMMITTEE_INDEX': 'GeneralizedIndex(55)',
+        }
+        return {**super().hardcoded_ssz_dep_constants(), **constants}
+
+    @classmethod
+    def invariant_checks(cls) -> str:
+        return '''
+assert (
+    TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT
+) == WEIGHT_DENOMINATOR'''
+
+
+#
+# MergeSpecBuilder
+#
+class MergeSpecBuilder(Phase0SpecBuilder):
+    fork: str = MERGE
+
+    @classmethod
+    def imports(cls):
+        return super().imports() + '\n' + '''
+from eth2spec.phase0 import spec as phase0
+from eth2spec.utils.ssz.ssz_typing import Bytes20, ByteList, ByteVector, uint256
+from importlib import reload
+'''
+
+    @classmethod
+    def preparations(cls):
+        return super().preparations() + '\n' + '''
+reload(phase0)
+'''
+
+    @classmethod
+    def sundry_functions(cls) -> str:
+        return super().sundry_functions() + '\n\n' + """
+ExecutionState = Any
+
+
+def get_pow_block(hash: Bytes32) -> PowBlock:
+    return PowBlock(block_hash=hash, is_valid=True, is_processed=True, total_difficulty=TRANSITION_TOTAL_DIFFICULTY)
+
+
+def get_execution_state(execution_state_root: Bytes32) -> ExecutionState:
+    pass
+
+
+def get_pow_chain_head() -> PowBlock:
+    pass
+
+
+def verify_execution_state_transition(execution_payload: ExecutionPayload) -> bool:
+    return True
+
+
+def produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload:
+    pass"""
+
+    @classmethod
+    def hardcoded_custom_type_dep_constants(cls) -> str:
+        constants = {
+            'MAX_BYTES_PER_OPAQUE_TRANSACTION': 'uint64(2**20)',
+        }
+        return {**super().hardcoded_custom_type_dep_constants(), **constants}
+
+
+spec_builders = {
+    builder.fork: builder
+    for builder in (Phase0SpecBuilder, AltairSpecBuilder, MergeSpecBuilder)
+}
+
+
+def objects_to_spec(spec_object: SpecObject, builder: SpecBuilder, ordered_class_objects: Dict[str, str]) -> str:
     """
|
"""
|
||||||
Given all the objects that constitute a spec, combine them into a single pyfile.
|
Given all the objects that constitute a spec, combine them into a single pyfile.
|
||||||
"""
|
"""
|
||||||
|
@ -251,32 +483,51 @@ def objects_to_spec(spec_object: SpecObject, imports: str, fork: str, ordered_cl
|
||||||
[
|
[
|
||||||
f"class {key}({value}):\n pass\n"
|
f"class {key}({value}):\n pass\n"
|
||||||
for key, value in spec_object.custom_types.items()
|
for key, value in spec_object.custom_types.items()
|
||||||
|
if not value.startswith('ByteList')
|
||||||
|
]
|
||||||
|
)
|
||||||
|
+ ('\n\n' if len([key for key, value in spec_object.custom_types.items() if value.startswith('ByteList')]) > 0 else '')
|
||||||
|
+ '\n\n'.join(
|
||||||
|
[
|
||||||
|
f"{key} = {value}\n"
|
||||||
|
for key, value in spec_object.custom_types.items()
|
||||||
|
if value.startswith('ByteList')
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
)
|
)
|
||||||
for k in list(spec_object.functions):
|
for k in list(spec_object.functions):
|
||||||
if "ceillog2" in k:
|
if "ceillog2" in k or "floorlog2" in k:
|
||||||
del spec_object.functions[k]
|
del spec_object.functions[k]
|
||||||
functions_spec = '\n\n'.join(spec_object.functions.values())
|
functions_spec = '\n\n\n'.join(spec_object.functions.values())
|
||||||
for k in list(spec_object.constants.keys()):
|
for k in list(spec_object.constants.keys()):
|
||||||
if k == "BLS12_381_Q":
|
if k == "BLS12_381_Q":
|
||||||
spec_object.constants[k] += " # noqa: E501"
|
spec_object.constants[k] += " # noqa: E501"
|
||||||
constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))
|
constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))
|
||||||
ordered_class_objects_spec = '\n\n'.join(ordered_class_objects.values())
|
ordered_class_objects_spec = '\n\n\n'.join(ordered_class_objects.values())
|
||||||
|
ssz_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_ssz_dep_constants()[x]), builder.hardcoded_ssz_dep_constants()))
|
||||||
|
ssz_dep_constants_verification = '\n'.join(map(lambda x: 'assert %s == %s' % (x, spec_object.ssz_dep_constants[x]), builder.hardcoded_ssz_dep_constants()))
|
||||||
|
custom_type_dep_constants = '\n'.join(map(lambda x: '%s = %s' % (x, builder.hardcoded_custom_type_dep_constants()[x]), builder.hardcoded_custom_type_dep_constants()))
|
||||||
spec = (
|
spec = (
|
||||||
imports
|
builder.imports()
|
||||||
+ '\n\n' + f"fork = \'{fork}\'\n"
|
+ builder.preparations()
|
||||||
|
+ '\n\n' + f"fork = \'{builder.fork}\'\n"
|
||||||
|
# The constants that some SSZ containers require. Need to be defined before `new_type_definitions`
|
||||||
|
+ ('\n\n' + custom_type_dep_constants + '\n' if custom_type_dep_constants != '' else '')
|
||||||
+ '\n\n' + new_type_definitions
|
+ '\n\n' + new_type_definitions
|
||||||
+ '\n' + SUNDRY_CONSTANTS_FUNCTIONS
|
+ '\n' + CONSTANT_DEP_SUNDRY_CONSTANTS_FUNCTIONS
|
||||||
|
# The constants that some SSZ containers require. Need to be defined before `constants_spec`
|
||||||
|
+ ('\n\n' + ssz_dep_constants if ssz_dep_constants != '' else '')
|
||||||
+ '\n\n' + constants_spec
|
+ '\n\n' + constants_spec
|
||||||
+ '\n\n' + CONFIG_LOADER
|
+ '\n\n' + CONFIG_LOADER
|
||||||
+ '\n\n' + ordered_class_objects_spec
|
+ '\n\n' + ordered_class_objects_spec
|
||||||
+ '\n\n' + functions_spec
|
+ '\n\n\n' + functions_spec
|
||||||
+ '\n' + PHASE0_SUNDRY_FUNCTIONS
|
+ '\n\n' + builder.sundry_functions()
|
||||||
|
# Since some constants are hardcoded in setup.py, the following assertions verify that the hardcoded constants are
|
||||||
|
# as same as the spec definition.
|
||||||
|
+ ('\n\n\n' + ssz_dep_constants_verification if ssz_dep_constants_verification != '' else '')
|
||||||
|
+ ('\n' + builder.invariant_checks() if builder.invariant_checks() != '' else '')
|
||||||
|
+ '\n'
|
||||||
)
|
)
|
||||||
if fork == 'phase1':
|
|
||||||
spec += '\n' + PHASE1_SUNDRY_FUNCTIONS
|
|
||||||
spec += '\n'
|
|
||||||
return spec
|
return spec
|
||||||
|
|
||||||
|
|
||||||
|
@ -294,10 +545,10 @@ def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, st
|
||||||
|
|
||||||
ignored_dependencies = [
|
ignored_dependencies = [
|
||||||
'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
|
'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
|
||||||
'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
|
'Bytes1', 'Bytes4', 'Bytes20', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
|
||||||
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
|
'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
|
||||||
'bytes', 'byte', 'ByteList', 'ByteVector',
|
'bytes', 'byte', 'ByteList', 'ByteVector',
|
||||||
'Dict', 'dict', 'field',
|
'Dict', 'dict', 'field', 'ceillog2', 'floorlog2', 'Set',
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
@ -338,23 +589,25 @@ def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
|
||||||
"""
|
"""
|
||||||
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
|
Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
|
||||||
"""
|
"""
|
||||||
functions0, custom_types0, constants0, ssz_objects0, dataclasses0 = spec0
|
functions0, custom_types0, constants0, ssz_dep_constants0, ssz_objects0, dataclasses0 = spec0
|
||||||
functions1, custom_types1, constants1, ssz_objects1, dataclasses1 = spec1
|
functions1, custom_types1, constants1, ssz_dep_constants1, ssz_objects1, dataclasses1 = spec1
|
||||||
functions = combine_functions(functions0, functions1)
|
functions = combine_functions(functions0, functions1)
|
||||||
custom_types = combine_constants(custom_types0, custom_types1)
|
custom_types = combine_constants(custom_types0, custom_types1)
|
||||||
constants = combine_constants(constants0, constants1)
|
constants = combine_constants(constants0, constants1)
|
||||||
|
ssz_dep_constants = combine_constants(ssz_dep_constants0, ssz_dep_constants1)
|
||||||
ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
|
ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
|
||||||
dataclasses = combine_functions(dataclasses0, dataclasses1)
|
dataclasses = combine_functions(dataclasses0, dataclasses1)
|
||||||
return SpecObject(functions, custom_types, constants, ssz_objects, dataclasses)
|
return SpecObject(
|
||||||
|
functions=functions,
|
||||||
|
custom_types=custom_types,
|
||||||
|
constants=constants,
|
||||||
|
ssz_dep_constants=ssz_dep_constants,
|
||||||
|
ssz_objects=ssz_objects,
|
||||||
|
dataclasses=dataclasses,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
fork_imports = {
|
def _build_spec(fork: str, source_files: Sequence[str]) -> str:
|
||||||
'phase0': PHASE0_IMPORTS,
|
|
||||||
'phase1': PHASE1_IMPORTS,
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def build_spec(fork: str, source_files: List[str]) -> str:
|
|
||||||
all_specs = [get_spec(spec) for spec in source_files]
|
all_specs = [get_spec(spec) for spec in source_files]
|
||||||
|
|
||||||
spec_object = all_specs[0]
|
spec_object = all_specs[0]
|
||||||
|
@ -364,7 +617,7 @@ def build_spec(fork: str, source_files: List[str]) -> str:
|
||||||
class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}
|
class_objects = {**spec_object.ssz_objects, **spec_object.dataclasses}
|
||||||
dependency_order_class_objects(class_objects, spec_object.custom_types)
|
dependency_order_class_objects(class_objects, spec_object.custom_types)
|
||||||
|
|
||||||
return objects_to_spec(spec_object, fork_imports[fork], fork, class_objects)
|
return objects_to_spec(spec_object, spec_builders[fork], class_objects)
|
||||||
|
|
||||||
|
|
||||||
class PySpecCommand(Command):
|
class PySpecCommand(Command):
|
||||||
|
@ -387,7 +640,7 @@ class PySpecCommand(Command):
|
||||||
def initialize_options(self):
|
def initialize_options(self):
|
||||||
"""Set default values for options."""
|
"""Set default values for options."""
|
||||||
# Each user option must be listed here with their default value.
|
# Each user option must be listed here with their default value.
|
||||||
self.spec_fork = 'phase0'
|
self.spec_fork = PHASE0
|
||||||
self.md_doc_paths = ''
|
self.md_doc_paths = ''
|
||||||
self.out_dir = 'pyspec_output'
|
self.out_dir = 'pyspec_output'
|
||||||
|
|
||||||
|
@ -396,26 +649,34 @@ class PySpecCommand(Command):
|
||||||
if len(self.md_doc_paths) == 0:
|
if len(self.md_doc_paths) == 0:
|
||||||
print("no paths were specified, using default markdown file paths for pyspec"
|
print("no paths were specified, using default markdown file paths for pyspec"
|
||||||
" build (spec fork: %s)" % self.spec_fork)
|
" build (spec fork: %s)" % self.spec_fork)
|
||||||
if self.spec_fork == "phase0":
|
if self.spec_fork == PHASE0:
|
||||||
self.md_doc_paths = """
|
self.md_doc_paths = """
|
||||||
specs/phase0/beacon-chain.md
|
specs/phase0/beacon-chain.md
|
||||||
specs/phase0/fork-choice.md
|
specs/phase0/fork-choice.md
|
||||||
specs/phase0/validator.md
|
specs/phase0/validator.md
|
||||||
specs/phase0/weak-subjectivity.md
|
specs/phase0/weak-subjectivity.md
|
||||||
"""
|
"""
|
||||||
elif self.spec_fork == "phase1":
|
elif self.spec_fork == ALTAIR:
|
||||||
self.md_doc_paths = """
|
self.md_doc_paths = """
|
||||||
specs/phase0/beacon-chain.md
|
specs/phase0/beacon-chain.md
|
||||||
specs/phase0/fork-choice.md
|
specs/phase0/fork-choice.md
|
||||||
specs/phase0/validator.md
|
specs/phase0/validator.md
|
||||||
specs/phase0/weak-subjectivity.md
|
specs/phase0/weak-subjectivity.md
|
||||||
specs/phase1/custody-game.md
|
specs/altair/beacon-chain.md
|
||||||
specs/phase1/beacon-chain.md
|
specs/altair/fork.md
|
||||||
specs/phase1/shard-transition.md
|
specs/altair/validator.md
|
||||||
specs/phase1/fork-choice.md
|
specs/altair/p2p-interface.md
|
||||||
specs/phase1/phase1-fork.md
|
specs/altair/sync-protocol.md
|
||||||
specs/phase1/shard-fork-choice.md
|
"""
|
||||||
specs/phase1/validator.md
|
elif self.spec_fork == MERGE:
|
||||||
|
self.md_doc_paths = """
|
||||||
|
specs/phase0/beacon-chain.md
|
||||||
|
specs/phase0/fork-choice.md
|
||||||
|
specs/phase0/validator.md
|
||||||
|
specs/phase0/weak-subjectivity.md
|
||||||
|
specs/merge/beacon-chain.md
|
||||||
|
specs/merge/fork-choice.md
|
||||||
|
specs/merge/validator.md
|
||||||
"""
|
"""
|
||||||
else:
|
else:
|
||||||
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork)
|
||||||
|
@ -427,7 +688,7 @@ class PySpecCommand(Command):
|
||||||
raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)
|
raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
spec_str = build_spec(self.spec_fork, self.parsed_md_doc_paths)
|
spec_str = spec_builders[self.spec_fork].build_spec(self.parsed_md_doc_paths)
|
||||||
if self.dry_run:
|
if self.dry_run:
|
||||||
self.announce('dry run successfully prepared contents for spec.'
|
self.announce('dry run successfully prepared contents for spec.'
|
||||||
f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}"')
|
f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}"')
|
||||||
|
@ -455,7 +716,7 @@ class BuildPyCommand(build_py):
|
||||||
self.run_command('pyspec')
|
self.run_command('pyspec')
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
for spec_fork in fork_imports:
|
for spec_fork in spec_builders:
|
||||||
self.run_pyspec_cmd(spec_fork=spec_fork)
|
self.run_pyspec_cmd(spec_fork=spec_fork)
|
||||||
|
|
||||||
super(BuildPyCommand, self).run()
|
super(BuildPyCommand, self).run()
|
||||||
|
@ -483,7 +744,7 @@ class PyspecDevCommand(Command):
|
||||||
|
|
||||||
def run(self):
|
def run(self):
|
||||||
print("running build_py command")
|
print("running build_py command")
|
||||||
for spec_fork in fork_imports:
|
for spec_fork in spec_builders:
|
||||||
self.run_pyspec_cmd(spec_fork=spec_fork)
|
self.run_pyspec_cmd(spec_fork=spec_fork)
|
||||||
|
|
||||||
commands = {
|
commands = {
|
||||||
|
@ -516,13 +777,12 @@ setup(
|
||||||
url="https://github.com/ethereum/eth2.0-specs",
|
url="https://github.com/ethereum/eth2.0-specs",
|
||||||
include_package_data=False,
|
include_package_data=False,
|
||||||
package_data={'configs': ['*.yaml'],
|
package_data={'configs': ['*.yaml'],
|
||||||
|
|
||||||
'specs': ['**/*.md'],
|
'specs': ['**/*.md'],
|
||||||
'eth2spec': ['VERSION.txt']},
|
'eth2spec': ['VERSION.txt']},
|
||||||
package_dir={
|
package_dir={
|
||||||
"eth2spec": "tests/core/pyspec/eth2spec",
|
"eth2spec": "tests/core/pyspec/eth2spec",
|
||||||
"configs": "configs",
|
"configs": "configs",
|
||||||
"specs": "specs"
|
"specs": "specs",
|
||||||
},
|
},
|
||||||
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
||||||
py_modules=["eth2spec"],
|
py_modules=["eth2spec"],
|
||||||
|
@ -531,16 +791,18 @@ setup(
|
||||||
extras_require={
|
extras_require={
|
||||||
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
||||||
"lint": ["flake8==3.7.7", "mypy==0.750"],
|
"lint": ["flake8==3.7.7", "mypy==0.750"],
|
||||||
|
"generator": ["python-snappy==0.5.4"],
|
||||||
},
|
},
|
||||||
install_requires=[
|
install_requires=[
|
||||||
"eth-utils>=1.3.0,<2",
|
"eth-utils>=1.3.0,<2",
|
||||||
"eth-typing>=2.1.0,<3.0.0",
|
"eth-typing>=2.1.0,<3.0.0",
|
||||||
"pycryptodome==3.9.4",
|
"pycryptodome==3.9.4",
|
||||||
"py_ecc==5.0.0",
|
"py_ecc==5.2.0",
|
||||||
"milagro_bls_binding==1.5.0",
|
"milagro_bls_binding==1.6.3",
|
||||||
"dataclasses==0.6",
|
"dataclasses==0.6",
|
||||||
"remerkleable==0.1.17",
|
"remerkleable==0.1.19",
|
||||||
"ruamel.yaml==0.16.5",
|
"ruamel.yaml==0.16.5",
|
||||||
"lru-dict==1.1.6"
|
"lru-dict==1.1.6",
|
||||||
|
"marko==1.0.2",
|
||||||
]
|
]
|
||||||
)
|
)
|
||||||
|
|
|
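The `spec_builders` registry introduced above keys each builder class by its fork name, replacing the old `fork_imports` dict. A minimal sketch of how a caller resolves a fork and renders a spec with it; the file list and output path are illustrative only and assume the surrounding setup.py definitions:

```python
# Hypothetical usage of the spec_builders registry from the diff above.
md_files = [
    "specs/phase0/beacon-chain.md",
    "specs/phase0/fork-choice.md",
    "specs/altair/beacon-chain.md",
]
builder = spec_builders[ALTAIR]             # look up the builder class by fork name
spec_source = builder.build_spec(md_files)  # render the combined pyspec module source
with open("pyspec_output/altair/spec.py", "w") as out:  # illustrative path
    out.write(spec_source)
```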
@@ -0,0 +1,742 @@
# Ethereum 2.0 Altair Beacon chain changes

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Participation flag indices](#participation-flag-indices)
  - [Incentivization weights](#incentivization-weights)
  - [Misc](#misc)
- [Configuration](#configuration)
  - [Updated penalty values](#updated-penalty-values)
  - [Sync committee](#sync-committee)
  - [Misc](#misc-1)
  - [Domain types](#domain-types)
- [Containers](#containers)
  - [Modified containers](#modified-containers)
    - [`BeaconBlockBody`](#beaconblockbody)
    - [`BeaconState`](#beaconstate)
  - [New containers](#new-containers)
    - [`SyncAggregate`](#syncaggregate)
    - [`SyncCommittee`](#synccommittee)
- [Helper functions](#helper-functions)
  - [`Predicates`](#predicates)
    - [`eth2_fast_aggregate_verify`](#eth2_fast_aggregate_verify)
  - [Misc](#misc-2)
    - [`get_flag_indices_and_weights`](#get_flag_indices_and_weights)
    - [`add_flag`](#add_flag)
    - [`has_flag`](#has_flag)
  - [Beacon state accessors](#beacon-state-accessors)
    - [`get_sync_committee_indices`](#get_sync_committee_indices)
    - [`get_sync_committee`](#get_sync_committee)
    - [`get_base_reward_per_increment`](#get_base_reward_per_increment)
    - [`get_base_reward`](#get_base_reward)
    - [`get_unslashed_participating_indices`](#get_unslashed_participating_indices)
    - [`get_flag_index_deltas`](#get_flag_index_deltas)
    - [Modified `get_inactivity_penalty_deltas`](#modified-get_inactivity_penalty_deltas)
  - [Beacon state mutators](#beacon-state-mutators)
    - [Modified `slash_validator`](#modified-slash_validator)
  - [Block processing](#block-processing)
    - [Modified `process_attestation`](#modified-process_attestation)
    - [Modified `process_deposit`](#modified-process_deposit)
    - [Sync committee processing](#sync-committee-processing)
  - [Epoch processing](#epoch-processing)
    - [Justification and finalization](#justification-and-finalization)
    - [Inactivity scores](#inactivity-scores)
    - [Rewards and penalties](#rewards-and-penalties)
    - [Slashings](#slashings)
    - [Participation flags updates](#participation-flags-updates)
    - [Sync committee updates](#sync-committee-updates)
- [Initialize state for pure Altair testnets and test vectors](#initialize-state-for-pure-altair-testnets-and-test-vectors)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

Altair is the first beacon chain hard fork. Its main features are:

* sync committees to support light clients
* incentive accounting reforms to reduce spec complexity
* penalty parameter updates towards their planned maximally punitive values

## Custom types

| Name | SSZ equivalent | Description |
| - | - | - |
| `ParticipationFlags` | `uint8` | a succinct representation of 8 boolean participation flags |

## Constants

### Participation flag indices

| Name | Value |
| - | - |
| `TIMELY_HEAD_FLAG_INDEX` | `0` |
| `TIMELY_SOURCE_FLAG_INDEX` | `1` |
| `TIMELY_TARGET_FLAG_INDEX` | `2` |

### Incentivization weights

| Name | Value |
| - | - |
| `TIMELY_HEAD_WEIGHT` | `uint64(12)` |
| `TIMELY_SOURCE_WEIGHT` | `uint64(12)` |
| `TIMELY_TARGET_WEIGHT` | `uint64(24)` |
| `SYNC_REWARD_WEIGHT` | `uint64(8)` |
| `PROPOSER_WEIGHT` | `uint64(8)` |
| `WEIGHT_DENOMINATOR` | `uint64(64)` |

*Note*: The sum of the weights equals `WEIGHT_DENOMINATOR`.
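A quick self-contained check of that note, mirroring the `invariant_checks` assertion added in setup.py (plain ints stand in for the SSZ `uint64` type):

```python
# Values copied from the table above.
TIMELY_HEAD_WEIGHT = 12
TIMELY_SOURCE_WEIGHT = 12
TIMELY_TARGET_WEIGHT = 24
SYNC_REWARD_WEIGHT = 8
PROPOSER_WEIGHT = 8
WEIGHT_DENOMINATOR = 64

# 12 + 12 + 24 + 8 + 8 == 64, the invariant the spec build also asserts
assert (
    TIMELY_HEAD_WEIGHT + TIMELY_SOURCE_WEIGHT + TIMELY_TARGET_WEIGHT
    + SYNC_REWARD_WEIGHT + PROPOSER_WEIGHT
) == WEIGHT_DENOMINATOR
```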
### Misc

| Name | Value |
| - | - |
| `G2_POINT_AT_INFINITY` | `BLSSignature(b'\xc0' + b'\x00' * 95)` |

## Configuration

### Updated penalty values

This patch updates a few configuration values to move penalty parameters closer to their final, maximum security values.

*Note*: The spec does *not* override previous configuration values but instead creates new values and replaces usage throughout.

| Name | Value |
| - | - |
| `INACTIVITY_PENALTY_QUOTIENT_ALTAIR` | `uint64(3 * 2**24)` (= 50,331,648) |
| `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` | `uint64(2**6)` (= 64) |
| `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR` | `uint64(2)` |

### Sync committee

| Name | Value | Unit | Duration |
| - | - | - | - |
| `SYNC_COMMITTEE_SIZE` | `uint64(2**9)` (= 512) | Validators | |
| `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` | `uint64(2**9)` (= 512) | epochs | ~54 hours |
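The "~54 hours" figure follows from mainnet timing; a self-contained sketch of the arithmetic, assuming phase 0's `SECONDS_PER_SLOT = 12` and `SLOTS_PER_EPOCH = 32`:

```python
# Assumed mainnet phase 0 timing constants.
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 2**9  # 512, from the table above

period_seconds = EPOCHS_PER_SYNC_COMMITTEE_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
print(period_seconds / 3600)  # 54.61... hours, hence "~54 hours"
```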
### Misc

| Name | Value |
| - | - |
| `INACTIVITY_SCORE_BIAS` | `uint64(4)` |

### Domain types

| Name | Value |
| - | - |
| `DOMAIN_SYNC_COMMITTEE` | `DomainType('0x07000000')` |
| `DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF` | `DomainType('0x08000000')` |
| `DOMAIN_CONTRIBUTION_AND_PROOF` | `DomainType('0x09000000')` |

## Containers

### Modified containers

#### `BeaconBlockBody`

```python
class BeaconBlockBody(Container):
    randao_reveal: BLSSignature
    eth1_data: Eth1Data  # Eth1 data vote
    graffiti: Bytes32  # Arbitrary data
    # Operations
    proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
    attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
    attestations: List[Attestation, MAX_ATTESTATIONS]
    deposits: List[Deposit, MAX_DEPOSITS]
    voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
    # [New in Altair]
    sync_aggregate: SyncAggregate
```

#### `BeaconState`

```python
class BeaconState(Container):
    # Versioning
    genesis_time: uint64
    genesis_validators_root: Root
    slot: Slot
    fork: Fork
    # History
    latest_block_header: BeaconBlockHeader
    block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
    # Eth1
    eth1_data: Eth1Data
    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
    eth1_deposit_index: uint64
    # Registry
    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
    balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
    # Randomness
    randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR]
    # Slashings
    slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR]  # Per-epoch sums of slashed effective balances
    # Participation
    previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]  # [Modified in Altair]
    current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT]  # [Modified in Altair]
    # Finality
    justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH]  # Bit set for every recent justified epoch
    previous_justified_checkpoint: Checkpoint
    current_justified_checkpoint: Checkpoint
    finalized_checkpoint: Checkpoint
    # Inactivity
    inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT]  # [New in Altair]
    # Sync
    current_sync_committee: SyncCommittee  # [New in Altair]
    next_sync_committee: SyncCommittee  # [New in Altair]
```

### New containers

#### `SyncAggregate`

```python
class SyncAggregate(Container):
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
    sync_committee_signature: BLSSignature
```

#### `SyncCommittee`

```python
class SyncCommittee(Container):
    pubkeys: Vector[BLSPubkey, SYNC_COMMITTEE_SIZE]
    aggregate_pubkey: BLSPubkey
```

## Helper functions

### `Predicates`

#### `eth2_fast_aggregate_verify`

```python
def eth2_fast_aggregate_verify(pubkeys: Sequence[BLSPubkey], message: Bytes32, signature: BLSSignature) -> bool:
    """
    Wrapper to ``bls.FastAggregateVerify`` accepting the ``G2_POINT_AT_INFINITY`` signature when ``pubkeys`` is empty.
    """
    if len(pubkeys) == 0 and signature == G2_POINT_AT_INFINITY:
        return True
    return bls.FastAggregateVerify(pubkeys, message, signature)
```

### Misc

#### `get_flag_indices_and_weights`

```python
def get_flag_indices_and_weights() -> Sequence[Tuple[int, uint64]]:
    """
    Return paired tuples of participation flag indices along with associated incentivization weights.
    """
    return (
        (TIMELY_HEAD_FLAG_INDEX, TIMELY_HEAD_WEIGHT),
        (TIMELY_SOURCE_FLAG_INDEX, TIMELY_SOURCE_WEIGHT),
        (TIMELY_TARGET_FLAG_INDEX, TIMELY_TARGET_WEIGHT),
    )
```

#### `add_flag`

```python
def add_flag(flags: ParticipationFlags, flag_index: int) -> ParticipationFlags:
    """
    Return a new ``ParticipationFlags`` adding ``flag_index`` to ``flags``.
    """
    flag = ParticipationFlags(2**flag_index)
    return flags | flag
```

#### `has_flag`

```python
def has_flag(flags: ParticipationFlags, flag_index: int) -> bool:
    """
    Return whether ``flags`` has ``flag_index`` set.
    """
    flag = ParticipationFlags(2**flag_index)
    return flags & flag == flag
```
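A small self-contained sketch of how these two helpers combine, with plain ints standing in for the SSZ `uint8` type; flag index `i` maps to bit `2**i`, so a validator's per-epoch participation fits in one byte:

```python
# Flag indices mirror the constants defined above.
TIMELY_HEAD_FLAG_INDEX = 0
TIMELY_SOURCE_FLAG_INDEX = 1
TIMELY_TARGET_FLAG_INDEX = 2

def add_flag(flags: int, flag_index: int) -> int:
    return flags | (2**flag_index)

def has_flag(flags: int, flag_index: int) -> bool:
    flag = 2**flag_index
    return flags & flag == flag

flags = 0b0000_0000
flags = add_flag(flags, TIMELY_SOURCE_FLAG_INDEX)
flags = add_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert flags == 0b0000_0110  # bits 1 and 2 set
assert has_flag(flags, TIMELY_TARGET_FLAG_INDEX)
assert not has_flag(flags, TIMELY_HEAD_FLAG_INDEX)
```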
### Beacon state accessors

#### `get_sync_committee_indices`

```python
def get_sync_committee_indices(state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
    """
    Return the sequence of sync committee indices (which may include duplicate indices)
    for a given ``state`` and ``epoch``.

    Note: This function is not stable during a sync committee period as
    a validator's effective balance may change enough to affect the sampling.
    """
    MAX_RANDOM_BYTE = 2**8 - 1
    base_epoch = Epoch((max(epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD, 1) - 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
    active_validator_indices = get_active_validator_indices(state, base_epoch)
    active_validator_count = uint64(len(active_validator_indices))
    seed = get_seed(state, base_epoch, DOMAIN_SYNC_COMMITTEE)
    i = 0
    sync_committee_indices: List[ValidatorIndex] = []
    while len(sync_committee_indices) < SYNC_COMMITTEE_SIZE:
        shuffled_index = compute_shuffled_index(uint64(i % active_validator_count), active_validator_count, seed)
        candidate_index = active_validator_indices[shuffled_index]
        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
        effective_balance = state.validators[candidate_index].effective_balance
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:  # Sample with replacement
            sync_committee_indices.append(candidate_index)
        i += 1
    return sync_committee_indices
```

#### `get_sync_committee`

```python
def get_sync_committee(state: BeaconState, epoch: Epoch) -> SyncCommittee:
    """
    Return the sync committee for a given ``state`` and ``epoch``.

    ``SyncCommittee`` contains an aggregate pubkey that enables
    resource-constrained clients to save some computation when verifying
    the sync committee's signature.

    ``SyncCommittee`` can also contain duplicate pubkeys, when ``get_sync_committee_indices``
    returns duplicate indices. Implementations must take care when handling
    optimizations relating to aggregation and verification in the presence of duplicates.

    Note: This function should only be called at sync committee period boundaries, as
    ``get_sync_committee_indices`` is not stable within a given period.
    """
    indices = get_sync_committee_indices(state, epoch)
    pubkeys = [state.validators[index].pubkey for index in indices]
    aggregate_pubkey = bls.AggregatePKs(pubkeys)
    return SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=aggregate_pubkey)
```

#### `get_base_reward_per_increment`

```python
def get_base_reward_per_increment(state: BeaconState) -> Gwei:
    return Gwei(EFFECTIVE_BALANCE_INCREMENT * BASE_REWARD_FACTOR // integer_squareroot(get_total_active_balance(state)))
```

#### `get_base_reward`

*Note*: The function `get_base_reward` is modified with the removal of `BASE_REWARDS_PER_EPOCH` and the use of increment based accounting.

```python
def get_base_reward(state: BeaconState, index: ValidatorIndex) -> Gwei:
    """
    Return the base reward for the validator defined by ``index`` with respect to the current ``state``.

    Note: A validator can optimally earn one base reward per epoch over a long time horizon.
    This takes into account both per-epoch (e.g. attestation) and intermittent duties (e.g. block proposal
    and sync committees).
    """
    increments = state.validators[index].effective_balance // EFFECTIVE_BALANCE_INCREMENT
    return Gwei(increments * get_base_reward_per_increment(state))
```

#### `get_unslashed_participating_indices`

```python
def get_unslashed_participating_indices(state: BeaconState, flag_index: int, epoch: Epoch) -> Set[ValidatorIndex]:
    """
    Return the set of validator indices that are both active and unslashed for the given ``flag_index`` and ``epoch``.
    """
    assert epoch in (get_previous_epoch(state), get_current_epoch(state))
    if epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
    else:
        epoch_participation = state.previous_epoch_participation
    active_validator_indices = get_active_validator_indices(state, epoch)
    participating_indices = [i for i in active_validator_indices if has_flag(epoch_participation[i], flag_index)]
    return set(filter(lambda index: not state.validators[index].slashed, participating_indices))
```

#### `get_flag_index_deltas`

```python
def get_flag_index_deltas(state: BeaconState, flag_index: int, weight: uint64) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Return the deltas for a given ``flag_index`` scaled by ``weight`` by scanning through the participation flags.
    """
    rewards = [Gwei(0)] * len(state.validators)
    penalties = [Gwei(0)] * len(state.validators)
    unslashed_participating_indices = get_unslashed_participating_indices(state, flag_index, get_previous_epoch(state))
    increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balances to avoid uint64 overflow
    unslashed_participating_increments = get_total_balance(state, unslashed_participating_indices) // increment
    active_increments = get_total_active_balance(state) // increment
    for index in get_eligible_validator_indices(state):
        base_reward = get_base_reward(state, index)
        if index in unslashed_participating_indices:
            if is_in_inactivity_leak(state):
                # This flag reward cancels the inactivity penalty corresponding to the flag index
                rewards[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
            else:
                reward_numerator = base_reward * weight * unslashed_participating_increments
                rewards[index] += Gwei(reward_numerator // (active_increments * WEIGHT_DENOMINATOR))
        else:
            penalties[index] += Gwei(base_reward * weight // WEIGHT_DENOMINATOR)
    return rewards, penalties
```

#### Modified `get_inactivity_penalty_deltas`

*Note*: The function `get_inactivity_penalty_deltas` is modified in the selection of matching target indices and the removal of `BASE_REWARDS_PER_EPOCH`.

```python
def get_inactivity_penalty_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    """
    Return the inactivity penalty deltas by considering timely target participation flags and inactivity scores.
    """
    rewards = [Gwei(0) for _ in range(len(state.validators))]
    penalties = [Gwei(0) for _ in range(len(state.validators))]
    if is_in_inactivity_leak(state):
        previous_epoch = get_previous_epoch(state)
        matching_target_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, previous_epoch)
        for index in get_eligible_validator_indices(state):
            for (_, weight) in get_flag_indices_and_weights():
                # This inactivity penalty cancels the flag reward corresponding to the flag index
                penalties[index] += Gwei(get_base_reward(state, index) * weight // WEIGHT_DENOMINATOR)
            if index not in matching_target_indices:
                penalty_numerator = state.validators[index].effective_balance * state.inactivity_scores[index]
                penalty_denominator = INACTIVITY_SCORE_BIAS * INACTIVITY_PENALTY_QUOTIENT_ALTAIR
                penalties[index] += Gwei(penalty_numerator // penalty_denominator)
    return rewards, penalties
```

### Beacon state mutators

#### Modified `slash_validator`

*Note*: The function `slash_validator` is modified to use `MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR` and use `PROPOSER_WEIGHT` when calculating the proposer reward.

```python
def slash_validator(state: BeaconState,
                    slashed_index: ValidatorIndex,
                    whistleblower_index: ValidatorIndex=None) -> None:
    """
    Slash the validator with index ``slashed_index``.
    """
    epoch = get_current_epoch(state)
    initiate_validator_exit(state, slashed_index)
    validator = state.validators[slashed_index]
    validator.slashed = True
    validator.withdrawable_epoch = max(validator.withdrawable_epoch, Epoch(epoch + EPOCHS_PER_SLASHINGS_VECTOR))
    state.slashings[epoch % EPOCHS_PER_SLASHINGS_VECTOR] += validator.effective_balance
    decrease_balance(state, slashed_index, validator.effective_balance // MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR)

    # Apply proposer and whistleblower rewards
    proposer_index = get_beacon_proposer_index(state)
    if whistleblower_index is None:
        whistleblower_index = proposer_index
    whistleblower_reward = Gwei(validator.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT)
    proposer_reward = Gwei(whistleblower_reward * PROPOSER_WEIGHT // WEIGHT_DENOMINATOR)
    increase_balance(state, proposer_index, proposer_reward)
    increase_balance(state, whistleblower_index, Gwei(whistleblower_reward - proposer_reward))
```

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in Altair]
    process_sync_committee(state, block.body.sync_aggregate)  # [New in Altair]
```

#### Modified `process_attestation`

*Note*: The function `process_attestation` is modified to do incentive accounting with epoch participation flags.

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    data = attestation.data
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
    assert data.target.epoch == compute_epoch_at_slot(data.slot)
    assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH
    assert data.index < get_committee_count_per_slot(state, data.target.epoch)

    committee = get_beacon_committee(state, data.slot, data.index)
    assert len(attestation.aggregation_bits) == len(committee)

    if data.target.epoch == get_current_epoch(state):
        epoch_participation = state.current_epoch_participation
        justified_checkpoint = state.current_justified_checkpoint
    else:
        epoch_participation = state.previous_epoch_participation
        justified_checkpoint = state.previous_justified_checkpoint

    # Matching roots
    is_matching_source = data.source == justified_checkpoint
    is_matching_target = is_matching_source and data.target.root == get_block_root(state, data.target.epoch)
    is_matching_head = is_matching_target and data.beacon_block_root == get_block_root_at_slot(state, data.slot)
    assert is_matching_source

    # Verify signature
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))

    # Participation flag indices
    participation_flag_indices = []
    if is_matching_source and state.slot <= data.slot + integer_squareroot(SLOTS_PER_EPOCH):
        participation_flag_indices.append(TIMELY_SOURCE_FLAG_INDEX)
    if is_matching_target and state.slot <= data.slot + SLOTS_PER_EPOCH:
        participation_flag_indices.append(TIMELY_TARGET_FLAG_INDEX)
    if is_matching_head and state.slot == data.slot + MIN_ATTESTATION_INCLUSION_DELAY:
        participation_flag_indices.append(TIMELY_HEAD_FLAG_INDEX)

    # Update epoch participation flags
    proposer_reward_numerator = 0
    for index in get_attesting_indices(state, data, attestation.aggregation_bits):
        for flag_index, weight in get_flag_indices_and_weights():
            if flag_index in participation_flag_indices and not has_flag(epoch_participation[index], flag_index):
                epoch_participation[index] = add_flag(epoch_participation[index], flag_index)
                proposer_reward_numerator += get_base_reward(state, index) * weight

    # Reward proposer
    proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
    proposer_reward = Gwei(proposer_reward_numerator // proposer_reward_denominator)
    increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```

#### Modified `process_deposit`

*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`.

```python
def process_deposit(state: BeaconState, deposit: Deposit) -> None:
    # Verify the Merkle branch
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(deposit.data),
        branch=deposit.proof,
        depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1,  # Add 1 for the List length mix-in
        index=state.eth1_deposit_index,
        root=state.eth1_data.deposit_root,
    )

    # Deposits must be processed in order
    state.eth1_deposit_index += 1

    pubkey = deposit.data.pubkey
    amount = deposit.data.amount
    validator_pubkeys = [validator.pubkey for validator in state.validators]
    if pubkey not in validator_pubkeys:
        # Verify the deposit signature (proof of possession) which is not checked by the deposit contract
        deposit_message = DepositMessage(
            pubkey=deposit.data.pubkey,
            withdrawal_credentials=deposit.data.withdrawal_credentials,
            amount=deposit.data.amount,
        )
        domain = compute_domain(DOMAIN_DEPOSIT)  # Fork-agnostic domain since deposits are valid across forks
        signing_root = compute_signing_root(deposit_message, domain)
        # Initialize validator if the deposit signature is valid
        if bls.Verify(pubkey, signing_root, deposit.data.signature):
            state.validators.append(get_validator_from_deposit(state, deposit))
            state.balances.append(amount)
            state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000))
            state.current_epoch_participation.append(ParticipationFlags(0b0000_0000))
            state.inactivity_scores.append(uint64(0))
    else:
        # Increase balance by deposit amount
        index = ValidatorIndex(validator_pubkeys.index(pubkey))
        increase_balance(state, index, amount)
```

#### Sync committee processing

```python
def process_sync_committee(state: BeaconState, aggregate: SyncAggregate) -> None:
    # Verify sync committee aggregate signature signing over the previous slot block root
    committee_pubkeys = state.current_sync_committee.pubkeys
    participant_pubkeys = [pubkey for pubkey, bit in zip(committee_pubkeys, aggregate.sync_committee_bits) if bit]
    previous_slot = max(state.slot, Slot(1)) - Slot(1)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, compute_epoch_at_slot(previous_slot))
    signing_root = compute_signing_root(get_block_root_at_slot(state, previous_slot), domain)
    assert eth2_fast_aggregate_verify(participant_pubkeys, signing_root, aggregate.sync_committee_signature)

    # Compute participant and proposer rewards
    total_active_increments = get_total_active_balance(state) // EFFECTIVE_BALANCE_INCREMENT
    total_base_rewards = Gwei(get_base_reward_per_increment(state) * total_active_increments)
    max_participant_rewards = Gwei(total_base_rewards * SYNC_REWARD_WEIGHT // WEIGHT_DENOMINATOR // SLOTS_PER_EPOCH)
    participant_reward = Gwei(max_participant_rewards // SYNC_COMMITTEE_SIZE)
    proposer_reward = Gwei(participant_reward * PROPOSER_WEIGHT // (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT))

    # Apply participant and proposer rewards
    all_pubkeys = [v.pubkey for v in state.validators]
    committee_indices = [ValidatorIndex(all_pubkeys.index(pubkey)) for pubkey in state.current_sync_committee.pubkeys]
    participant_indices = [index for index, bit in zip(committee_indices, aggregate.sync_committee_bits) if bit]
    for participant_index in participant_indices:
        increase_balance(state, participant_index, participant_reward)
        increase_balance(state, get_beacon_proposer_index(state), proposer_reward)
```

### Epoch processing

```python
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)  # [Modified in Altair]
    process_inactivity_updates(state)  # [New in Altair]
    process_rewards_and_penalties(state)  # [Modified in Altair]
    process_registry_updates(state)
    process_slashings(state)  # [Modified in Altair]
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_flag_updates(state)  # [New in Altair]
    process_sync_committee_updates(state)  # [New in Altair]
```

#### Justification and finalization

*Note*: The function `process_justification_and_finalization` is modified to adapt to the new participation records.

```python
def process_justification_and_finalization(state: BeaconState) -> None:
    # Initial FFG checkpoint values have a `0x00` stub for `root`.
    # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
    if get_current_epoch(state) <= GENESIS_EPOCH + 1:
        return
    previous_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state))
    current_indices = get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_current_epoch(state))
    total_active_balance = get_total_active_balance(state)
    previous_target_balance = get_total_balance(state, previous_indices)
    current_target_balance = get_total_balance(state, current_indices)
    weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
```

#### Inactivity scores

*Note*: The function `process_inactivity_updates` is new.

```python
def process_inactivity_updates(state: BeaconState) -> None:
    for index in get_eligible_validator_indices(state):
        if index in get_unslashed_participating_indices(state, TIMELY_TARGET_FLAG_INDEX, get_previous_epoch(state)):
            if state.inactivity_scores[index] > 0:
                state.inactivity_scores[index] -= 1
        elif is_in_inactivity_leak(state):
            state.inactivity_scores[index] += INACTIVITY_SCORE_BIAS
```

#### Rewards and penalties

*Note*: The function `process_rewards_and_penalties` is modified to support the incentive accounting reforms.

```python
def process_rewards_and_penalties(state: BeaconState) -> None:
    # No rewards are applied at the end of `GENESIS_EPOCH` because rewards are for work done in the previous epoch
    if get_current_epoch(state) == GENESIS_EPOCH:
        return

    flag_indices_and_numerators = get_flag_indices_and_weights()
    flag_deltas = [get_flag_index_deltas(state, index, numerator) for (index, numerator) in flag_indices_and_numerators]
    deltas = flag_deltas + [get_inactivity_penalty_deltas(state)]
    for (rewards, penalties) in deltas:
        for index in range(len(state.validators)):
            increase_balance(state, ValidatorIndex(index), rewards[index])
            decrease_balance(state, ValidatorIndex(index), penalties[index])
```

#### Slashings

*Note*: The function `process_slashings` is modified to use `PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR`.

```python
def process_slashings(state: BeaconState) -> None:
    epoch = get_current_epoch(state)
    total_balance = get_total_active_balance(state)
    adjusted_total_slashing_balance = min(sum(state.slashings) * PROPORTIONAL_SLASHING_MULTIPLIER_ALTAIR, total_balance)
    for index, validator in enumerate(state.validators):
        if validator.slashed and epoch + EPOCHS_PER_SLASHINGS_VECTOR // 2 == validator.withdrawable_epoch:
            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from penalty numerator to avoid uint64 overflow
            penalty_numerator = validator.effective_balance // increment * adjusted_total_slashing_balance
            penalty = penalty_numerator // total_balance * increment
            decrease_balance(state, ValidatorIndex(index), penalty)
```

#### Participation flags updates

*Note*: The function `process_participation_flag_updates` is new.

```python
def process_participation_flag_updates(state: BeaconState) -> None:
    state.previous_epoch_participation = state.current_epoch_participation
    state.current_epoch_participation = [ParticipationFlags(0b0000_0000) for _ in range(len(state.validators))]
```

#### Sync committee updates

*Note*: The function `process_sync_committee_updates` is new.

```python
def process_sync_committee_updates(state: BeaconState) -> None:
    next_epoch = get_current_epoch(state) + Epoch(1)
    if next_epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD == 0:
        state.current_sync_committee = state.next_sync_committee
        state.next_sync_committee = get_sync_committee(state, next_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
```

## Initialize state for pure Altair testnets and test vectors

This helper function is only for initializing the state for pure Altair testnets and tests.

*Note*: The function `initialize_beacon_state_from_eth1` is modified: (1) using `ALTAIR_FORK_VERSION` as the current fork version, (2) utilizing the Altair `BeaconBlockBody` when constructing the initial `latest_block_header`, and (3) adding initial sync committees.

```python
def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
                                      eth1_timestamp: uint64,
                                      deposits: Sequence[Deposit]) -> BeaconState:
    fork = Fork(
        previous_version=GENESIS_FORK_VERSION,
        current_version=ALTAIR_FORK_VERSION,  # [Modified in Altair]
        epoch=GENESIS_EPOCH,
    )
    state = BeaconState(
        genesis_time=eth1_timestamp + GENESIS_DELAY,
        fork=fork,
        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
        latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
        randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
    )

    # Process deposits
    leaves = list(map(lambda deposit: deposit.data, deposits))
    for index, deposit in enumerate(deposits):
        deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1])
        state.eth1_data.deposit_root = hash_tree_root(deposit_data_list)
        process_deposit(state, deposit)

    # Process activations
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
        if validator.effective_balance == MAX_EFFECTIVE_BALANCE:
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    # [New in Altair] Fill in sync committees
    state.current_sync_committee = get_sync_committee(state, get_current_epoch(state))
    state.next_sync_committee = get_sync_committee(state, get_current_epoch(state) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

    return state
```
@ -0,0 +1,87 @@
|
||||||
|
# Ethereum 2.0 Altair fork
|
||||||
|
|
||||||
|
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||||
|
|
||||||
|
## Table of contents
|
||||||
|
|
||||||
|
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
|
||||||
|
- [Introduction](#introduction)
|
||||||
|
- [Configuration](#configuration)
|
||||||
|
- [Fork to Altair](#fork-to-altair)
|
||||||
|
- [Fork trigger](#fork-trigger)
|
||||||
|
- [Upgrading the state](#upgrading-the-state)
|
||||||
|
|
||||||
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
|
||||||
|
## Introduction
|
||||||
|
|
||||||
|
This document describes the process of the first upgrade of Ethereum 2.0: the Altair hard fork, introducing light client support and other improvements.
|
||||||
|
|
||||||
|
## Configuration
|
||||||
|
|
||||||
|
Warning: this configuration is not definitive.
|
||||||
|
|
||||||
|
| Name | Value |
|
||||||
|
| - | - |
|
||||||
|
| `ALTAIR_FORK_VERSION` | `Version('0x01000000')` |
|
||||||
|
| `ALTAIR_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
|
||||||
|
|
||||||
|
## Fork to Altair
|
||||||
|
|
||||||
|
### Fork trigger
|
||||||
|
|
||||||
|
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at epoch `ALTAIR_FORK_EPOCH`.
|
||||||
|
|
||||||
|
Note that for the pure Altair networks, we don't apply `upgrade_to_altair` since it starts with Altair version logic.
|
||||||
|
|
||||||
|
### Upgrading the state
|
||||||
|
|
||||||
|
After `process_slots` of Phase 0 finishes, if `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH`, an irregular state change is made to upgrade to Altair.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def upgrade_to_altair(pre: phase0.BeaconState) -> BeaconState:
|
||||||
|
epoch = phase0.get_current_epoch(pre)
|
||||||
|
post = BeaconState(
|
||||||
|
# Versioning
|
||||||
|
genesis_time=pre.genesis_time,
|
||||||
|
genesis_validators_root=pre.genesis_validators_root,
|
||||||
|
slot=pre.slot,
|
||||||
|
fork=Fork(
|
||||||
|
previous_version=pre.fork.current_version,
|
||||||
|
current_version=ALTAIR_FORK_VERSION,
|
||||||
|
epoch=epoch,
|
||||||
|
),
|
||||||
|
# History
|
||||||
|
latest_block_header=pre.latest_block_header,
|
||||||
|
block_roots=pre.block_roots,
|
||||||
|
state_roots=pre.state_roots,
|
||||||
|
historical_roots=pre.historical_roots,
|
||||||
|
# Eth1
|
||||||
|
eth1_data=pre.eth1_data,
|
||||||
|
eth1_data_votes=pre.eth1_data_votes,
|
||||||
|
eth1_deposit_index=pre.eth1_deposit_index,
|
||||||
|
# Registry
|
||||||
|
validators=pre.validators,
|
||||||
|
balances=pre.balances,
|
||||||
|
# Randomness
|
||||||
|
randao_mixes=pre.randao_mixes,
|
||||||
|
# Slashings
|
||||||
|
slashings=pre.slashings,
|
||||||
|
# Participation
|
||||||
|
previous_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
|
||||||
|
current_epoch_participation=[ParticipationFlags(0b0000_0000) for _ in range(len(pre.validators))],
|
||||||
|
# Finality
|
||||||
|
justification_bits=pre.justification_bits,
|
||||||
|
previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||||
|
current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||||
|
finalized_checkpoint=pre.finalized_checkpoint,
|
||||||
|
# Inactivity
|
||||||
|
inactivity_scores=[uint64(0) for _ in range(len(pre.validators))],
|
||||||
|
)
|
||||||
|
# Fill in sync committees
|
||||||
|
post.current_sync_committee = get_sync_committee(post, get_current_epoch(post))
|
||||||
|
post.next_sync_committee = get_sync_committee(post, get_current_epoch(post) + EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||||
|
return post
|
||||||
|
```
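
To make this trigger concrete, here is a minimal sketch (not spec code) of applying the irregular state change right after slot processing; `state_transition_slots` is a hypothetical client-side wrapper around the Phase 0 `process_slots`:

```python
def state_transition_slots(state: phase0.BeaconState, slot: Slot) -> BeaconState:
    # Run the regular Phase 0 slot processing first
    phase0.process_slots(state, slot)
    # Apply the irregular state change exactly at the fork epoch boundary
    if state.slot % SLOTS_PER_EPOCH == 0 and compute_epoch_at_slot(state.slot) == ALTAIR_FORK_EPOCH:
        return upgrade_to_altair(state)
    return state
```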
@@ -0,0 +1,271 @@
# Ethereum Altair networking specification

This document contains the networking specification for Ethereum 2.0 clients added during the Altair deployment.
This document should be viewed as additive to the [document from Phase 0](../phase0/p2p-interface.md) and will be referred to as the "Phase 0 document" hereafter.
Readers should understand the Phase 0 document and use it as a basis to understand the changes outlined in this document.

Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery domains. Some Phase 0 features will be deprecated, but not removed immediately.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Warning](#warning)
- [Modifications in Altair](#modifications-in-altair)
  - [MetaData](#metadata)
  - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub)
    - [Topics and messages](#topics-and-messages)
      - [Global topics](#global-topics)
        - [`beacon_block`](#beacon_block)
        - [`sync_committee_contribution_and_proof`](#sync_committee_contribution_and_proof)
      - [Sync committee subnets](#sync-committee-subnets)
        - [`sync_committee_{subnet_id}`](#sync_committee_subnet_id)
      - [Sync committees and aggregation](#sync-committees-and-aggregation)
    - [Transitioning the gossip](#transitioning-the-gossip)
  - [The Req/Resp domain](#the-reqresp-domain)
    - [Req-Resp interaction](#req-resp-interaction)
      - [`ForkDigest`-context](#forkdigest-context)
    - [Messages](#messages)
      - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2)
      - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2)
      - [GetMetaData v2](#getmetadata-v2)
    - [Transitioning from v1 to v2](#transitioning-from-v1-to-v2)
  - [The discovery domain: discv5](#the-discovery-domain-discv5)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Warning

This document is currently illustrative for early Altair testnets and some parts are subject to change.
Refer to the note in the [validator guide](./validator.md) for further details.

# Modifications in Altair

## MetaData

The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions:

```
(
  seq_number: uint64
  attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
  syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]
)
```

Where

- `seq_number` and `attnets` have the same meaning defined in the Phase 0 document.
- `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability).
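
Expressed as a Python SSZ container, the extended `MetaData` mirrors the tuple above (a sketch; `Container` and `Bitvector` are the spec's SSZ types):

```python
class MetaData(Container):
    seq_number: uint64
    attnets: Bitvector[ATTESTATION_SUBNET_COUNT]
    syncnets: Bitvector[SYNC_COMMITTEE_SUBNET_COUNT]  # [New in Altair]
```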

## The gossip domain: gossipsub

Gossip meshes are added in Altair to support the consensus activities of the sync committees.
Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors.

### Topics and messages

Topics follow the same specification as in the Phase 0 document.
New topics are added in Altair to support the sync committees, and the beacon block topic is updated with the modified type.

The specification around the creation, validation, and dissemination of messages has not changed from the Phase 0 document.

The new topics along with the type of the `data` field of a gossipsub message are given in this table:

| Name | Message Type |
| - | - |
| `beacon_block` | `SignedBeaconBlock` (modified) |
| `sync_committee_contribution_and_proof` | `SignedContributionAndProof` |
| `sync_committee_{subnet_id}` | `SyncCommitteeSignature` |

Definitions of these new types can be found in the [Altair validator guide](./validator.md#containers).

Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics.
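
For intuition, gossip topic strings follow the Phase 0 convention `/eth2/ForkDigestValue/Name/Encoding` (with the `ssz_snappy` encoding), so the fork digest alone namespaces the pre- and post-fork variants of a topic. A minimal sketch:

```python
# Sketch: build the full topic name for a given fork digest.
def beacon_block_topic(fork_digest: bytes) -> str:
    # e.g. a 4-byte fork digest yields "/eth2/01020304/beacon_block/ssz_snappy"
    return f"/eth2/{fork_digest.hex()}/beacon_block/ssz_snappy"
```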

#### Global topics

Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee signatures to all potential proposers of beacon blocks.

##### `beacon_block`

The existing specification for this topic does not change from the Phase 0 document,
but the type of the payload does change to the (modified) `SignedBeaconBlock`.
This type changes due to the inclusion of the inner `BeaconBlockBody` that is modified in Altair.

See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details.

##### `sync_committee_contribution_and_proof`

This topic is used to propagate partially aggregated sync committee signatures to be included in future blocks.

The following validations MUST pass before forwarding the `signed_contribution_and_proof` on the network; define `contribution_and_proof = signed_contribution_and_proof.message`, `contribution = contribution_and_proof.contribution`, and the following function `get_sync_subcommittee_pubkeys` for convenience (a sketch of a few of these checks follows the list):

```python
def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64) -> Sequence[BLSPubkey]:
    sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
    i = subcommittee_index * sync_subcommittee_size
    return state.current_sync_committee.pubkeys[i:i + sync_subcommittee_size]
```

- _[IGNORE]_ The contribution's slot is for the current slot, i.e. `contribution.slot == current_slot`.
- _[IGNORE]_ The block being signed over (`contribution.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
- _[REJECT]_ The subcommittee index is in the allowed range, i.e. `contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT`.
- _[IGNORE]_ The sync committee contribution is the first valid contribution received for the aggregator with index `contribution_and_proof.aggregator_index` for the slot `contribution.slot` and subcommittee index `contribution.subcommittee_index`.
- _[REJECT]_ `contribution_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_sync_committee_aggregator(contribution_and_proof.selection_proof)` returns `True`.
- _[REJECT]_ The aggregator's validator index is in the declared subcommittee of the current sync committee --
  i.e. `state.validators[contribution_and_proof.aggregator_index].pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)`.
- _[REJECT]_ The `contribution_and_proof.selection_proof` is a valid signature of the `SyncAggregatorSelectionData` derived from the `contribution` by the validator with index `contribution_and_proof.aggregator_index`.
- _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid.
- _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`.
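
Below is a minimal sketch (not spec code) of a few of the above checks; `seen_contributions` is a hypothetical local cache keyed by (aggregator, slot, subcommittee) used for the first-seen rule:

```python
def basic_contribution_checks(state: BeaconState,
                              signed_contribution_and_proof: SignedContributionAndProof,
                              current_slot: Slot,
                              seen_contributions: Set[Tuple[ValidatorIndex, Slot, uint64]]) -> bool:
    contribution_and_proof = signed_contribution_and_proof.message
    contribution = contribution_and_proof.contribution
    # [IGNORE] the contribution is for the current slot
    if contribution.slot != current_slot:
        return False
    # [REJECT] the subcommittee index is in the allowed range
    assert contribution.subcommittee_index < SYNC_COMMITTEE_SUBNET_COUNT
    # [IGNORE] first valid contribution for this (aggregator, slot, subcommittee)
    key = (contribution_and_proof.aggregator_index, contribution.slot, contribution.subcommittee_index)
    if key in seen_contributions:
        return False
    # [REJECT] the selection proof selects the validator as an aggregator
    assert is_sync_committee_aggregator(contribution_and_proof.selection_proof)
    # [REJECT] the aggregator is in the declared subcommittee
    pubkey = state.validators[contribution_and_proof.aggregator_index].pubkey
    assert pubkey in get_sync_subcommittee_pubkeys(state, contribution.subcommittee_index)
    seen_contributions.add(key)
    return True
```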

#### Sync committee subnets

Sync committee subnets are used to propagate unaggregated sync committee signatures to subsections of the network.

##### `sync_committee_{subnet_id}`

The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee signatures to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic.

The following validations MUST pass before forwarding the `sync_committee_signature` on the network (a dedup-cache sketch for the first-seen rule follows the list):

- _[IGNORE]_ The signature's slot is for the current slot, i.e. `sync_committee_signature.slot == current_slot`.
- _[IGNORE]_ The block being signed over (`sync_committee_signature.beacon_block_root`) has been seen (via both gossip and non-gossip sources).
- _[IGNORE]_ There has been no other valid sync committee signature for the declared `slot` for the validator referenced by `sync_committee_signature.validator_index`.
- _[REJECT]_ The `subnet_id` is valid for the given validator, i.e. `subnet_id in compute_subnets_for_sync_committee(state, sync_committee_signature.validator_index)`.
  Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
- _[REJECT]_ The `signature` is valid for the message `beacon_block_root` for the validator referenced by `validator_index`.
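
A sketch of the first-seen dedup rule above, accepting only the first valid signature per (slot, validator); `seen` is a hypothetical local cache:

```python
def is_first_signature(seen: Set[Tuple[Slot, ValidatorIndex]],
                       sync_committee_signature: SyncCommitteeSignature) -> bool:
    key = (sync_committee_signature.slot, sync_committee_signature.validator_index)
    if key in seen:
        return False  # Duplicate for this slot and validator: IGNORE
    seen.add(key)
    return True
```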

#### Sync committees and aggregation

The aggregation scheme closely follows the design of the attestation aggregation scheme.
Sync committee signatures are broadcast into "subnets" defined by a topic.
The number of subnets is defined by `SYNC_COMMITTEE_SUBNET_COUNT` in the [Altair validator guide](./validator.md#constants).
Sync committee members are divided into "subcommittees" which are then assigned to a subnet for the duration of their tenure in the sync committee.
Individual validators can be duplicated in the broader sync committee such that they are included multiple times in a given subcommittee or across multiple subcommittees.

Unaggregated signatures (along with metadata) are sent as `SyncCommitteeSignature`s on the `sync_committee_{subnet_id}` topics.

Aggregated sync committee signatures are packaged into (signed) `SyncCommitteeContribution`s along with proofs and gossiped to the `sync_committee_contribution_and_proof` topic.

### Transitioning the gossip

With any fork, the fork version, and thus the `ForkDigestValue`, change.
Message types are unique per topic, and so for a smooth transition a node must temporarily subscribe to both the old and new topics.

The topics that are not removed in a fork are updated with a new `ForkDigestValue`. In advance of the fork, a node SHOULD subscribe to the post-fork variants of the topics.

Subscriptions are expected to be well-received, since all updated nodes should subscribe as well.
Topic-meshes can be grafted quickly as the nodes are already connected and exchanging gossip control messages.

Messages SHOULD NOT be re-broadcast from one fork to the other.
A node's behavior before and after the fork is as follows:

Pre-fork:
- Peers who propagate messages on the post-fork topics MAY be scored negatively, proportionally to the time until the fork,
  to account for clock discrepancy.
- Messages can be IGNORED on the post-fork topics, with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` margin.

Post-fork:
- Peers who propagate messages on the pre-fork topics MUST NOT be scored negatively; lagging IWANT requests may force them to do so.
- Messages on pre- and post-fork variants of topics share application-level caches.
  E.g. an attestation on both the old and new topic is ignored like any duplicate.
- Two epochs after the fork, pre-fork topics SHOULD be unsubscribed from. This is well after the configured `seen_ttl`.

## The Req/Resp domain

### Req-Resp interaction

An additional `<context-bytes>` field is introduced to the `response_chunk` as defined in the Phase 0 document:

```
response_chunk ::= <result> | <context-bytes> | <encoding-dependent-header> | <encoded-payload>
```

All Phase 0 methods are compatible: `<context-bytes>` is empty by default.
On a non-zero `<result>` with `ErrorMessage` payload, the `<context-bytes>` is also empty.

In Altair and later forks, `<context-bytes>` functions as short metadata,
defined per req-resp method, that can parametrize the payload decoder.

#### `ForkDigest`-context

Starting with Altair, and in future forks, SSZ type definitions may change.
For this common case, we define the `ForkDigest`-context:

A fixed-width 4 byte `<context-bytes>`, set to the `ForkDigest` matching the chunk:
`compute_fork_digest(fork_version, genesis_validators_root)`.

### Messages

#### BeaconBlocksByRange v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/`

Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[0]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
| ------------------------ | -------------------------- |
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |
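
A sketch of how a reader of response chunks might use the context bytes to pick the SSZ type; `fork_digests` is a hypothetical precomputed map from 4-byte digest to fork name:

```python
def block_type_for_context(context_bytes: bytes, fork_digests: Dict[bytes, str]):
    # Unrecognized context bytes should fail the request rather than guess a type
    fork = fork_digests[context_bytes]
    if fork == "phase0":
        return phase0.SignedBeaconBlock
    elif fork == "altair":
        return altair.SignedBeaconBlock
    else:
        raise ValueError("unsupported fork digest")
```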

#### BeaconBlocksByRoot v2

**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/`

Request and Response remain unchanged. A `ForkDigest`-context is used to select the fork namespace of the Response type.

Per `context = compute_fork_digest(fork_version, genesis_validators_root)`:

[1]: # (eth2spec: skip)

| `fork_version` | Chunk SSZ type |
| ------------------------ | -------------------------- |
| `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` |
| `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` |

#### GetMetaData v2

**Protocol ID:** `/eth2/beacon_chain/req/metadata/2/`

No Request Content.

Response Content:

```
(
  MetaData
)
```

Requests the MetaData of a peer, using the new `MetaData` definition given above
that is extended from phase 0 in Altair. Other conditions for the `GetMetaData`
protocol are unchanged from the phase 0 p2p networking document.

### Transitioning from v1 to v2

In advance of the fork, implementations can opt in to running both v1 and v2 for a smooth transition.
This is non-breaking, and is recommended as soon as the fork specification is stable.

The v1 variants will be deprecated, and implementations should use v2 when available
(as negotiated with peers via LibP2P multistream-select).

The v1 method MAY be unregistered at the fork boundary.
In the event of a request on v1 for an Altair specific payload,
the responder MUST return the **InvalidRequest** response code.

## The discovery domain: discv5

The `attnets` key of the ENR is used as defined in the Phase 0 document.

An additional bitfield is added to the ENR under the key `syncnets` to facilitate sync committee subnet discovery.
The length of this bitfield is `SYNC_COMMITTEE_SUBNET_COUNT` where each bit corresponds to a distinct `subnet_id` for a specific sync committee subnet.
The `i`th bit is set in this bitfield if the validator is currently subscribed to the `sync_committee_{i}` topic.

See the [validator document](./validator.md#sync-committee-subnet-stability) for further details on how the new bits are used.

@@ -0,0 +1,195 @@
# Minimal Light Client

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Constants](#constants)
- [Configuration](#configuration)
  - [Misc](#misc)
- [Containers](#containers)
  - [`LightClientSnapshot`](#lightclientsnapshot)
  - [`LightClientUpdate`](#lightclientupdate)
  - [`LightClientStore`](#lightclientstore)
- [Helper functions](#helper-functions)
  - [`get_subtree_index`](#get_subtree_index)
- [Light client state updates](#light-client-state-updates)
  - [`validate_light_client_update`](#validate_light_client_update)
  - [`apply_light_client_update`](#apply_light_client_update)
  - [`process_light_client_update`](#process_light_client_update)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

Eth2 is designed to be light client friendly, allowing constrained environments to
access Eth2 with reasonable safety and liveness.
Such environments include resource-constrained devices (e.g. phones for trust-minimised wallets)
and metered VMs (e.g. blockchain VMs for cross-chain bridges).

This document suggests a minimal light client design for the beacon chain that
uses sync committees introduced in [this beacon chain extension](./beacon-chain.md).

## Constants

| Name | Value |
| - | - |
| `FINALIZED_ROOT_INDEX` | `get_generalized_index(BeaconState, 'finalized_checkpoint', 'root')` |
| `NEXT_SYNC_COMMITTEE_INDEX` | `get_generalized_index(BeaconState, 'next_sync_committee')` |

## Configuration

### Misc

| Name | Value |
| - | - |
| `MIN_SYNC_COMMITTEE_PARTICIPANTS` | `1` |

## Containers

### `LightClientSnapshot`

```python
class LightClientSnapshot(Container):
    # Beacon block header
    header: BeaconBlockHeader
    # Sync committees corresponding to the header
    current_sync_committee: SyncCommittee
    next_sync_committee: SyncCommittee
```

### `LightClientUpdate`

```python
class LightClientUpdate(Container):
    # Update beacon block header
    header: BeaconBlockHeader
    # Next sync committee corresponding to the header
    next_sync_committee: SyncCommittee
    next_sync_committee_branch: Vector[Bytes32, floorlog2(NEXT_SYNC_COMMITTEE_INDEX)]
    # Finality proof for the update header
    finality_header: BeaconBlockHeader
    finality_branch: Vector[Bytes32, floorlog2(FINALIZED_ROOT_INDEX)]
    # Sync committee aggregate signature
    sync_committee_bits: Bitvector[SYNC_COMMITTEE_SIZE]
    sync_committee_signature: BLSSignature
    # Fork version for the aggregate signature
    fork_version: Version
```

### `LightClientStore`

```python
@dataclass
class LightClientStore(object):
    snapshot: LightClientSnapshot
    valid_updates: Set[LightClientUpdate]
```

## Helper functions

### `get_subtree_index`

```python
def get_subtree_index(generalized_index: GeneralizedIndex) -> uint64:
    return uint64(generalized_index % 2**(floorlog2(generalized_index)))
```
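
For intuition: a generalized index encodes a node's depth via its most significant bit and its position within that level in the remaining bits, and `get_subtree_index` extracts that position. Taking `105` as a purely illustrative generalized index:

```python
# floorlog2(105) == 6, so the subtree index is the low 6 bits of 105.
assert 105 % 2**6 == 41  # get_subtree_index(105) would return 41
```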

## Light client state updates

A light client maintains its state in a `store` object of type `LightClientStore` and receives `update` objects of type `LightClientUpdate`. Every `update` triggers `process_light_client_update(store, update, current_slot)` where `current_slot` is the current slot based on some local clock.

#### `validate_light_client_update`

```python
def validate_light_client_update(snapshot: LightClientSnapshot,
                                 update: LightClientUpdate,
                                 genesis_validators_root: Root) -> None:
    # Verify update slot is larger than snapshot slot
    assert update.header.slot > snapshot.header.slot

    # Verify update does not skip a sync committee period
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert update_period in (snapshot_period, snapshot_period + 1)

    # Verify update header root is the finalized root of the finality header, if specified
    if update.finality_header == BeaconBlockHeader():
        signed_header = update.header
        assert update.finality_branch == [Bytes32() for _ in range(floorlog2(FINALIZED_ROOT_INDEX))]
    else:
        signed_header = update.finality_header
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.header),
            branch=update.finality_branch,
            depth=floorlog2(FINALIZED_ROOT_INDEX),
            index=get_subtree_index(FINALIZED_ROOT_INDEX),
            root=update.finality_header.state_root,
        )

    # Verify update next sync committee if the update period incremented
    if update_period == snapshot_period:
        sync_committee = snapshot.current_sync_committee
        assert update.next_sync_committee_branch == [Bytes32() for _ in range(floorlog2(NEXT_SYNC_COMMITTEE_INDEX))]
    else:
        sync_committee = snapshot.next_sync_committee
        assert is_valid_merkle_branch(
            leaf=hash_tree_root(update.next_sync_committee),
            branch=update.next_sync_committee_branch,
            depth=floorlog2(NEXT_SYNC_COMMITTEE_INDEX),
            index=get_subtree_index(NEXT_SYNC_COMMITTEE_INDEX),
            root=update.header.state_root,
        )

    # Verify sync committee has sufficient participants
    assert sum(update.sync_committee_bits) >= MIN_SYNC_COMMITTEE_PARTICIPANTS

    # Verify sync committee aggregate signature
    participant_pubkeys = [pubkey for (bit, pubkey) in zip(update.sync_committee_bits, sync_committee.pubkeys) if bit]
    domain = compute_domain(DOMAIN_SYNC_COMMITTEE, update.fork_version, genesis_validators_root)
    signing_root = compute_signing_root(signed_header, domain)
    assert bls.FastAggregateVerify(participant_pubkeys, signing_root, update.sync_committee_signature)
```

#### `apply_light_client_update`

```python
def apply_light_client_update(snapshot: LightClientSnapshot, update: LightClientUpdate) -> None:
    snapshot_period = compute_epoch_at_slot(snapshot.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = compute_epoch_at_slot(update.header.slot) // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if update_period == snapshot_period + 1:
        snapshot.current_sync_committee = snapshot.next_sync_committee
        snapshot.next_sync_committee = update.next_sync_committee
    snapshot.header = update.header
```

#### `process_light_client_update`

```python
def process_light_client_update(store: LightClientStore, update: LightClientUpdate, current_slot: Slot,
                                genesis_validators_root: Root) -> None:
    validate_light_client_update(store.snapshot, update, genesis_validators_root)
    store.valid_updates.add(update)

    update_timeout = SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    if (
        sum(update.sync_committee_bits) * 3 >= len(update.sync_committee_bits) * 2
        and update.finality_header != BeaconBlockHeader()
    ):
        # Apply update if (1) 2/3 quorum is reached and (2) we have a finality proof.
        # Note that (2) means that the current light client design needs finality.
        # It may be changed to re-organizable light client design. See the on-going issue eth2.0-specs#2182.
        apply_light_client_update(store.snapshot, update)
        store.valid_updates = set()
    elif current_slot > store.snapshot.header.slot + update_timeout:
        # Forced best update when the update timeout has elapsed
        apply_light_client_update(store.snapshot,
                                  max(store.valid_updates, key=lambda update: sum(update.sync_committee_bits)))
        store.valid_updates = set()
```
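
A minimal driving-loop sketch (not spec code) showing how a client might feed received updates into the store; `receive_update`, `get_current_slot`, and `trusted_snapshot` are hypothetical:

```python
store = LightClientStore(snapshot=trusted_snapshot, valid_updates=set())
while True:
    update = receive_update()  # e.g. from a server or a gossip feed
    try:
        process_light_client_update(store, update, get_current_slot(), genesis_validators_root)
    except AssertionError:
        pass  # Invalid update: discard it and keep listening
```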

@@ -0,0 +1,431 @@
# Ethereum 2.0 Altair -- Honest Validator

This is an accompanying document to [Ethereum 2.0 Altair -- The Beacon Chain](./beacon-chain.md), which describes the expected actions of a "validator" participating in the Ethereum 2.0 protocol.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Warning](#warning)
- [Constants](#constants)
  - [Misc](#misc)
- [Containers](#containers)
  - [`SyncCommitteeSignature`](#synccommitteesignature)
  - [`SyncCommitteeContribution`](#synccommitteecontribution)
  - [`ContributionAndProof`](#contributionandproof)
  - [`SignedContributionAndProof`](#signedcontributionandproof)
  - [`SyncAggregatorSelectionData`](#syncaggregatorselectiondata)
- [Validator assignments](#validator-assignments)
  - [Sync Committee](#sync-committee)
  - [Lookahead](#lookahead)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Preparing a `BeaconBlock`](#preparing-a-beaconblock)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
      - [Sync committee](#sync-committee)
    - [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock)
  - [Attesting and attestation aggregation](#attesting-and-attestation-aggregation)
  - [Sync committees](#sync-committees)
    - [Sync committee signatures](#sync-committee-signatures)
      - [Prepare sync committee signature](#prepare-sync-committee-signature)
      - [Broadcast sync committee signature](#broadcast-sync-committee-signature)
    - [Sync committee contributions](#sync-committee-contributions)
      - [Aggregation selection](#aggregation-selection)
      - [Construct sync committee contribution](#construct-sync-committee-contribution)
        - [Slot](#slot)
        - [Beacon block root](#beacon-block-root)
        - [Subcommittee index](#subcommittee-index)
        - [Aggregation bits](#aggregation-bits)
        - [Signature](#signature)
      - [Broadcast sync committee contribution](#broadcast-sync-committee-contribution)
- [Sync committee subnet stability](#sync-committee-subnet-stability)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document represents the expected behavior of an "honest validator" with respect to the Altair upgrade of the Ethereum 2.0 protocol.
It builds on the [previous document for the behavior of an "honest validator" from Phase 0](../phase0/validator.md) of the Ethereum 2.0 protocol.
This previous document is referred to below as the "Phase 0 document".

Altair introduces a new type of committee: the sync committee. Sync committees are responsible for signing each block of the canonical chain and there exists an efficient algorithm for light clients to sync the chain using the output of the sync committees.
See the [sync protocol](./sync-protocol.md) for further details on the light client sync.
Under this network upgrade, validators track their participation in this new committee type and produce the relevant signatures as required.
Block proposers incorporate the (aggregated) sync committee signatures into each block they produce.

## Prerequisites

All terminology, constants, functions, and protocol mechanics defined in the [Altair -- The Beacon Chain](./beacon-chain.md) doc are requisite for this document and used throughout.
Please see this document before continuing and use it as a reference throughout.

## Warning

This document is currently illustrative for early Altair testnets and some parts are subject to change, especially pending implementation and profiling of Altair testnets.

## Constants

### Misc

| Name | Value | Unit |
| - | - | :-: |
| `TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE` | `2**2` (= 4) | validators |
| `SYNC_COMMITTEE_SUBNET_COUNT` | `4` | The number of sync committee subnets used in the gossipsub aggregation protocol. |

## Containers

### `SyncCommitteeSignature`

```python
class SyncCommitteeSignature(Container):
    # Slot to which this signature pertains
    slot: Slot
    # Block root for this signature
    beacon_block_root: Root
    # Index of the validator that produced this signature
    validator_index: ValidatorIndex
    # Signature by the validator over the block root of `slot`
    signature: BLSSignature
```

### `SyncCommitteeContribution`

```python
class SyncCommitteeContribution(Container):
    # Slot to which this contribution pertains
    slot: Slot
    # Block root for this contribution
    beacon_block_root: Root
    # The subcommittee this contribution pertains to out of the broader sync committee
    subcommittee_index: uint64
    # A bit is set if a signature from the validator at the corresponding
    # index in the subcommittee is present in the aggregate `signature`.
    aggregation_bits: Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]
    # Signature by the validator(s) over the block root of `slot`
    signature: BLSSignature
```

### `ContributionAndProof`

```python
class ContributionAndProof(Container):
    aggregator_index: ValidatorIndex
    contribution: SyncCommitteeContribution
    selection_proof: BLSSignature
```

### `SignedContributionAndProof`

```python
class SignedContributionAndProof(Container):
    message: ContributionAndProof
    signature: BLSSignature
```

### `SyncAggregatorSelectionData`

```python
class SyncAggregatorSelectionData(Container):
    slot: Slot
    subcommittee_index: uint64
```

## Validator assignments

A validator determines beacon committee assignments and beacon block proposal duties as defined in the Phase 0 document.

### Sync Committee

To determine sync committee assignments, a validator can run the following function: `is_assigned_to_sync_committee(state, epoch, validator_index)` where `epoch` is an epoch number within the current or next sync committee period.
This function is a predicate indicating the presence or absence of the validator in the corresponding sync committee for the queried sync committee period.

```python
def compute_sync_committee_period(epoch: Epoch) -> uint64:
    return epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
```

```python
def is_assigned_to_sync_committee(state: BeaconState,
                                  epoch: Epoch,
                                  validator_index: ValidatorIndex) -> bool:
    sync_committee_period = compute_sync_committee_period(epoch)
    current_epoch = get_current_epoch(state)
    current_sync_committee_period = compute_sync_committee_period(current_epoch)
    next_sync_committee_period = current_sync_committee_period + 1
    assert sync_committee_period in (current_sync_committee_period, next_sync_committee_period)

    pubkey = state.validators[validator_index].pubkey
    if sync_committee_period == current_sync_committee_period:
        return pubkey in state.current_sync_committee.pubkeys
    else:  # sync_committee_period == next_sync_committee_period
        return pubkey in state.next_sync_committee.pubkeys
```

### Lookahead

The sync committee shufflings give validators 1 sync committee period of lookahead which amounts to `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs.
At any given `epoch`, the `BeaconState` contains the current `SyncCommittee` and the next `SyncCommittee`.
Once every `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs, the next `SyncCommittee` becomes the current `SyncCommittee` and the next committee is computed and stored.

*Note*: The data required to compute a given committee is not cached in the `BeaconState` after committees are calculated at the period boundaries.
This means that calling `get_sync_committee()` in a given `epoch` can return a different result than what was computed during the relevant epoch transition.
For this reason, *always* get committee assignments via the fields of the `BeaconState` (`current_sync_committee` and `next_sync_committee`) or use the above reference code.

A validator should plan for future sync committee assignments by noting in which sync committee periods they are selected to participate.
Specifically, a validator should:
* Upon (re)syncing the chain and upon sync committee period boundaries, check for assignments in the current and next sync committee periods.
* If the validator is in the current sync committee period, then they perform the responsibilities below for sync committee rewards.
* If the validator is in the next sync committee period, they should wait until the next `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` boundary and then perform the responsibilities throughout that period.

## Beacon chain responsibilities

A validator maintains the responsibilities given in the Phase 0 document.

Block proposals are modified to incorporate the sync committee signatures as detailed below.

When assigned to a sync committee, validators have a new responsibility to sign and broadcast beacon block roots during each slot of the sync committee period.
These signatures are aggregated and routed to the proposer over gossip for inclusion into a beacon block.
Assignments to a particular sync committee are infrequent at normal validator counts; however, an action every slot is required when in the current active sync committee.

### Block proposal

Refer to the phase 0 document for the majority of the [block proposal responsibility](../phase0/validator.md#block-proposal).
The validator should follow those instructions to prepare a `SignedBeaconBlock` for inclusion into the chain. All changes are additive to phase 0 and noted below.

#### Preparing a `BeaconBlock`

No change to [Preparing for a `BeaconBlock`](../phase0/validator.md#preparing-for-a-beaconblock).

#### Constructing the `BeaconBlockBody`

Each section of [Constructing the `BeaconBlockBody`](../phase0/validator.md#constructing-the-beaconblockbody) should be followed.
After constructing the `BeaconBlockBody` as per that section, the proposer has an additional task to include the sync committee signatures:

##### Sync committee

The proposer receives a number of `SyncCommitteeContribution`s (wrapped in `SignedContributionAndProof`s on the wire) from validators in the sync committee who are selected to partially aggregate signatures from independent subcommittees formed by breaking the full sync committee into `SYNC_COMMITTEE_SUBNET_COUNT` pieces (see below for details).

The proposer collects the contributions that match their local view of the chain (i.e. `contribution.beacon_block_root == block.parent_root`) for further aggregation when preparing a block.
Of these contributions, proposers should select the best contribution seen across all aggregators for each subnet/subcommittee.
A contribution with more valid signatures is better than a contribution with fewer signatures.

Recall that `block.body.sync_aggregate.sync_committee_bits` is a `Bitvector` where the `i`th bit is `True` if the corresponding validator in the sync committee has produced a valid signature,
and that `block.body.sync_aggregate.sync_committee_signature` is the aggregate BLS signature combining all of the valid signatures.

Given a collection of the best seen `contributions` (with no repeating `subcommittee_index` values) and the `BeaconBlock` under construction,
the proposer processes them as follows:

```python
def process_sync_committee_contributions(block: BeaconBlock,
                                         contributions: Set[SyncCommitteeContribution]) -> None:
    sync_aggregate = SyncAggregate()
    signatures = []

    for contribution in contributions:
        subcommittee_index = contribution.subcommittee_index
        for index, participated in enumerate(contribution.aggregation_bits):
            if participated:
                sync_subcommittee_size = SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT
                participant_index = sync_subcommittee_size * subcommittee_index + index
                sync_aggregate.sync_committee_bits[participant_index] = True
        signatures.append(contribution.signature)

    sync_aggregate.sync_committee_signature = bls.Aggregate(signatures)

    block.body.sync_aggregate = sync_aggregate
```

*Note*: The resulting block must pass the validations for the `SyncAggregate` in `process_sync_committee`, defined in the [state transition document](./beacon-chain.md#sync-committee-processing).
In particular, this means `SyncCommitteeContribution`s received from gossip must have a `beacon_block_root` that matches the proposer's local view of the chain.

#### Packaging into a `SignedBeaconBlock`

No change to [Packaging into a `SignedBeaconBlock`](../phase0/validator.md#packaging-into-a-signedbeaconblock).

### Attesting and attestation aggregation

Refer to the phase 0 document for the [attesting](../phase0/validator.md#attesting) and [attestation aggregation](../phase0/validator.md#attestation-aggregation) responsibilities.
There is no change compared to the phase 0 document.

### Sync committees

Sync committee members employ an aggregation scheme to reduce load on the global proposer channel that is monitored by all potential proposers to be able to include the full output of the sync committee every slot.
Sync committee members produce individual signatures on subnets (similar to the attestation subnets) via `SyncCommitteeSignature`s which are then collected by aggregators sampled from the sync subcommittees to produce a `SyncCommitteeContribution` which is gossiped to proposers.
This process occurs each slot.

#### Sync committee signatures

##### Prepare sync committee signature

If a validator is in the current sync committee (i.e. `is_assigned_to_sync_committee()` above returns `True`), then for every slot in the current sync committee period, the validator should prepare a `SyncCommitteeSignature` according to the logic in `get_sync_committee_signature` as soon as they have determined the head block of the current slot.

This logic is triggered upon the same conditions as when producing an attestation.
That is, a sync committee member should produce and broadcast a `SyncCommitteeSignature` either when (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the slot has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of the slot) -- whichever comes first.

`get_sync_committee_signature()` assumes `state` is the head state corresponding to processing the block up to the current slot as determined by the fork choice (including any empty slots up to the current slot processed with `process_slots` on top of the latest block), `block_root` is the root of the head block, `validator_index` is the index of the validator in the registry `state.validators` controlled by `privkey`, and `privkey` is the BLS private key for the validator.

```python
def get_sync_committee_signature(state: BeaconState,
                                 block_root: Root,
                                 validator_index: ValidatorIndex,
                                 privkey: int) -> SyncCommitteeSignature:
    epoch = get_current_epoch(state)
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE, epoch)
    signing_root = compute_signing_root(block_root, domain)
    signature = bls.Sign(privkey, signing_root)

    return SyncCommitteeSignature(
        slot=state.slot,
        beacon_block_root=block_root,
        validator_index=validator_index,
        signature=signature,
    )
```

##### Broadcast sync committee signature

The validator broadcasts the assembled signature to the assigned subnet, the `sync_committee_{subnet_id}` pubsub topic.

The `subnet_id` is derived from the position in the sync committee such that the sync committee is divided into "subcommittees".
`subnet_id` can be computed via `compute_subnets_for_sync_committee()` where `state` is a `BeaconState` during the matching sync committee period.

*Note*: This function returns multiple subnets if a given validator index is included multiple times in a given sync committee across multiple subcommittees.

```python
def compute_subnets_for_sync_committee(state: BeaconState, validator_index: ValidatorIndex) -> Sequence[uint64]:
    target_pubkey = state.validators[validator_index].pubkey
    sync_committee_indices = [
        index for index, pubkey in enumerate(state.current_sync_committee.pubkeys)
        if pubkey == target_pubkey
    ]
    return [
        uint64(index // (SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT))
        for index in sync_committee_indices
    ]
```

*Note*: Subnet assignment does not change during the duration of a validator's assignment to a given sync committee.

*Note*: If a validator has multiple `subnet_id` results from `compute_subnets_for_sync_committee`, the validator should broadcast a copy of the `sync_committee_signature` on each of the distinct subnets.
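
A small usage sketch of the broadcast rule above; `broadcast` is a hypothetical gossip publish helper:

```python
# A validator duplicated in the sync committee may map to several subnets
# and should publish its signature on each distinct one.
for subnet_id in set(compute_subnets_for_sync_committee(state, validator_index)):
    broadcast(f"sync_committee_{subnet_id}", sync_committee_signature)
```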

#### Sync committee contributions

Each slot, some sync committee members in each subcommittee are selected to aggregate the `SyncCommitteeSignature`s into a `SyncCommitteeContribution` which is broadcast on a global channel for inclusion into the next block.

##### Aggregation selection

A validator is selected to aggregate based on the value returned by `is_sync_committee_aggregator()` where `signature` is the BLS signature returned by `get_sync_committee_selection_proof()`.
The signature function takes a `BeaconState` with the relevant sync committees for the queried `slot` (i.e. `state.slot` is within the span covered by the current or next sync committee period), the `subcommittee_index` equal to the `subnet_id`, and the BLS private key `privkey` associated with the validator.

```python
def get_sync_committee_selection_proof(state: BeaconState,
                                       slot: Slot,
                                       subcommittee_index: uint64,
                                       privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF, compute_epoch_at_slot(slot))
    signing_data = SyncAggregatorSelectionData(
        slot=slot,
        subcommittee_index=subcommittee_index,
    )
    signing_root = compute_signing_root(signing_data, domain)
    return bls.Sign(privkey, signing_root)
```

```python
def is_sync_committee_aggregator(signature: BLSSignature) -> bool:
    modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
    return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
```

*NOTE*: The set of aggregators generally changes every slot; however, the assignments can be computed ahead of time as soon as the committee is known.

##### Construct sync committee contribution

If a validator is selected to aggregate the `SyncCommitteeSignature`s produced on a subnet during a given `slot`, they construct an aggregated `SyncCommitteeContribution`.

Given all of the (valid) collected `sync_committee_signatures: Set[SyncCommitteeSignature]` from the `sync_committee_{subnet_id}` gossip during the selected `slot` with an equivalent `beacon_block_root` to that of the aggregator, the aggregator creates a `contribution: SyncCommitteeContribution` with the following fields:

###### Slot

Set `contribution.slot = state.slot` where `state` is the `BeaconState` for the slot in question.

###### Beacon block root

Set `contribution.beacon_block_root = beacon_block_root` from the `beacon_block_root` found in the `sync_committee_signatures`.

###### Subcommittee index

Set `contribution.subcommittee_index` to the index of the subcommittee assigned to this subnet. This index matches the `subnet_id` used to derive the topic name.

###### Aggregation bits

Let `contribution.aggregation_bits` be a `Bitvector[SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT]`, where the `index`th bit is set in the `Bitvector` for each corresponding validator included in this aggregate from the corresponding subcommittee.
An aggregator finds the index in the sync committee (as returned by `get_sync_committee_indices()`) for a given validator referenced by `sync_committee_signature.validator_index` and maps the sync committee index to an index in the subcommittee (along with the prior `subcommittee_index`). This index within the subcommittee is set in `contribution.aggregation_bits`.

For example, if a validator with index `2044` is pseudo-randomly sampled to sync committee index `135`, this sync committee index maps to `subcommittee_index` `1` with position `7` in the `Bitvector` for the contribution.

*Note*: A validator **could be included multiple times** in a given subcommittee such that multiple bits are set for a single `SyncCommitteeSignature`.
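
The mapping in the example above is a simple divmod by the subcommittee size; a sketch, assuming a subcommittee size of `SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT == 128` consistent with that example:

```python
sync_committee_index = 135
subcommittee_index, position = divmod(sync_committee_index, 128)
assert (subcommittee_index, position) == (1, 7)
```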
|
||||||
|
|
||||||
|
###### Signature
|
||||||
|
|
||||||
|
Set `contribution.signature = aggregate_signature` where `aggregate_signature` is obtained by assembling the appropriate collection of `BLSSignature`s from the set of `sync_committee_signatures` and using the `bls.Aggregate()` function to produce an aggregate `BLSSignature`.
|
||||||
|
|
||||||
|
The collection of input signatures should include one signature per validator who had a bit set in the `aggregation_bits` bitfield, with repeated signatures if one validator maps to multiple indices within the subcommittee.
|
||||||
|
|
||||||
|
##### Broadcast sync committee contribution
|
||||||
|
|
||||||
|
If the validator is selected to aggregate (`is_sync_committee_aggregator()`), then they broadcast their best aggregate as a `SignedContributionAndProof` to the global aggregate channel (`sync_committee_contribution_and_proof` topic) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
|
||||||
|
|
||||||
|
Selection proofs are provided in `ContributionAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
|
||||||
|
|
||||||
|
`ContributionAndProof` messages are signed by the aggregator and broadcast inside of `SignedContributionAndProof` objects to prevent a class of DoS attacks and message forgeries.
|
||||||
|
|
||||||
|
First, `contribution_and_proof = get_contribution_and_proof(state, validator_index, contribution, privkey)` is constructed.
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_contribution_and_proof(state: BeaconState,
|
||||||
|
aggregator_index: ValidatorIndex,
|
||||||
|
contribution: SyncCommitteeContribution,
|
||||||
|
privkey: int) -> ContributionAndProof:
|
||||||
|
selection_proof = get_sync_committee_selection_proof(
|
||||||
|
state,
|
||||||
|
contribution.slot,
|
||||||
|
contribution.subcommittee_index,
|
||||||
|
privkey,
|
||||||
|
)
|
||||||
|
return ContributionAndProof(
|
||||||
|
aggregator_index=aggregator_index,
|
||||||
|
contribution=contribution,
|
||||||
|
selection_proof=selection_proof,
|
||||||
|
)
|
||||||
|
```
|
||||||
|
|
||||||
|
Then `signed_contribution_and_proof = SignedContributionAndProof(message=contribution_and_proof, signature=signature)` is constructed and broadcast. Where `signature` is obtained from:
|
||||||
|
|
||||||
|
```python
|
||||||
|
def get_contribution_and_proof_signature(state: BeaconState,
|
||||||
|
contribution_and_proof: ContributionAndProof,
|
||||||
|
privkey: int) -> BLSSignature:
|
||||||
|
contribution = contribution_and_proof.contribution
|
||||||
|
domain = get_domain(state, DOMAIN_CONTRIBUTION_AND_PROOF, compute_epoch_at_slot(contribution.slot))
|
||||||
|
signing_root = compute_signing_root(contribution_and_proof, domain)
|
||||||
|
return bls.Sign(privkey, signing_root)
|
||||||
|
```
|
||||||
|
|
||||||
|
## Sync committee subnet stability
|
||||||
|
|
||||||
|
The sync committee subnets need special care to ensure stability given the relatively low number of validators involved in the sync committee at any particular time.
|
||||||
|
To provide this stability, a validator must do the following:

* Maintain advertisement of the subnet the validator in the sync committee is assigned to in their node's ENR as soon as they have joined the subnet.
  Subnet assignments are known `EPOCHS_PER_SYNC_COMMITTEE_PERIOD` epochs in advance and can be computed with `compute_subnets_for_sync_committee` defined above.
  ENR advertisement is indicated by setting the appropriate bit(s) of the bitfield found under the `syncnets` key in the ENR corresponding to the derived `subnet_id`(s).
  Any bits modified for the sync committee responsibilities are unset in the ENR once the node no longer has any validators in the subcommittee.

  *Note*: The first sync committee from phase 0 to the Altair fork will not be known until the fork happens, which implies subnet assignments are not known until then.
  Early sync committee members should listen for topic subscriptions from peers and employ discovery via the ENR advertisements near the fork boundary to form initial subnets.
  Some early sync committee rewards may be missed while the initial subnets form.

* To join a sync committee subnet, select a random number of epochs before the end of the current sync committee period between 1 and `SYNC_COMMITTEE_SUBNET_COUNT`, inclusive.
  Validators should join their member subnet at the beginning of the epoch they have randomly selected.
  For example, if the next sync committee period starts at epoch `853,248` and the validator randomly selects an offset of `3`, they should join the subnet at the beginning of epoch `853,245` (see the sketch after this list).
  Validators should leverage the lookahead period on sync committee assignments so that they can join the appropriate subnets ahead of their assigned sync committee period.
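
The join epoch can be computed as in the following sketch; `compute_subnet_join_epoch` is a hypothetical helper, not part of the spec:

```python
import random

def compute_subnet_join_epoch(next_period_start_epoch: int) -> int:
    # Random offset between 1 and SYNC_COMMITTEE_SUBNET_COUNT epochs, inclusive,
    # before the start of the next sync committee period.
    offset = random.randint(1, SYNC_COMMITTEE_SUBNET_COUNT)
    return next_period_start_epoch - offset

# e.g. next_period_start_epoch = 853_248 with a sampled offset of 3
# yields a join epoch of 853_245.
```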

@ -1,9 +1,10 @@

# Ethereum 2.0 Custody Game -- Beacon Chain

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

@ -13,8 +14,13 @@

- [Configuration](#configuration)
  - [Time parameters](#time-parameters)
  - [Max operations per block](#max-operations-per-block)
  - [Size parameters](#size-parameters)
  - [Reward and penalty quotients](#reward-and-penalty-quotients)
- [Data structures](#data-structures)
  - [Extended types](#extended-types)
    - [`Validator`](#validator)
    - [`BeaconBlockBody`](#beaconblockbody)
    - [`BeaconState`](#beaconstate)
  - [New Beacon Chain operations](#new-beacon-chain-operations)
    - [`CustodyChunkChallenge`](#custodychunkchallenge)
    - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord)

@ -33,6 +39,7 @@

  - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
  - [`get_custody_period_for_validator`](#get_custody_period_for_validator)
- [Per-block processing](#per-block-processing)
  - [Block processing](#block-processing)
  - [Custody Game Operations](#custody-game-operations)
    - [Chunk challenges](#chunk-challenges)
    - [Custody chunk response](#custody-chunk-response)

@ -40,14 +47,18 @@

    - [Early derived secret reveals](#early-derived-secret-reveals)
    - [Custody Slashings](#custody-slashings)
- [Per-epoch processing](#per-epoch-processing)
  - [Epoch transition](#epoch-transition)
  - [Handling of reveal deadlines](#handling-of-reveal-deadlines)
  - [Final updates](#final-updates)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document details the beacon chain additions and changes of Ethereum 2.0 to support the shard data custody game,
building upon the [Sharding](../sharding/beacon-chain.md) specification.

## Constants

@ -83,6 +94,14 @@ This document details the beacon chain additions and changes in Phase 1 of Ether

| `MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES` | `uint64(2**4)` (= 16) |
| `MAX_CUSTODY_SLASHINGS` | `uint64(2**0)` (= 1) |

### Size parameters

| Name | Value | Unit |
| - | - | - |
| `BYTES_PER_CUSTODY_CHUNK` | `uint64(2**12)` (= 4,096) | bytes |
| `CUSTODY_RESPONSE_DEPTH` | `ceillog2(MAX_SHARD_BLOCK_SIZE // BYTES_PER_CUSTODY_CHUNK)` | - |

### Reward and penalty quotients

| Name | Value |

@ -92,6 +111,45 @@ This document details the beacon chain additions and changes in Phase 1 of Ether

## Data structures

### Extended types

#### `Validator`

```python
class Validator(sharding.Validator):
    # next_custody_secret_to_reveal is initialised to the custody period
    # (of the particular validator) in which the validator is activated
    # = get_custody_period_for_validator(...)
    next_custody_secret_to_reveal: uint64
    # TODO: The max_reveal_lateness doesn't really make sense anymore.
    # So how do we incentivise early custody key reveals now?
    all_custody_secrets_revealed_epoch: Epoch  # to be initialized to FAR_FUTURE_EPOCH
```

#### `BeaconBlockBody`

```python
class BeaconBlockBody(sharding.BeaconBlockBody):
    # Custody game
    chunk_challenges: List[CustodyChunkChallenge, MAX_CUSTODY_CHUNK_CHALLENGES]
    chunk_challenge_responses: List[CustodyChunkResponse, MAX_CUSTODY_CHUNK_CHALLENGE_RESPONSES]
    custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS]
    early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS]
    custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS]
```

#### `BeaconState`

```python
class BeaconState(sharding.BeaconState):
    # Future derived secrets already exposed; contains the indices of the exposed validator
    # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
    exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH],
                                    EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
    custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, MAX_CUSTODY_CHUNK_CHALLENGE_RECORDS]
    custody_chunk_challenge_index: uint64
```

### New Beacon Chain operations

#### `CustodyChunkChallenge`

@ -293,6 +351,18 @@ def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epo

## Per-block processing

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_light_client_aggregate(state, block.body)
    process_operations(state, block.body)
    process_custody_game_operations(state, block.body)
```

### Custody Game Operations

@ -550,6 +620,41 @@ def process_custody_slashing(state: BeaconState, signed_custody_slashing: Signed

## Per-epoch processing

### Epoch transition

This epoch transition overrides the phase0 epoch transition:

```python
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)
    process_rewards_and_penalties(state)
    process_registry_updates(state)

    # Proof of custody
    process_reveal_deadlines(state)
    process_challenge_deadlines(state)

    process_slashings(state)

    # Sharding
    process_pending_headers(state)
    charge_confirmed_header_fees(state)
    reset_pending_headers(state)

    # Final updates
    # Phase 0
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_record_updates(state)
    # Proof of custody
    process_custody_final_updates(state)

    process_shard_epoch_increment(state)
```

### Handling of reveal deadlines

@ -583,7 +688,7 @@ def process_custody_final_updates(state: BeaconState) -> None:

```python
    for index, validator in enumerate(state.validators):
        if validator.exit_epoch != FAR_FUTURE_EPOCH:
            not_all_secrets_are_revealed = validator.all_custody_secrets_revealed_epoch == FAR_FUTURE_EPOCH
            if ValidatorIndex(index) in validator_indices_in_records or not_all_secrets_are_revealed:
                # Delay withdrawable epochs if challenge records are not empty or not all
                # custody secrets revealed
                validator.withdrawable_epoch = FAR_FUTURE_EPOCH
```

@ -0,0 +1,85 @@

# Ethereum 2.0 Custody Game -- Honest Validator

**Notice**: This document is a work-in-progress for researchers and implementers.

This is an accompanying document to the [Ethereum 2.0 Custody Game](./), which describes the expected actions of a "validator"
participating in the Ethereum 2.0 Custody Game.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Becoming a validator](#becoming-a-validator)
- [Beacon chain validator assignments](#beacon-chain-validator-assignments)
    - [Custody slashings](#custody-slashings)
    - [Custody key reveals](#custody-key-reveals)
    - [Early derived secret reveals](#early-derived-secret-reveals)
  - [Construct attestation](#construct-attestation)
- [How to avoid slashing](#how-to-avoid-slashing)
  - [Custody slashing](#custody-slashing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

## Prerequisites

This document is an extension of the [Sharding -- Validator](../sharding/validator.md) guide. All behaviors and definitions defined in the Sharding doc carry over unless explicitly noted or overridden.

All terminology, constants, functions, and protocol mechanics defined in the [Custody Game -- The Beacon Chain](./beacon-chain.md)
docs are requisite for this document and used throughout. Please see the Custody Game docs before continuing and use them as a reference throughout.

## Becoming a validator

Becoming a validator in the Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details.

## Beacon chain validator assignments

Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details.

##### Custody slashings

Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./beacon-chain.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](beacon-chain.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE).

##### Custody key reveals

Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./beacon-chain.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included.

##### Early derived secret reveals

Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./beacon-chain.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](beacon-chain.md#custody-key-reveals). The validator receives a small "whistleblower" reward for each early derived secret reveal included.
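
Taken together, block construction for these operations might look like the following sketch; the `*_pool` collections are hypothetical local pools of gossip-received operations, not defined by this spec:

```python
# Fill the custody game operation lists from hypothetical local pools,
# respecting each per-block maximum.
block.body.custody_slashings = custody_slashing_pool[:MAX_CUSTODY_SLASHINGS]
block.body.custody_key_reveals = custody_key_reveal_pool[:MAX_CUSTODY_KEY_REVEALS]
block.body.early_derived_secret_reveals = early_derived_secret_reveal_pool[:MAX_EARLY_DERIVED_SECRET_REVEALS]
```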

#### Construct attestation

`attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO].

## How to avoid slashing

Proposer and Attester slashings described in Phase 0 remain in place with the addition of the following.

### Custody slashing

To avoid custody slashings, the attester must never sign any shard transition for which the custody bit is one. The custody bit is computed using the custody secret:

```python
def get_custody_secret(state: BeaconState,
                       validator_index: ValidatorIndex,
                       privkey: int,
                       epoch: Epoch=None) -> BLSSignature:
    if epoch is None:
        epoch = get_current_epoch(state)
    period = get_custody_period_for_validator(validator_index, epoch)
    epoch_to_sign = get_randao_epoch_for_custody_period(period, validator_index)
    domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
    signing_root = compute_signing_root(Epoch(epoch_to_sign), domain)
    return bls.Sign(privkey, signing_root)
```

Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated.
While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing.
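
For example, when signing an attestation, the secret is derived for the attestation's target epoch. A usage sketch, where `attestation` is an assumed local object:

```python
# Derive the custody secret for the attestation *target* epoch, not the epoch
# in which the shard block was generated.
custody_secret = get_custody_secret(
    state,
    validator_index,
    privkey,
    epoch=attestation.data.target.epoch,
)
```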

@ -0,0 +1,190 @@

# Ethereum 2.0 Data Availability Sampling -- Core

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Custom types](#custom-types)
- [Configuration](#configuration)
  - [Misc](#misc)
- [New containers](#new-containers)
  - [`DASSample`](#dassample)
- [Helper functions](#helper-functions)
  - [Reverse bit ordering](#reverse-bit-ordering)
    - [`reverse_bit_order`](#reverse_bit_order)
    - [`reverse_bit_order_list`](#reverse_bit_order_list)
  - [Data extension](#data-extension)
  - [Data recovery](#data-recovery)
- [DAS functions](#das-functions)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `SampleIndex` | `uint64` | A sample index, corresponding to a chunk of extended data |

## Configuration

### Misc

| Name | Value | Notes |
| - | - | - |
| `MAX_RESAMPLE_TIME` | `TODO` (= TODO) | Time window to sample a shard blob and put it on vertical subnets |

## New containers

### `DASSample`

```python
class DASSample(Container):
    slot: Slot
    shard: Shard
    index: SampleIndex
    proof: BLSCommitment
    data: Vector[BLSPoint, POINTS_PER_SAMPLE]
```

## Helper functions

### Reverse bit ordering

#### `reverse_bit_order`

```python
def reverse_bit_order(n: int, order: int) -> int:
    """
    Reverse the bit order of an integer n
    """
    assert is_power_of_two(order)
    return int(('{:0' + str(order.bit_length() - 1) + 'b}').format(n)[::-1], 2)
```

#### `reverse_bit_order_list`

```python
def reverse_bit_order_list(elements: Sequence[int]) -> Sequence[int]:
    order = len(elements)
    assert is_power_of_two(order)
    return [elements[reverse_bit_order(i, order)] for i in range(order)]
```
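
For intuition, a small worked example over eight elements (illustrative only):

```python
# order = 8, so indices are 3-bit values: 1 = 0b001 reversed is 0b100 = 4.
assert reverse_bit_order(1, 8) == 4
assert reverse_bit_order_list([0, 1, 2, 3, 4, 5, 6, 7]) == [0, 4, 2, 6, 1, 5, 3, 7]
```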

### Data extension

Implementations:
- [Python](https://github.com/protolambda/partial_fft/blob/master/das_fft.py)
- [Go](https://github.com/protolambda/go-kate/blob/master/das_extension.go)

```python
def das_fft_extension(data: Sequence[Point]) -> Sequence[Point]:
    """
    Given some even-index values of an IFFT input, compute the odd-index inputs,
    such that the second output half of the IFFT is all zeroes.
    """
    poly = inverse_fft(data)
    return fft(poly + [0]*len(poly))[1::2]
```
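
The defining property, as an illustrative check (assuming `fft`/`inverse_fft` over the same power-of-two evaluation domain):

```python
# Interleaving the even-index inputs with the computed odd-index inputs gives
# the evaluations of a polynomial whose upper half of coefficients is zero.
odds = das_fft_extension(evens)
combined = [v for pair in zip(evens, odds) for v in pair]
poly = inverse_fft(combined)
assert all(v == 0 for v in poly[len(poly) // 2:])
```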

### Data recovery

See [Reed-Solomon erasure code recovery in n*log^2(n) time with FFTs](https://ethresear.ch/t/reed-solomon-erasure-code-recovery-in-n-log-2-n-time-with-ffts/3039) for the theory.
Implementations:
- [Original Python](https://github.com/ethereum/research/blob/master/mimc_stark/recovery.py)
- [New optimized approach in python](https://github.com/ethereum/research/tree/master/polynomial_reconstruction)
- [Old approach in Go](https://github.com/protolambda/go-kate/blob/master/recovery.go)

```python
def recover_data(data: Sequence[Optional[Sequence[Point]]]) -> Sequence[Point]:
    """Given a subset of half or more of subgroup-aligned ranges of values, recover the None values."""
    ...
```

## DAS functions

```python
def extend_data(data: Sequence[Point]) -> Sequence[Point]:
    """
    The input data gets reverse-bit-ordered, such that the first half of the final output matches the original data.
    We calculate the odd-index values with the DAS FFT extension, and reverse-bit-order them into the second half.
    """
    rev_bit_odds = reverse_bit_order_list(das_fft_extension(reverse_bit_order_list(data)))
    return data + rev_bit_odds
```

```python
def unextend_data(extended_data: Sequence[Point]) -> Sequence[Point]:
    return extended_data[:len(extended_data)//2]
```
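
`unextend_data` is the left inverse of `extend_data`: the extension appends the reverse-bit-ordered odd-index values after the original data, so dropping the second half recovers the input exactly. Illustratively:

```python
# Round-trip invariant, for any power-of-two sized `data`:
assert unextend_data(extend_data(data)) == data
```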

```python
def check_multi_kzg_proof(commitment: BLSCommitment, proof: BLSCommitment, x: Point, ys: Sequence[Point]) -> bool:
    """
    Run a KZG multi-proof check to verify that for the subgroup starting at x,
    the proof indeed complements the ys to match the commitment.
    """
    ...  # Omitted for now, refer to KZG implementation resources.
```

```python
def construct_proofs(extended_data_as_poly: Sequence[Point]) -> Sequence[BLSCommitment]:
    """
    Constructs proofs for samples of extended data (in polynomial form, 2nd half being zeroes).
    Use the FK20 multi-proof approach to construct proofs for a chunk length of POINTS_PER_SAMPLE.
    """
    ...  # Omitted for now, refer to KZG implementation resources.
```

```python
def commit_to_data(data_as_poly: Sequence[Point]) -> BLSCommitment:
    """Commit to a polynomial by computing its KZG commitment."""
    ...  # Omitted for now, refer to KZG implementation resources.
```

```python
def sample_data(slot: Slot, shard: Shard, extended_data: Sequence[Point]) -> Sequence[DASSample]:
    sample_count = len(extended_data) // POINTS_PER_SAMPLE
    assert sample_count <= MAX_SAMPLES_PER_BLOCK
    # get polynomial form of full extended data, second half will be all zeroes.
    poly = ifft(reverse_bit_order_list(extended_data))
    assert all(v == 0 for v in poly[len(poly)//2:])
    proofs = construct_proofs(poly)
    return [
        DASSample(
            slot=slot,
            shard=shard,
            # The proof applies to `x = w ** (reverse_bit_order(i, sample_count) * POINTS_PER_SAMPLE)`
            index=i,
            # The computed proofs match the reverse_bit_order_list(extended_data), undo that to get the right proof.
            proof=proofs[reverse_bit_order(i, sample_count)],
            # note: we leave the sample data as-is so it matches the original nicely.
            # The proof applies to `ys = reverse_bit_order_list(sample.data)`
            data=extended_data[i*POINTS_PER_SAMPLE:(i+1)*POINTS_PER_SAMPLE]
        ) for i in range(sample_count)
    ]
```
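
A usage sketch pairing the producer and consumer sides (illustrative; `commitment` is assumed to be the known KZG commitment for the blob):

```python
# Producer: split the extended data into independently provable samples.
samples = sample_data(slot, shard, extended_data)
# Consumer: each sample verifies on its own against the blob commitment.
for sample in samples:
    verify_sample(sample, len(samples), commitment)
```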

```python
def verify_sample(sample: DASSample, sample_count: uint64, commitment: BLSCommitment):
    domain_pos = reverse_bit_order(sample.index, sample_count)
    sample_root_of_unity = ROOT_OF_UNITY**MAX_SAMPLES_PER_BLOCK  # change point-level to sample-level domain
    x = sample_root_of_unity**domain_pos
    ys = reverse_bit_order_list(sample.data)
    assert check_multi_kzg_proof(commitment, sample.proof, x, ys)
```

```python
def reconstruct_extended_data(samples: Sequence[Optional[DASSample]]) -> Sequence[Point]:
    # Instead of recovering with a point-by-point approach, recover the samples by recovering missing subgroups.
    subgroups = [None if sample is None else reverse_bit_order_list(sample.data) for sample in samples]
    return recover_data(subgroups)
```

@ -0,0 +1,46 @@

# Ethereum 2.0 Data Availability Sampling -- Fork Choice

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Dependency calculation](#dependency-calculation)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document is the beacon chain fork choice spec for Ethereum 2.0 Data Availability Sampling. The only change from phase 0 is the concept of "data dependencies";
a block is only eligible for consideration in the fork choice after a data availability test has been successfully completed for all dependencies.
The "root" of a shard block for data dependency purposes is considered to be a `DataCommitment` object, which is a pair of a Kate commitment and a length.

## Dependency calculation

```python
def get_new_dependencies(state: BeaconState) -> Set[DataCommitment]:
    return set(
        # Already confirmed during this epoch
        [c.commitment for c in state.current_epoch_pending_headers if c.confirmed] +
        # Already confirmed during previous epoch
        [c.commitment for c in state.previous_epoch_pending_headers if c.confirmed] +
        # Confirmed in the epoch before the previous
        [c for shard in state.grandparent_epoch_confirmed_commitments for c in shard if c != DataCommitment()]
    )
```

```python
def get_all_dependencies(store: Store, block: BeaconBlock) -> Set[DataCommitment]:
    if compute_epoch_at_slot(block.slot) < SHARDING_FORK_EPOCH:
        return set()
    else:
        latest = get_new_dependencies(store.block_states[hash_tree_root(block)])
        older = get_all_dependencies(store, store.blocks[block.parent_root])
        return latest.union(older)
```

@ -0,0 +1,229 @@

# Ethereum 2.0 Data Availability Sampling -- Network specification

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [DAS Subnets](#das-subnets)
  - [Horizontal subnets](#horizontal-subnets)
    - [Publishing](#publishing)
    - [Horizontal propagation](#horizontal-propagation)
    - [Horizontal to vertical](#horizontal-to-vertical)
  - [Vertical subnets](#vertical-subnets)
    - [Slow rotation: Backbone](#slow-rotation-backbone)
    - [Quick Rotation: Sampling](#quick-rotation-sampling)
- [DAS in the Gossip domain: Push](#das-in-the-gossip-domain-push)
  - [Topics and messages](#topics-and-messages)
    - [Horizontal subnets: `shard_blob_{shard}`](#horizontal-subnets-shard_blob_shard)
    - [Vertical subnets: `das_sample_{subnet_index}`](#vertical-subnets-das_sample_subnet_index)
- [DAS in the Req-Resp domain: Pull](#das-in-the-req-resp-domain-pull)
  - [Messages](#messages)
    - [DASQuery](#dasquery)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

For an introduction to DAS itself, see [the DAS participation spec](sampling.md#data-availability-sampling).
This is not a prerequisite for the network layer, but will give you valuable context.

For sampling, all nodes need to query for `k` random samples each slot.

*__TODO__: describe big picture of sampling workload size*

This is a lot of work, and ideally happens at a low latency.

To achieve quick querying, the query model is changed to *push* the samples to listeners instead, using GossipSub.
The listeners then randomly rotate their subscriptions to keep queries unpredictable,
except for a small subset of subscriptions which will function as a backbone to keep topics more stable and allow for efficient peer discovery.

Publishing can utilize the fan-out functionality in GossipSub, and is easier to split between nodes:
nodes on the horizontal networks can help by producing the same samples and fan-out publishing to their own peers.

This push model also helps to obfuscate the original source of a message:
the listeners do not have to make individual queries to some identified source.

The push model does not aim to serve "historical" queries (anything older than the most recent).
Historical queries are still required for the unhappy case, where messages are not pushed quickly enough,
and missing samples are not reconstructed by other nodes on the horizontal subnet quickly enough.

The main challenge in supporting historical queries is to target the right nodes,
without concentrating too many requests on a single node, or breaking the network/consensus identity separation.

## DAS Subnets

On a high level, the push-model roles are divided into:
- Sources: create blobs of shard block data, which are transformed into many tiny samples.
- Sinks: continuously look for samples.

At full operation, the network has one proposer per shard per slot.

In the push-model, there are:
- *Vertical subnets*: Sinks can subscribe to indices of samples: there is a sample-to-subnet mapping.
- *Horizontal subnets*: Sources need to distribute samples to all vertical networks: they participate in a fan-out layer.

### Horizontal subnets

The shift of the distribution responsibility to a proposer can only be achieved with amplification:
a regular proposer cannot reach every vertical subnet.

#### Publishing

To publish their work, proposers propagate the shard block as a whole on a shard-block subnet.

The proposer can fan-out their work more aggressively, by using the fan-out functionality of GossipSub:
it may publish to all its peers on the subnet, instead of just those in its mesh.

#### Horizontal propagation

Peers on the horizontal subnet are expected to at least perform regular propagation of shard blocks, like participation in any other topic.

*Although this may be sufficient for testnets, expect parameter changes in the spec here.*

#### Horizontal to vertical

Nodes on this same subnet can replicate the sampling efficiently (including a proof for each sample),
and distribute it to any vertical networks that are available to them.

Since the messages are content-addressed (instead of origin-stamped),
multiple publishers of the same samples on a vertical subnet do not hurt performance,
but actually improve it by shortcutting regular propagation on the vertical subnet, and thus lowering the latency to a sample.

### Vertical subnets

Vertical subnets propagate the samples to every peer that is interested.
These interests are randomly sampled and rotate quickly: although not perfect,
this is sufficient to prevent any significant number of nodes from being 100% predictable.

As soon as a sample is missing after the expected propagation time window,
nodes can divert to the pull-model, or ultimately flag it as unavailable data.

Note that the vertical subnets are shared between the different shards,
and a simple hash function `(shard, slot, sample_index) -> subnet_index` defines which samples go where.
This is to evenly distribute samples to subnets, even when one shard has more activity than another.

TODO: define `(shard, slot, sample_index) -> subnet_index` hash function.

#### Slow rotation: Backbone

To allow for subscriptions to rotate quickly and randomly, a backbone is formed to help onboard peers into other topics.

This backbone is based on a pure function of the *node* identity and time:
- Nodes can be found *without additional discovery overhead*:
  peers on a vertical topic can be found by searching the local peerstore for identities that hash to the desired topic(s),
  assuming the peerstore already has a large enough variety of peers.
- Nodes can be held accountable for contributing to the backbone:
  peers that participate in DAS but are not active on the appropriate backbone topics can be scored down.
  *Note: This is experimental; DAS should be light enough for all participants to run, but scoring needs to undergo testing.*

A node should anticipate the backbone topics to subscribe to based on its own identity.
These subscriptions rotate slowly, and with different offsets per node identity to avoid sudden network-wide rotations.

```python
# TODO hash function: (node, time)->subnets
```

Backbone subscription work is outlined in the [DAS participation spec](sampling.md#slow-rotation-backbone).

#### Quick Rotation: Sampling

A node MUST maintain `k` random subscriptions to topics, and rotate these according to the [DAS participation spec](sampling.md#quick-rotation-sampling).
If the node does not already have connected peers on the topic it needs to sample, it can search its peerstore and, if necessary, the DHT for peers in the topic backbone.

## DAS in the Gossip domain: Push

### Topics and messages

Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:

| Name | Message Type |
|----------------------------------|---------------------------|
| `das_sample_{subnet_index}` | `DASSample` |

Also see the [Sharding general networking spec](../sharding/p2p-interface.md) for important topics such as those of the shard-blobs and shard-headers.

#### Horizontal subnets: `shard_blob_{shard}`

This extends the regular `shard_blob_{shard}` topic as [defined in the Sharding networking specification](../sharding/p2p-interface.md#shard-blobs-shard_blob_shard).

If participating in DAS, upon receiving a `signed_blob` for the first time with a `slot` not older than `MAX_RESAMPLE_TIME`,
a subscriber of a `shard_blob_{shard}` SHOULD reconstruct the samples and publish them to vertical subnets.
Take `blob = signed_blob.blob`:
1. Extend the data: `extended_data = extend_data(blob.data)`
2. Create samples with proofs: `samples = sample_data(blob.slot, blob.shard, extended_data)`
3. Fanout-publish the samples to the vertical subnets of its peers (not all vertical subnets may be reached), as in the sketch below.
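
These steps map directly onto the DAS core functions (a sketch, assuming `extend_data` and `sample_data` from the DAS core document are in scope):

```python
# Reconstruct and republish samples for a newly received shard blob.
blob = signed_blob.blob
extended_data = extend_data(blob.data)
samples = sample_data(blob.slot, blob.shard, extended_data)
# Fanout-publish each sample to the matching das_sample_{subnet_index} topic.
```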

The [DAS participation spec](sampling.md#horizontal-subnets) outlines when and where to participate in DAS on horizontal subnets.

#### Vertical subnets: `das_sample_{subnet_index}`

Shard blob samples can be verified with just a 48-byte KZG proof (a commitment to the quotient polynomial),
against the commitment to the blob polynomial, specific to that `(shard, slot)` key.

The following validations MUST pass before forwarding the `sample` on the vertical subnet.
- _[IGNORE]_ The commitment for the (`sample.shard`, `sample.slot`, `sample.index`) tuple must be known.
  If not known, the client MAY queue the sample if it passes formatting conditions.
- _[REJECT]_ `sample.shard`, `sample.slot` and `sample.index` are hashed into a `subnet_index` (TODO: define hash) which MUST match the topic `{subnet_index}` parameter.
- _[REJECT]_ `sample.shard` must be within valid range: `0 <= sample.shard < get_active_shard_count(state, compute_epoch_at_slot(sample.slot))`.
- _[REJECT]_ `sample.index` must be within valid range: `0 <= sample.index < sample_count`, where:
  - `sample_count = (points_count + POINTS_PER_SAMPLE - 1) // POINTS_PER_SAMPLE`
  - `points_count` is the length as claimed along with the commitment, which must be smaller than `MAX_SAMPLES_PER_BLOCK`.
- _[IGNORE]_ The `sample` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
  i.e. validate that `sample.slot <= current_slot`. A client MAY queue future samples for processing at the appropriate slot if they pass formatting conditions.
- _[IGNORE]_ This is the first received sample with the (`sample.shard`, `sample.slot`, `sample.index`) key tuple.
- _[REJECT]_ As already limited by the SSZ list-limit, it is important the sample data is well-formatted and not too large.
- _[REJECT]_ The `sample.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256-bit range is valid.
- _[REJECT]_ The `sample.proof` MUST be valid: `verify_sample(sample, sample_count, commitment)`

Upon receiving a valid sample, it SHOULD be retained for a buffer period if the local node is part of the backbone that covers this sample.
This is to serve other peers that may have missed it.

## DAS in the Req-Resp domain: Pull

To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain.

This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md).

Note that DAS networking uses a different protocol prefix: `/eth2/das/req`.

The result codes are extended with:
- 3: **ResourceUnavailable** -- when the request was valid but cannot be served at this point in time.

TODO: unify with phase0? Lighthouse already defined this in their response codes enum.

### Messages

#### DASQuery

**Protocol ID:** `/eth2/das/req/query/1/`

Request Content:
```
(
  sample_index: SampleIndex
)
```

Response Content:
```
(
  DASSample
)
```

When the sample is:
- Available: respond with a `Success` result code, and the encoded sample.
- Expected to be available, but not: respond with a `ResourceUnavailable` result code.
- Not available, but never of interest to the node: respond with an `InvalidRequest` result code.

When the node is part of the backbone and expected to have the sample, the validity of the request MUST be acknowledged with `Success` or `ResourceUnavailable`.

@ -0,0 +1,84 @@

# Ethereum 2.0 Data Availability Sampling

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Data Availability Sampling](#data-availability-sampling)
- [GossipSub](#gossipsub)
  - [Horizontal subnets](#horizontal-subnets)
  - [Vertical subnets](#vertical-subnets)
    - [Slow rotation: Backbone](#slow-rotation-backbone)
    - [Quick rotation: Sampling](#quick-rotation-sampling)
  - [DAS during network instability](#das-during-network-instability)
    - [Stage 0: Waiting on missing samples](#stage-0-waiting-on-missing-samples)
    - [Stage 1: Pulling missing samples from known peers](#stage-1-pulling-missing-samples-from-known-peers)
    - [Stage 2: Pulling missing data from validators with custody.](#stage-2-pulling-missing-data-from-validators-with-custody)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Data Availability Sampling

TODO: Summary of Data Availability problem

TODO: Summary of solution, why 2x extension, and randomized samples

## GossipSub

### Horizontal subnets

TODO

### Vertical subnets

#### Slow rotation: Backbone

TODO

#### Quick rotation: Sampling

TODO

### DAS during network instability

The GossipSub-based retrieval of samples may not always work.
In such an event, a node can move through the stages below until it recovers data availability.

#### Stage 0: Waiting on missing samples

Wait for the sample to be re-broadcast. Someone may be slow with publishing, or someone else may be able to do the work.

Any node can do the following work to keep the network healthy:
- Common: Listen on a horizontal subnet, chunkify the block data into samples, and propagate the samples to vertical subnets.
- Extreme: Listen on enough vertical subnets, reconstruct the missing samples by recovery, and propagate the recovered samples.

This is not a requirement, but should improve the network stability with little resources, and without any central party.

#### Stage 1: Pulling missing samples from known peers

The more realistic option, to execute when a sample is missing, is to query any node that is known to hold it.
Since *consensus identity is disconnected from network identity*, there is no direct way to contact custody holders
without explicitly asking for the data.

However, *network identities* are still used to build a backbone for each vertical subnet.
These nodes should have received the samples, and can serve a buffer of them on demand.
Although serving these is not directly incentivised, it is little work:
1. Buffer any message you see on the backbone vertical subnets, for a buffer of up to two weeks, as in the sketch below.
2. Serve the samples on request. An individual sample is expected to be only `~ 0.5 KB`, and does not require any pre-processing to serve.
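
A minimal buffering sketch (illustrative; the constants, container shapes, and eviction policy are assumptions, not part of this spec):

```python
from typing import Dict, Optional, Tuple

SAMPLE_BUFFER_PERIOD = 14 * 24 * 60 * 60  # ~two weeks, in seconds

# Keyed by (shard, slot, sample_index); entries are evicted after SAMPLE_BUFFER_PERIOD.
sample_buffer: Dict[Tuple[Shard, Slot, SampleIndex], DASSample] = {}

def on_backbone_sample(sample: DASSample) -> None:
    sample_buffer[(sample.shard, sample.slot, sample.index)] = sample

def serve_query(shard: Shard, slot: Slot, index: SampleIndex) -> Optional[DASSample]:
    return sample_buffer.get((shard, slot, index))
```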

A validator SHOULD make a `DASQuery` request to random peers, until it fails more often than the configured failure-rate.

TODO: detailed failure-mode spec. Stop after trying e.g. 3 peers for any sample in a configured time window (after the gossip period).

#### Stage 2: Pulling missing data from validators with custody.

Pulling samples directly from nodes with validators that have a custody responsibility,
without revealing their identity to the network, is an open problem.

@ -0,0 +1,223 @@

# Ethereum 2.0 The Merge

**Warning:** This document is currently based on [Phase 0](../phase0/beacon-chain.md) but will be rebased to [Altair](../altair/beacon-chain.md) once the latter is shipped.

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
  - [Execution](#execution)
- [Configuration](#configuration)
- [Containers](#containers)
  - [Extended containers](#extended-containers)
    - [`BeaconBlockBody`](#beaconblockbody)
    - [`BeaconState`](#beaconstate)
  - [New containers](#new-containers)
    - [`ExecutionPayload`](#executionpayload)
    - [`ExecutionPayloadHeader`](#executionpayloadheader)
- [Helper functions](#helper-functions)
  - [Misc](#misc)
    - [`is_execution_enabled`](#is_execution_enabled)
    - [`is_transition_completed`](#is_transition_completed)
    - [`is_transition_block`](#is_transition_block)
    - [`compute_time_at_slot`](#compute_time_at_slot)
  - [Block processing](#block-processing)
    - [Execution payload processing](#execution-payload-processing)
      - [`verify_execution_state_transition`](#verify_execution_state_transition)
      - [`process_execution_payload`](#process_execution_payload)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This is a patch implementing the executable beacon chain proposal.
It enshrines transaction execution and validity as a first-class citizen at the core of the beacon chain.

## Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `OpaqueTransaction` | `ByteList[MAX_BYTES_PER_OPAQUE_TRANSACTION]` | a byte-list containing a single [typed transaction envelope](https://eips.ethereum.org/EIPS/eip-2718#opaque-byte-array-rather-than-an-rlp-array) structured as `TransactionType \|\| TransactionPayload` |

## Constants

### Execution

| Name | Value |
| - | - |
| `MAX_BYTES_PER_OPAQUE_TRANSACTION` | `uint64(2**20)` (= 1,048,576) |
| `MAX_EXECUTION_TRANSACTIONS` | `uint64(2**14)` (= 16,384) |
| `BYTES_PER_LOGS_BLOOM` | `uint64(2**8)` (= 256) |

## Configuration

Warning: this configuration is not definitive.

| Name | Value |
| - | - |
| `MERGE_FORK_VERSION` | `Version('0x02000000')` |
| `MERGE_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** |
| `TRANSITION_TOTAL_DIFFICULTY` | **TBD** |

## Containers

### Extended containers

*Note*: Extended SSZ containers inherit all fields from the parent in the original
order and append any additional fields to the end.

#### `BeaconBlockBody`

*Note*: `BeaconBlockBody` fields remain unchanged other than the addition of `execution_payload`.

```python
class BeaconBlockBody(phase0.BeaconBlockBody):
    execution_payload: ExecutionPayload  # [New in Merge]
```

#### `BeaconState`

*Note*: `BeaconState` fields remain unchanged other than the addition of `latest_execution_payload_header`.

```python
class BeaconState(phase0.BeaconState):
    # Execution-layer
    latest_execution_payload_header: ExecutionPayloadHeader  # [New in Merge]
```

### New containers

#### `ExecutionPayload`

The execution payload included in a `BeaconBlockBody`.

```python
class ExecutionPayload(Container):
    block_hash: Hash32  # Hash of execution block
    parent_hash: Hash32
    coinbase: Bytes20
    state_root: Bytes32
    number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    receipt_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    transactions: List[OpaqueTransaction, MAX_EXECUTION_TRANSACTIONS]
```

#### `ExecutionPayloadHeader`

The execution payload header included in a `BeaconState`.

*Note:* Holds execution payload data without transaction bodies.

```python
class ExecutionPayloadHeader(Container):
    block_hash: Hash32  # Hash of execution block
    parent_hash: Hash32
    coinbase: Bytes20
    state_root: Bytes32
    number: uint64
    gas_limit: uint64
    gas_used: uint64
    timestamp: uint64
    receipt_root: Bytes32
    logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM]
    transactions_root: Root
```

## Helper functions

### Misc

#### `is_execution_enabled`

```python
def is_execution_enabled(state: BeaconState, block: BeaconBlock) -> bool:
    return is_transition_completed(state) or is_transition_block(state, block)
```

#### `is_transition_completed`

```python
def is_transition_completed(state: BeaconState) -> bool:
    return state.latest_execution_payload_header != ExecutionPayloadHeader()
```

#### `is_transition_block`

```python
def is_transition_block(state: BeaconState, block: BeaconBlock) -> bool:
    return not is_transition_completed(state) and block.body.execution_payload != ExecutionPayload()
```

#### `compute_time_at_slot`

*Note*: This function is unsafe with respect to overflows and underflows.

```python
def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:
    slots_since_genesis = slot - GENESIS_SLOT
    return uint64(state.genesis_time + slots_since_genesis * SECONDS_PER_SLOT)
```
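
As a worked example (illustrative; assumes `SECONDS_PER_SLOT = 12` and `GENESIS_SLOT = 0`):

```python
# With state.genesis_time = 1_600_000_000 and slot = 3:
# 1_600_000_000 + (3 - 0) * 12 = 1_600_000_036
assert compute_time_at_slot(state, Slot(3)) == state.genesis_time + 36
```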

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)
    # Pre-merge, skip execution payload processing
    if is_execution_enabled(state, block):
        process_execution_payload(state, block.body.execution_payload)  # [New in Merge]
```

#### Execution payload processing

##### `verify_execution_state_transition`

Let `verify_execution_state_transition(execution_payload: ExecutionPayload) -> bool` be the function that verifies a given `ExecutionPayload` with respect to the execution state transition.
The body of the function is implementation dependent.

##### `process_execution_payload`

```python
def process_execution_payload(state: BeaconState, execution_payload: ExecutionPayload) -> None:
    """
    Note: This function is designed to be able to be run in parallel with the other `process_block` sub-functions
    """
    if is_transition_completed(state):
        assert execution_payload.parent_hash == state.latest_execution_payload_header.block_hash
        assert execution_payload.number == state.latest_execution_payload_header.number + 1

    assert execution_payload.timestamp == compute_time_at_slot(state, state.slot)

    assert verify_execution_state_transition(execution_payload)

    state.latest_execution_payload_header = ExecutionPayloadHeader(
        block_hash=execution_payload.block_hash,
        parent_hash=execution_payload.parent_hash,
        coinbase=execution_payload.coinbase,
        state_root=execution_payload.state_root,
        number=execution_payload.number,
        gas_limit=execution_payload.gas_limit,
        gas_used=execution_payload.gas_used,
        timestamp=execution_payload.timestamp,
        receipt_root=execution_payload.receipt_root,
        logs_bloom=execution_payload.logs_bloom,
        transactions_root=hash_tree_root(execution_payload.transactions),
    )
```

@ -0,0 +1,115 @@
|
||||||
|
# Ethereum 2.0 The Merge
|
||||||
|
|
||||||
|
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||||
|
|
||||||
|
## Table of contents
|
||||||
|
<!-- TOC -->
|
||||||
|
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||||
|
|
||||||
|
- [Introduction](#introduction)
|
||||||
|
- [Helpers](#helpers)
|
||||||
|
- [`PowBlock`](#powblock)
|
||||||
|
- [`get_pow_block`](#get_pow_block)
|
||||||
|
- [`is_valid_transition_block`](#is_valid_transition_block)
|
||||||
|
- [Updated fork-choice handlers](#updated-fork-choice-handlers)
|
||||||
|
- [`on_block`](#on_block)
|
||||||
|
|
||||||
|
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||||
|
<!-- /TOC -->

## Introduction

This is the modification of the fork choice according to the executable beacon chain proposal.

*Note*: It introduces the process of transition from the last PoW block to the first PoS block.

### Helpers

#### `PowBlock`

```python
class PowBlock(Container):
    block_hash: Hash32
    is_processed: boolean
    is_valid: boolean
    total_difficulty: uint256
```

#### `get_pow_block`

Let `get_pow_block(block_hash: Hash32) -> PowBlock` be the function that, given the hash of a PoW block, returns its data.

*Note*: The `eth_getBlockByHash` JSON-RPC method does not distinguish invalid blocks from blocks that haven't been processed yet. Either extending this existing method or implementing a new one is required.
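
A sketch of how a client might implement `get_pow_block` on top of such an extended method (the `pow_client` object and its `is_valid`/`total_difficulty` response fields are assumptions here):

```python
def get_pow_block(block_hash: Hash32) -> PowBlock:
    block = pow_client.get_block_by_hash(block_hash)  # hypothetical extended eth_getBlockByHash
    if block is None:
        # Not yet processed by the PoW node
        return PowBlock(block_hash=block_hash, is_processed=False, is_valid=False, total_difficulty=uint256(0))
    return PowBlock(
        block_hash=block_hash,
        is_processed=True,
        is_valid=block.is_valid,
        total_difficulty=block.total_difficulty,
    )
```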

#### `is_valid_transition_block`

Used by the fork-choice handler `on_block`.

```python
def is_valid_transition_block(block: PowBlock) -> bool:
    is_total_difficulty_reached = block.total_difficulty >= TRANSITION_TOTAL_DIFFICULTY
    return block.is_valid and is_total_difficulty_reached
```

### Updated fork-choice handlers

#### `on_block`

*Note*: The only modification is the addition of the verification of transition block conditions.

```python
def on_block(store: Store, signed_block: SignedBeaconBlock) -> None:
    block = signed_block.message
    # Parent block must be known
    assert block.parent_root in store.block_states
    # Make a copy of the state to avoid mutability issues
    pre_state = copy(store.block_states[block.parent_root])
    # Blocks cannot be in the future. If they are, their consideration must be delayed until they are in the past.
    assert get_current_slot(store) >= block.slot

    # Check that block is later than the finalized epoch slot (optimization to reduce calls to get_ancestor)
    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
    assert block.slot > finalized_slot
    # Check block is a descendant of the finalized block at the checkpoint finalized slot
    assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root

    # [New in Merge]
    if is_transition_block(pre_state, block):
        # Delay consideration of block until PoW block is processed by the PoW node
        pow_block = get_pow_block(block.body.execution_payload.parent_hash)
        assert pow_block.is_processed
        assert is_valid_transition_block(pow_block)

    # Check the block is valid and compute the post-state
    state = pre_state.copy()
    state_transition(state, signed_block, True)
    # Add new block to the store
    store.blocks[hash_tree_root(block)] = block
    # Add new state for this block to the store
    store.block_states[hash_tree_root(block)] = state

    # Update justified checkpoint
    if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
        if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch:
            store.best_justified_checkpoint = state.current_justified_checkpoint
        if should_update_justified_checkpoint(store, state.current_justified_checkpoint):
            store.justified_checkpoint = state.current_justified_checkpoint

    # Update finalized checkpoint
    if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch:
        store.finalized_checkpoint = state.finalized_checkpoint

        # Potentially update justified if different from store
        if store.justified_checkpoint != state.current_justified_checkpoint:
            # Update justified if new justified is later than store justified
            if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch:
                store.justified_checkpoint = state.current_justified_checkpoint
                return

            # Update justified if store justified is not in chain with finalized checkpoint
            finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
            ancestor_at_finalized_slot = get_ancestor(store, store.justified_checkpoint.root, finalized_slot)
            if ancestor_at_finalized_slot != store.finalized_checkpoint.root:
                store.justified_checkpoint = state.current_justified_checkpoint
```
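
`is_transition_block` is defined in the accompanying Merge beacon chain document; as a reminder of its intended semantics, a sketch (reconstructed here, not normative):

```python
def is_transition_block(state: BeaconState, block: BeaconBlock) -> bool:
    # The transition block is the first block carrying a non-empty
    # execution payload while the transition is not yet completed
    return not is_transition_completed(state) and block.body.execution_payload != ExecutionPayload()
```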

@@ -0,0 +1,72 @@
# Ethereum 2.0 The Merge

**Warning:** This document is currently based on [Phase 0](../phase0/validator.md) but will be rebased to [Altair](../altair/validator.md) once the latter is shipped.

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Prerequisites](#prerequisites)
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
  - [Block proposal](#block-proposal)
    - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
      - [Execution Payload](#execution-payload)
        - [`get_pow_chain_head`](#get_pow_chain_head)
        - [`produce_execution_payload`](#produce_execution_payload)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document represents the changes to be made in the code of an "honest validator" to implement the executable beacon chain proposal.

## Prerequisites

This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md) guide. All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.

All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [The Merge](./beacon-chain.md) are requisite for this document and used throughout. Please see the related Beacon Chain doc before continuing and use it as a reference throughout.

## Beacon chain responsibilities

All validator responsibilities remain unchanged other than those noted below, namely the transition block handling and the addition of `ExecutionPayload`.

### Block proposal

#### Constructing the `BeaconBlockBody`

##### Execution Payload

###### `get_pow_chain_head`

Let `get_pow_chain_head() -> PowBlock` be the function that returns the head of the PoW chain. The body of the function is implementation specific.

###### `produce_execution_payload`

Let `produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload` be the function that produces a new instance of the execution payload.
The body of the function is implementation dependent.
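
As a sketch of the expected interface (the `EXECUTION_ENGINE` adapter and its `assemble_payload` method are assumptions, mirroring the hypothetical adapter used for payload verification in the beacon chain doc):

```python
def produce_execution_payload(parent_hash: Hash32, timestamp: uint64) -> ExecutionPayload:
    # Ask the execution client to build a payload on top of `parent_hash`
    # with the given timestamp; transaction selection is up to the client
    return EXECUTION_ENGINE.assemble_payload(parent_hash=parent_hash, timestamp=timestamp)
```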

* Set `block.body.execution_payload = get_execution_payload(state)` where:

```python
def get_execution_payload(state: BeaconState) -> ExecutionPayload:
    if not is_transition_completed(state):
        pow_block = get_pow_chain_head()
        if not is_valid_transition_block(pow_block):
            # Pre-merge, empty payload
            return ExecutionPayload()
        else:
            # Signify merge via producing on top of the last PoW block
            timestamp = compute_time_at_slot(state, state.slot)
            return produce_execution_payload(pow_block.block_hash, timestamp)

    # Post-merge, normal payload
    execution_parent_hash = state.latest_execution_payload_header.block_hash
    timestamp = compute_time_at_slot(state, state.slot)
    return produce_execution_payload(execution_parent_hash, timestamp)
```
@@ -13,6 +13,7 @@
 - [Misc](#misc)
 - [Gwei values](#gwei-values)
 - [Initial values](#initial-values)
+- [Withdrawal prefixes](#withdrawal-prefixes)
 - [Time parameters](#time-parameters)
 - [State list lengths](#state-list-lengths)
 - [Rewards and penalties](#rewards-and-penalties)

@@ -113,7 +114,12 @@
 - [`process_rewards_and_penalties`](#process_rewards_and_penalties)
 - [Registry updates](#registry-updates)
 - [Slashings](#slashings)
-- [Final updates](#final-updates)
+- [Eth1 data votes updates](#eth1-data-votes-updates)
+- [Effective balances updates](#effective-balances-updates)
+- [Slashings balances updates](#slashings-balances-updates)
+- [Randao mixes updates](#randao-mixes-updates)
+- [Historical roots updates](#historical-roots-updates)
+- [Participation records rotation](#participation-records-rotation)
 - [Block processing](#block-processing)
   - [Block header](#block-header)
   - [RANDAO](#randao)

@@ -133,7 +139,7 @@
 This document represents the specification for Phase 0 of Ethereum 2.0 -- The Beacon Chain.

 At the core of Ethereum 2.0 is a system chain called the "beacon chain". The beacon chain stores and manages the registry of validators. In the initial deployment phases of Ethereum 2.0, the only mechanism to become a validator is to make a one-way ETH transaction to a deposit contract on Ethereum 1.0. Activation as a validator happens when Ethereum 1.0 deposit receipts are processed by the beacon chain, the activation balance is reached, and a queuing process is completed. Exit is either voluntary or done forcibly as a penalty for misbehavior.
-The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (Phase 1) and proof-of-stake votes for a beacon block (Phase 0).
+The primary source of load on the beacon chain is "attestations". Attestations are simultaneously availability votes for a shard block (in a later Eth2 upgrade) and proof-of-stake votes for a beacon block (Phase 0).

 ## Notation

@@ -151,6 +157,7 @@ We define the following Python custom types for type hinting and readability:
 | `ValidatorIndex` | `uint64` | a validator registry index |
 | `Gwei` | `uint64` | an amount in Gwei |
 | `Root` | `Bytes32` | a Merkle root |
+| `Hash32` | `Bytes32` | a 256-bit hash |
 | `Version` | `Bytes4` | a fork version number |
 | `DomainType` | `Bytes4` | a domain type |
 | `ForkDigest` | `Bytes4` | a digest of the current fork data |
@@ -158,6 +165,7 @@ We define the following Python custom types for type hinting and readability:
 | `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
 | `BLSSignature` | `Bytes96` | a BLS12-381 signature |

+
 ## Constants

 The following values are (non-configurable) constants used throughout the specification.

@@ -209,7 +217,13 @@ The following values are (non-configurable) constants used throughout the specification.
 | Name | Value |
 | - | - |
 | `GENESIS_FORK_VERSION` | `Version('0x00000000')` |
+
+### Withdrawal prefixes
+
+| Name | Value |
+| - | - |
 | `BLS_WITHDRAWAL_PREFIX` | `Bytes1('0x00')` |
+| `ETH1_ADDRESS_WITHDRAWAL_PREFIX` | `Bytes1('0x01')` |

 ### Time parameters

@@ -245,12 +259,12 @@ The following values are (non-configurable) constants used throughout the specification.
 | `WHISTLEBLOWER_REWARD_QUOTIENT` | `uint64(2**9)` (= 512) |
 | `PROPOSER_REWARD_QUOTIENT` | `uint64(2**3)` (= 8) |
 | `INACTIVITY_PENALTY_QUOTIENT` | `uint64(2**26)` (= 67,108,864) |
-| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (=128) |
+| `MIN_SLASHING_PENALTY_QUOTIENT` | `uint64(2**7)` (= 128) |
 | `PROPORTIONAL_SLASHING_MULTIPLIER` | `uint64(1)` |

 - The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak.

-- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accoutable safety margin.
+- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin.

 ### Max operations per block

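A quick numerical check of the first bullet (illustrative only):

```python
import math

INACTIVITY_PENALTY_QUOTIENT = 2**26
INVERSE_SQRT_E_DROP_TIME = 2**13  # epochs

# Balance fraction retained after n = INVERSE_SQRT_E_DROP_TIME epochs:
# (1 - 1/Q) ** (n**2 / 2), and here n**2 / 2 == Q / 2
retained = (1 - 1 / INACTIVITY_PENALTY_QUOTIENT) ** (INVERSE_SQRT_E_DROP_TIME**2 / 2)
print(round(retained, 4), round(1 / math.sqrt(math.e), 4))  # 0.6065 0.6065
```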
@@ -362,7 +376,7 @@ class PendingAttestation(Container):
 class Eth1Data(Container):
     deposit_root: Root
     deposit_count: uint64
-    block_hash: Bytes32
+    block_hash: Hash32
 ```

 #### `HistoricalBatch`

@@ -1147,7 +1161,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
     state = BeaconState(
         genesis_time=eth1_timestamp + GENESIS_DELAY,
         fork=fork,
-        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=len(deposits)),
+        eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))),
         latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())),
         randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR,  # Seed RANDAO with Eth1 entropy
     )

@@ -1250,7 +1264,12 @@ def process_epoch(state: BeaconState) -> None:
     process_rewards_and_penalties(state)
     process_registry_updates(state)
     process_slashings(state)
-    process_final_updates(state)
+    process_eth1_data_reset(state)
+    process_effective_balance_updates(state)
+    process_slashings_reset(state)
+    process_randao_mixes_reset(state)
+    process_historical_roots_update(state)
+    process_participation_record_updates(state)
 ```

 #### Helper functions

@@ -1303,7 +1322,19 @@ def process_justification_and_finalization(state: BeaconState) -> None:
     # Skip FFG updates in the first two epochs to avoid corner cases that might result in modifying this stub.
     if get_current_epoch(state) <= GENESIS_EPOCH + 1:
         return
+    previous_attestations = get_matching_target_attestations(state, get_previous_epoch(state))
+    current_attestations = get_matching_target_attestations(state, get_current_epoch(state))
+    total_active_balance = get_total_active_balance(state)
+    previous_target_balance = get_attesting_balance(state, previous_attestations)
+    current_target_balance = get_attesting_balance(state, current_attestations)
+    weigh_justification_and_finalization(state, total_active_balance, previous_target_balance, current_target_balance)
+```
+
+```python
+def weigh_justification_and_finalization(state: BeaconState,
+                                         total_active_balance: Gwei,
+                                         previous_epoch_target_balance: Gwei,
+                                         current_epoch_target_balance: Gwei) -> None:
     previous_epoch = get_previous_epoch(state)
     current_epoch = get_current_epoch(state)
     old_previous_justified_checkpoint = state.previous_justified_checkpoint

@@ -1313,13 +1344,11 @@ def process_justification_and_finalization(state: BeaconState) -> None:
     state.previous_justified_checkpoint = state.current_justified_checkpoint
     state.justification_bits[1:] = state.justification_bits[:JUSTIFICATION_BITS_LENGTH - 1]
     state.justification_bits[0] = 0b0
-    matching_target_attestations = get_matching_target_attestations(state, previous_epoch)  # Previous epoch
-    if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
+    if previous_epoch_target_balance * 3 >= total_active_balance * 2:
         state.current_justified_checkpoint = Checkpoint(epoch=previous_epoch,
                                                         root=get_block_root(state, previous_epoch))
         state.justification_bits[1] = 0b1
-    matching_target_attestations = get_matching_target_attestations(state, current_epoch)  # Current epoch
-    if get_attesting_balance(state, matching_target_attestations) * 3 >= get_total_active_balance(state) * 2:
+    if current_epoch_target_balance * 3 >= total_active_balance * 2:
         state.current_justified_checkpoint = Checkpoint(epoch=current_epoch,
                                                         root=get_block_root(state, current_epoch))
         state.justification_bits[0] = 0b1
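
*Note*: The `* 3 >= * 2` comparisons above are the two-thirds supermajority condition expressed in integer arithmetic: `previous_epoch_target_balance * 3 >= total_active_balance * 2` holds exactly when the attesting target balance is at least 2/3 of the total active balance, with no precision loss from division.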

@@ -1557,15 +1586,19 @@ def process_slashings(state: BeaconState) -> None:
         decrease_balance(state, ValidatorIndex(index), penalty)
 ```

-#### Final updates
+#### Eth1 data votes updates

 ```python
-def process_final_updates(state: BeaconState) -> None:
-    current_epoch = get_current_epoch(state)
-    next_epoch = Epoch(current_epoch + 1)
+def process_eth1_data_reset(state: BeaconState) -> None:
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     # Reset eth1 data votes
     if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
         state.eth1_data_votes = []
+```
+
+#### Effective balances updates
+
+```python
+def process_effective_balance_updates(state: BeaconState) -> None:
     # Update effective balances with hysteresis
     for index, validator in enumerate(state.validators):
         balance = state.balances[index]

@@ -1577,14 +1610,41 @@ def process_final_updates(state: BeaconState) -> None:
             or validator.effective_balance + UPWARD_THRESHOLD < balance
         ):
             validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
+```
+
+#### Slashings balances updates
+
+```python
+def process_slashings_reset(state: BeaconState) -> None:
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     # Reset slashings
     state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)
+```
+
+#### Randao mixes updates
+
+```python
+def process_randao_mixes_reset(state: BeaconState) -> None:
+    current_epoch = get_current_epoch(state)
+    next_epoch = Epoch(current_epoch + 1)
     # Set randao mix
     state.randao_mixes[next_epoch % EPOCHS_PER_HISTORICAL_VECTOR] = get_randao_mix(state, current_epoch)
+```
+
+#### Historical roots updates
+
+```python
+def process_historical_roots_update(state: BeaconState) -> None:
     # Set historical root accumulator
+    next_epoch = Epoch(get_current_epoch(state) + 1)
     if next_epoch % (SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH) == 0:
         historical_batch = HistoricalBatch(block_roots=state.block_roots, state_roots=state.state_roots)
         state.historical_roots.append(hash_tree_root(historical_batch))
+```
+
+#### Participation records rotation
+
+```python
+def process_participation_record_updates(state: BeaconState) -> None:
     # Rotate current/previous epoch attestations
     state.previous_epoch_attestations = state.current_epoch_attestations
     state.current_epoch_attestations = []
@@ -58,12 +58,13 @@ The amount of ETH (rounded down to the closest Gwei) sent to the deposit contract

 #### Withdrawal credentials

-One of the `DepositData` fields is `withdrawal_credentials`. It is a commitment to credentials for withdrawing validator balance (e.g. to another validator, or to shards). The first byte of `withdrawal_credentials` is a version number. As of now, the only expected format is as follows:
-
-* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
-* `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]` where `withdrawal_pubkey` is a BLS pubkey
-
-The private key corresponding to `withdrawal_pubkey` will be required to initiate a withdrawal. It can be stored separately until a withdrawal is required, e.g. in cold storage.
+One of the `DepositData` fields is `withdrawal_credentials`, which constrains validator withdrawals.
+The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
+The withdrawal prefixes currently supported are `BLS_WITHDRAWAL_PREFIX` and `ETH1_ADDRESS_WITHDRAWAL_PREFIX`.
+Read more in the [validator guide](./validator.md#withdrawal-credentials).
+
+*Note*: The deposit contract does not validate the `withdrawal_credentials` field.
+Support for new withdrawal prefixes can be added without modifying the deposit contract.

 #### `DepositEvent` log

@@ -103,7 +103,7 @@ It consists of four main sections:
 - [Compression/Encoding](#compressionencoding)
   - [Why are we using SSZ for encoding?](#why-are-we-using-ssz-for-encoding)
   - [Why are we compressing, and at which layers?](#why-are-we-compressing-and-at-which-layers)
-  - [Why are using Snappy for compression?](#why-are-using-snappy-for-compression)
+  - [Why are we using Snappy for compression?](#why-are-we-using-snappy-for-compression)
   - [Can I get access to unencrypted bytes on the wire for debugging purposes?](#can-i-get-access-to-unencrypted-bytes-on-the-wire-for-debugging-purposes)
   - [What are SSZ type size bounds?](#what-are-ssz-type-size-bounds)
 - [libp2p implementations matrix](#libp2p-implementations-matrix)
|
||||||
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
There are two primary global topics used to propagate beacon blocks (`beacon_block`)
|
||||||
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network.
|
||||||
|
|
||||||
There are three additional global topics are used to propagate lower frequency validator messages
|
There are three additional global topics that are used to propagate lower frequency validator messages
|
||||||
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
(`voluntary_exit`, `proposer_slashing`, and `attester_slashing`).
|
||||||
|
|
||||||
##### `beacon_block`
|
##### `beacon_block`
|
||||||
|
@ -315,6 +315,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block`
|
||||||
(via both gossip and non-gossip sources)
|
(via both gossip and non-gossip sources)
|
||||||
(a client MAY queue blocks for processing once the parent block is retrieved).
|
(a client MAY queue blocks for processing once the parent block is retrieved).
|
||||||
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
- _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation.
|
||||||
|
- _[REJECT]_ The block is from a higher slot than its parent.
|
||||||
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
- _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e.
|
||||||
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
`get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch))
|
||||||
== store.finalized_checkpoint.root`
|
== store.finalized_checkpoint.root`
|
||||||
|
@ -336,8 +337,6 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_
|
||||||
(a client MAY queue future aggregates for processing at the appropriate slot).
|
(a client MAY queue future aggregates for processing at the appropriate slot).
|
||||||
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
|
- _[REJECT]_ The aggregate attestation's epoch matches its target -- i.e. `aggregate.data.target.epoch ==
|
||||||
compute_epoch_at_slot(aggregate.data.slot)`
|
compute_epoch_at_slot(aggregate.data.slot)`
|
||||||
- _[IGNORE]_ The valid aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen
|
|
||||||
(via aggregate gossip, within a verified block, or through the creation of an equivalent aggregate locally).
|
|
||||||
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
|
- _[IGNORE]_ The `aggregate` is the first valid aggregate received for the aggregator
|
||||||
with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
|
with index `aggregate_and_proof.aggregator_index` for the epoch `aggregate.data.target.epoch`.
|
||||||
- _[REJECT]_ The attestation has participants --
|
- _[REJECT]_ The attestation has participants --
|
||||||
|
@ -421,7 +420,7 @@ The following validations MUST pass before forwarding the `attestation` on the s
|
||||||
- _[REJECT]_ The signature of `attestation` is valid.
|
- _[REJECT]_ The signature of `attestation` is valid.
|
||||||
- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
|
- _[IGNORE]_ The block being voted for (`attestation.data.beacon_block_root`) has been seen
|
||||||
(via both gossip and non-gossip sources)
|
(via both gossip and non-gossip sources)
|
||||||
(a client MAY queue aggregates for processing once block is retrieved).
|
(a client MAY queue attestations for processing once block is retrieved).
|
||||||
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
- _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||||
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
- _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e.
|
||||||
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
`get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root`
|
||||||
|

@@ -436,7 +435,7 @@ The following validations MUST pass before forwarding the `attestation` on the subnet
 Attestation broadcasting is grouped into subnets defined by a topic.
 The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`.
 The correct subnet for an attestation can be calculated with `compute_subnet_for_attestation`.
-`beacon_attestation_{subnet_id}` topics, are rotated through throughout the epoch in a similar fashion to rotating through shards in committees in Phase 1.
+`beacon_attestation_{subnet_id}` topics are rotated through throughout the epoch in a similar fashion to rotating through shards in committees (future Eth2 upgrade).
 The subnets are rotated through with `committees_per_slot = get_committee_count_per_slot(state, attestation.data.target.epoch)` subnets per slot.

 Unaggregated attestations are sent as `Attestation`s to the subnet topic,
|
||||||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
|
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated.
|
||||||
If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
|
||||||
|
|
||||||
*Note*: `fork_digest` is composed of values that are not not known until the genesis block/state are available.
|
*Note*: `fork_digest` is composed of values that are not known until the genesis block/state are available.
|
||||||
Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
|
Due to this, clients SHOULD NOT form ENRs and begin peer discovery until genesis values are known.
|
||||||
One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
|
One notable exception to this rule is the distribution of bootnode ENRs prior to genesis.
|
||||||
In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
|
In this case, bootnode ENRs SHOULD be initially distributed with `eth2` field set as
|
||||||
|
@ -1240,7 +1239,7 @@ the node's fork choice prevents integration of these messages into the actual co
|
||||||
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
|
Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel.
|
||||||
The exact grouping will be dependent on more involved network tests.
|
The exact grouping will be dependent on more involved network tests.
|
||||||
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet).
|
||||||
The value is currently set to to be equal `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise.
|
||||||
|
|
||||||
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots?
|
||||||
|
|
||||||
|
@ -1316,10 +1315,10 @@ Requests are segregated by protocol ID to:
|
||||||
6. Parallelise RFCs (or Eth2 EIPs).
|
6. Parallelise RFCs (or Eth2 EIPs).
|
||||||
By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently
|
By decoupling requests from one another, each RFC that affects the request protocol can be deployed/tested/debated independently
|
||||||
without relying on a synchronization point to version the general top-level protocol.
|
without relying on a synchronization point to version the general top-level protocol.
|
||||||
1. This has the benefit that clients can explicitly choose which RFCs to deploy
|
1. This has the benefit that clients can explicitly choose which RFCs to deploy
|
||||||
without buying into all other RFCs that may be included in that top-level version.
|
without buying into all other RFCs that may be included in that top-level version.
|
||||||
2. Affording this level of granularity with a top-level protocol would imply creating as many variants
|
2. Affording this level of granularity with a top-level protocol would imply creating as many variants
|
||||||
(e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
|
(e.g. /protocol/43-{a,b,c,d,...}) as the cartesian product of RFCs inflight, O(n^2).
|
||||||
7. Allow us to simplify the payload of requests.
|
7. Allow us to simplify the payload of requests.
|
||||||
Request-id’s and method-ids no longer need to be sent.
|
Request-id’s and method-ids no longer need to be sent.
|
||||||
The encoding/request type and version can all be handled by the framework.
|
The encoding/request type and version can all be handled by the framework.
|
||||||
|
@ -1386,7 +1385,7 @@ Thus, it may happen that we need to transmit an empty list - there are several w
|
||||||
|
|
||||||
Semantically, it is not an error that a block is missing during a slot making option 2 unnatural.
|
Semantically, it is not an error that a block is missing during a slot making option 2 unnatural.
|
||||||
|
|
||||||
Option 1 allows allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
Option 1 allows the responder to signal "no block", but this information may be wrong - for example in the case of a malicious node.
|
||||||
|
|
||||||
Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response,
|
Under option 0, there is no way for a client to distinguish between a slot without a block and an incomplete response,
|
||||||
but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen.
|
but given that it already must contain logic to handle the uncertainty of a malicious peer, option 0 was chosen.
|
||||||
|
@ -1551,7 +1550,7 @@ This looks different depending on the interaction layer:
|
||||||
implementers are encouraged to encapsulate the encoding and compression logic behind
|
implementers are encouraged to encapsulate the encoding and compression logic behind
|
||||||
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams.
|
||||||
|
|
||||||
### Why are using Snappy for compression?
|
### Why are we using Snappy for compression?
|
||||||
|
|
||||||
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks,
|
||||||
and can calculate the size of the uncompressed object without inflating it in memory.
|
and can calculate the size of the uncompressed object without inflating it in memory.
|
||||||
|
|
|

@@ -12,10 +12,16 @@ This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](./beacon-chain.md)
 - [Prerequisites](#prerequisites)
 - [Constants](#constants)
   - [Misc](#misc)
+- [Containers](#containers)
+  - [`Eth1Block`](#eth1block)
+  - [`AggregateAndProof`](#aggregateandproof)
+  - [`SignedAggregateAndProof`](#signedaggregateandproof)
 - [Becoming a validator](#becoming-a-validator)
   - [Initialization](#initialization)
     - [BLS public key](#bls-public-key)
-    - [BLS withdrawal key](#bls-withdrawal-key)
+    - [Withdrawal credentials](#withdrawal-credentials)
+      - [`BLS_WITHDRAWAL_PREFIX`](#bls_withdrawal_prefix)
+      - [`ETH1_ADDRESS_WITHDRAWAL_PREFIX`](#eth1_address_withdrawal_prefix)
   - [Submit deposit](#submit-deposit)
   - [Process deposit](#process-deposit)
     - [Validator index](#validator-index)
|
||||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||||
- [Randao reveal](#randao-reveal)
|
- [Randao reveal](#randao-reveal)
|
||||||
- [Eth1 Data](#eth1-data)
|
- [Eth1 Data](#eth1-data)
|
||||||
- [`Eth1Block`](#eth1block)
|
|
||||||
- [`get_eth1_data`](#get_eth1_data)
|
- [`get_eth1_data`](#get_eth1_data)
|
||||||
- [Proposer slashings](#proposer-slashings)
|
- [Proposer slashings](#proposer-slashings)
|
||||||
- [Attester slashings](#attester-slashings)
|
- [Attester slashings](#attester-slashings)
|
||||||
|
@ -58,8 +63,6 @@ This is an accompanying document to [Ethereum 2.0 Phase 0 -- The Beacon Chain](.
|
||||||
- [Aggregation bits](#aggregation-bits-1)
|
- [Aggregation bits](#aggregation-bits-1)
|
||||||
- [Aggregate signature](#aggregate-signature-1)
|
- [Aggregate signature](#aggregate-signature-1)
|
||||||
- [Broadcast aggregate](#broadcast-aggregate)
|
- [Broadcast aggregate](#broadcast-aggregate)
|
||||||
- [`AggregateAndProof`](#aggregateandproof)
|
|
||||||
- [`SignedAggregateAndProof`](#signedaggregateandproof)
|
|
||||||
- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
|
- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
|
||||||
- [How to avoid slashing](#how-to-avoid-slashing)
|
- [How to avoid slashing](#how-to-avoid-slashing)
|
||||||
- [Proposer slashing](#proposer-slashing)
|
- [Proposer slashing](#proposer-slashing)
|
||||||
|

@@ -90,6 +93,35 @@ All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md)
 | `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours |
 | `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. |

+## Containers
+
+### `Eth1Block`
+
+```python
+class Eth1Block(Container):
+    timestamp: uint64
+    deposit_root: Root
+    deposit_count: uint64
+    # All other eth1 block fields
+```
+
+### `AggregateAndProof`
+
+```python
+class AggregateAndProof(Container):
+    aggregator_index: ValidatorIndex
+    aggregate: Attestation
+    selection_proof: BLSSignature
+```
+
+### `SignedAggregateAndProof`
+
+```python
+class SignedAggregateAndProof(Container):
+    message: AggregateAndProof
+    signature: BLSSignature
+```
+
 ## Becoming a validator

 ### Initialization

@@ -100,14 +132,41 @@ A validator must initialize many parameters locally before submitting a deposit

 Validator public keys are [G1 points](beacon-chain.md#bls-signatures) on the [BLS12-381 curve](https://z.cash/blog/new-snark-curve). A private key, `privkey`, must be securely generated along with the resultant `pubkey`. This `privkey` must be "hot", that is, constantly available to sign data throughout the lifetime of the validator.

-#### BLS withdrawal key
+#### Withdrawal credentials

-A secondary withdrawal private key, `withdrawal_privkey`, must also be securely generated along with the resultant `withdrawal_pubkey`. This `withdrawal_privkey` does not have to be available for signing during the normal lifetime of a validator and can live in "cold storage".
-
-The validator constructs their `withdrawal_credentials` via the following:
-
-* Set `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`.
-* Set `withdrawal_credentials[1:] == hash(withdrawal_pubkey)[1:]`.
+The `withdrawal_credentials` field constrains validator withdrawals.
+The first byte of this 32-byte field is a withdrawal prefix which defines the semantics of the remaining 31 bytes.
+
+The following withdrawal prefixes are currently supported.
+
+##### `BLS_WITHDRAWAL_PREFIX`
+
+Withdrawal credentials with the BLS withdrawal prefix allow a BLS key pair
+`(bls_withdrawal_privkey, bls_withdrawal_pubkey)` to trigger withdrawals.
+The `withdrawal_credentials` field must be such that:
+
+* `withdrawal_credentials[:1] == BLS_WITHDRAWAL_PREFIX`
+* `withdrawal_credentials[1:] == hash(bls_withdrawal_pubkey)[1:]`
+
+*Note*: The `bls_withdrawal_privkey` is not required for validating and can be kept in cold storage.
+
+##### `ETH1_ADDRESS_WITHDRAWAL_PREFIX`
+
+Withdrawal credentials with the Eth1 address withdrawal prefix specify
+a 20-byte Eth1 address `eth1_withdrawal_address` as the recipient for all withdrawals.
+The `eth1_withdrawal_address` can be the address of either an externally owned account or of a contract.
+
+The `withdrawal_credentials` field must be such that:
+
+* `withdrawal_credentials[:1] == ETH1_ADDRESS_WITHDRAWAL_PREFIX`
+* `withdrawal_credentials[1:12] == b'\x00' * 11`
+* `withdrawal_credentials[12:] == eth1_withdrawal_address`
+
+After the merge of the current Ethereum application layer (Eth1) into the Beacon Chain (Eth2),
+withdrawals to `eth1_withdrawal_address` will be normal ETH transfers (with no payload other than the validator's ETH)
+triggered by a user transaction that will set the gas price and gas limit as well as pay fees.
+As long as the account or contract with address `eth1_withdrawal_address` can receive ETH transfers,
+the future withdrawal protocol is agnostic to all other implementation details.

 ### Submit deposit

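For concreteness, a sketch of constructing both kinds of credentials (assuming `hash` is SHA-256, as elsewhere in the Phase 0 spec; the helper names are illustrative only):

```python
from hashlib import sha256

BLS_WITHDRAWAL_PREFIX = b'\x00'
ETH1_ADDRESS_WITHDRAWAL_PREFIX = b'\x01'


def bls_withdrawal_credentials(bls_withdrawal_pubkey: bytes) -> bytes:
    # 0x00 prefix followed by the last 31 bytes of hash(pubkey)
    return BLS_WITHDRAWAL_PREFIX + sha256(bls_withdrawal_pubkey).digest()[1:]


def eth1_address_withdrawal_credentials(eth1_withdrawal_address: bytes) -> bytes:
    # 0x01 prefix, 11 zero bytes of padding, then the 20-byte Eth1 address
    assert len(eth1_withdrawal_address) == 20
    return ETH1_ADDRESS_WITHDRAWAL_PREFIX + b'\x00' * 11 + eth1_withdrawal_address
```

Both helpers return 32 bytes, matching the constraints listed above.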

@@ -273,20 +332,10 @@ If over half of the block proposers in the current Eth1 voting period vote for the same
 `eth1_data` then `state.eth1_data` updates immediately allowing new deposits to be processed.
 Each deposit in `block.body.deposits` must verify against `state.eth1_data.eth1_deposit_root`.

-###### `Eth1Block`
+###### `get_eth1_data`

 Let `Eth1Block` be an abstract object representing Eth1 blocks with the `timestamp` and deposit contract data available.

-```python
-class Eth1Block(Container):
-    timestamp: uint64
-    deposit_root: Root
-    deposit_count: uint64
-    # All other eth1 block fields
-```
-
-###### `get_eth1_data`
-
 Let `get_eth1_data(block: Eth1Block) -> Eth1Data` be the function that returns the Eth1 data for a given Eth1 block.

 An honest block proposer sets `block.body.eth1_data = get_eth1_vote(state, eth1_chain)` where:

@@ -327,7 +376,9 @@ def get_eth1_vote(state: BeaconState, eth1_chain: Sequence[Eth1Block]) -> Eth1Data:
     valid_votes = [vote for vote in state.eth1_data_votes if vote in votes_to_consider]

     # Default vote on latest eth1 block data in the period range unless eth1 chain is not live
-    default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state.eth1_data
+    # Non-substantive casting for linter
+    state_eth1_data: Eth1Data = state.eth1_data
+    default_vote = votes_to_consider[len(votes_to_consider) - 1] if any(votes_to_consider) else state_eth1_data

     return max(
         valid_votes,

@@ -462,7 +513,7 @@ The `subnet_id` for the `attestation` is calculated with:
 def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64:
     """
     Compute the correct subnet for an attestation for Phase 0.
-    Note, this mimics expected Phase 1 behavior where attestations will be mapped to their shard subnet.
+    Note, this mimics expected future behavior where attestations will be mapped to their shard subnet.
     """
     slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH)
     committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
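
For intuition, a worked example (a sketch: the lines elided by the hunk are assumed to reduce the committee offset modulo `ATTESTATION_SUBNET_COUNT`):

```python
ATTESTATION_SUBNET_COUNT = 64
SLOTS_PER_EPOCH = 32


def compute_subnet_for_attestation(committees_per_slot: int, slot: int, committee_index: int) -> int:
    slots_since_epoch_start = slot % SLOTS_PER_EPOCH
    committees_since_epoch_start = committees_per_slot * slots_since_epoch_start
    # Assumed completion of the function shown above
    return (committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT


# Slot 5 of the epoch, 4 committees per slot, committee index 2:
assert compute_subnet_for_attestation(4, 5, 2) == 22  # (4 * 5 + 2) % 64
```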

@@ -538,7 +589,7 @@ def get_aggregate_and_proof(state: BeaconState,
     )
 ```

-Then `signed_aggregate_and_proof = SignedAggregateAndProof(message=aggregate_and_proof, signature=signature)` is constructed and broadast. Where `signature` is obtained from:
+Then `signed_aggregate_and_proof = SignedAggregateAndProof(message=aggregate_and_proof, signature=signature)` is constructed and broadcast, where `signature` is obtained from:

 ```python
 def get_aggregate_and_proof_signature(state: BeaconState,

@@ -550,23 +601,6 @@ def get_aggregate_and_proof_signature(state: BeaconState,
     return bls.Sign(privkey, signing_root)
 ```

-##### `AggregateAndProof`
-
-```python
-class AggregateAndProof(Container):
-    aggregator_index: ValidatorIndex
-    aggregate: Attestation
-    selection_proof: BLSSignature
-```
-
-##### `SignedAggregateAndProof`
-
-```python
-class SignedAggregateAndProof(Container):
-    message: AggregateAndProof
-    signature: BLSSignature
-```
-
 ## Phase 0 attestation subnet stability

 Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must:
|
||||||
1) Private keys -- private keys should be protected from being exported accidentally or by an attacker.
|
1) Private keys -- private keys should be protected from being exported accidentally or by an attacker.
|
||||||
2) Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object.
|
2) Slashing -- before a validator client signs a message it should validate the data, check it against a local slashing database (do not sign a slashable attestation or block) and update its internal slashing database with the newly signed object.
|
||||||
3) Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format.
|
3) Recovered validator -- Recovering a validator from a private key will result in an empty local slashing db. Best practice is to import (from a trusted source) that validator's attestation history. See [EIP 3076](https://github.com/ethereum/EIPs/pull/3076/files) for a standard slashing interchange format.
|
||||||
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity leak and potential ejection due to low balance.
|
4) Far future signing requests -- A validator client can be requested to sign a far into the future attestation, resulting in a valid non-slashable request. If the validator client signs this message, it will result in it blocking itself from attesting any other attestation until the beacon-chain reaches that far into the future epoch. This will result in an inactivity penalty and potential ejection due to low balance.
|
||||||
A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time server attacks and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database indicated a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event.
|
A validator client should prevent itself from signing such requests by: a) keeping a local time clock if possible and following best practices to stop time server attacks and b) refusing to sign, by default, any message that has a large (>6h) gap from the current slashing protection database indicated a time "jump" or a long offline event. The administrator can manually override this protection to restart the validator after a genuine long offline event.
|
||||||
|
|
|
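The `>6h` gap rule lends itself to simple slot arithmetic. Below is a minimal sketch, assuming mainnet's 12-second slots and a slashing protection database that records the last signed slot; all names are illustrative, not part of the spec:

```python
# Minimal sketch of the ">6h gap" guard described above. SECONDS_PER_SLOT is
# the mainnet value (assumed); a real validator client would read the last
# signed slot from its slashing protection database.
SECONDS_PER_SLOT = 12
MAX_GAP_SECONDS = 6 * 60 * 60  # refuse to sign across a larger jump by default


def is_suspicious_time_jump(last_signed_slot: int, requested_slot: int) -> bool:
    """Return True if the requested message is too far ahead of the last
    entry in the local slashing protection database."""
    gap_seconds = (requested_slot - last_signed_slot) * SECONDS_PER_SLOT
    return gap_seconds > MAX_GAP_SECONDS


# Example: last signed at slot 1000; a request for slot 3000 is ~6.7h ahead,
# so it is rejected unless the administrator overrides the protection.
assert is_suspicious_time_jump(1000, 3000)
assert not is_suspicious_time_jump(1000, 1100)
```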
@@ -8,13 +8,17 @@
 
 - [Introduction](#introduction)
 - [Prerequisites](#prerequisites)
+- [Custom Types](#custom-types)
 - [Constants](#constants)
+- [Configuration](#configuration)
 - [Weak Subjectivity Checkpoint](#weak-subjectivity-checkpoint)
 - [Weak Subjectivity Period](#weak-subjectivity-period)
   - [Calculating the Weak Subjectivity Period](#calculating-the-weak-subjectivity-period)
+    - [`compute_weak_subjectivity_period`](#compute_weak_subjectivity_period)
 - [Weak Subjectivity Sync](#weak-subjectivity-sync)
   - [Weak Subjectivity Sync Procedure](#weak-subjectivity-sync-procedure)
   - [Checking for Stale Weak Subjectivity Checkpoint](#checking-for-stale-weak-subjectivity-checkpoint)
+    - [`is_within_weak_subjectivity_period`](#is_within_weak_subjectivity_period)
 - [Distributing Weak Subjectivity Checkpoints](#distributing-weak-subjectivity-checkpoints)
 
 <!-- END doctoc generated TOC please keep comment here to allow auto update -->
@@ -34,15 +38,27 @@ For more information about weak subjectivity and why it is required, please refe
 This document uses data structures, constants, functions, and terminology from
 [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 0 -- Beacon Chain Fork Choice](./fork-choice.md).
 
+## Custom Types
+
+| Name | SSZ Equivalent | Description |
+|---|---|---|
+| `Ether` | `uint64` | an amount in Ether |
+
 ## Constants
 
 | Name | Value |
-|----------------|--------------|
+|---|---|
+| `ETH_TO_GWEI` | `uint64(10**9)` |
+
+## Configuration
+
+| Name | Value |
+|---|---|
 | `SAFETY_DECAY` | `uint64(10)` |
 
 ## Weak Subjectivity Checkpoint
 
-Any `Checkpoint` can used be a Weak Subjectivity Checkpoint.
+Any `Checkpoint` object can be used as a Weak Subjectivity Checkpoint.
 These Weak Subjectivity Checkpoints are distributed by providers,
 downloaded by users and/or distributed as a part of clients, and used as input while syncing a client.
 
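The overflow concern that motivates the new `Ether` type can be demonstrated directly. A small illustration, not part of the spec; the validator count below is an assumed large-registry scenario:

```python
# Why the weak subjectivity math is done in Ether: intermediate products of
# Gwei-denominated balances can exceed the uint64 range.
ETH_TO_GWEI = 10**9
UINT64_MAX = 2**64 - 1

N = 2**21                    # active validators (assumed large registry)
t_gwei = 32 * ETH_TO_GWEI    # average effective balance in Gwei
D = 10                       # SAFETY_DECAY

# One factor from compute_weak_subjectivity_period, computed in Gwei:
product_gwei = N * t_gwei * (200 + 12 * D)
assert product_gwei > UINT64_MAX  # would overflow uint64

# The same factor in Ether stays comfortably in range:
product_ether = N * (t_gwei // ETH_TO_GWEI) * (200 + 12 * D)
assert product_ether <= UINT64_MAX
```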
@@ -59,38 +75,64 @@ a safety margin of at least `1/3 - SAFETY_DECAY/100`.
 
 ### Calculating the Weak Subjectivity Period
 
-*Note*: `compute_weak_subjectivity_period()` is planned to be updated when a more accurate calculation is made.
+A detailed analysis of the calculation of the weak subjectivity period is made in [this report](https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf).
+
+*Note*: The expressions in the report use fractions, whereas eth2.0-specs uses only `uint64` arithmetic. The expressions have been simplified to avoid computing fractions, and more details can be found [here](https://www.overleaf.com/read/wgjzjdjpvpsd).
+
+*Note*: The calculations here use `Ether` instead of `Gwei`, because the large magnitude of balances in `Gwei` can cause an overflow while computing using `uint64` arithmetic operations. Using `Ether` reduces the magnitude of the multiplicative factors by an order of `ETH_TO_GWEI` (`= 10**9`) and avoids the scope for overflows in `uint64`.
+
+#### `compute_weak_subjectivity_period`
 
 ```python
 def compute_weak_subjectivity_period(state: BeaconState) -> uint64:
-    weak_subjectivity_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
-    validator_count = len(get_active_validator_indices(state, get_current_epoch(state)))
-    if validator_count >= MIN_PER_EPOCH_CHURN_LIMIT * CHURN_LIMIT_QUOTIENT:
-        weak_subjectivity_period += SAFETY_DECAY * CHURN_LIMIT_QUOTIENT // (2 * 100)
+    """
+    Returns the weak subjectivity period for the current ``state``.
+    This computation takes into account the effect of:
+        - validator set churn (bounded by ``get_validator_churn_limit()`` per epoch), and
+        - validator balance top-ups (bounded by ``MAX_DEPOSITS * SLOTS_PER_EPOCH`` per epoch).
+    A detailed calculation can be found at:
+    https://github.com/runtimeverification/beacon-chain-verification/blob/master/weak-subjectivity/weak-subjectivity-analysis.pdf
+    """
+    ws_period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
+    N = len(get_active_validator_indices(state, get_current_epoch(state)))
+    t = get_total_active_balance(state) // N // ETH_TO_GWEI
+    T = MAX_EFFECTIVE_BALANCE // ETH_TO_GWEI
+    delta = get_validator_churn_limit(state)
+    Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
+    D = SAFETY_DECAY
+
+    if T * (200 + 3 * D) < t * (200 + 12 * D):
+        epochs_for_validator_set_churn = (
+            N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
+        )
+        epochs_for_balance_top_ups = (
+            N * (200 + 3 * D) // (600 * Delta)
+        )
+        ws_period += max(epochs_for_validator_set_churn, epochs_for_balance_top_ups)
     else:
-        weak_subjectivity_period += SAFETY_DECAY * validator_count // (2 * 100 * MIN_PER_EPOCH_CHURN_LIMIT)
-    return weak_subjectivity_period
+        ws_period += (
+            3 * N * D * t // (200 * Delta * (T - t))
+        )
+
+    return ws_period
 ```
 
-*Details about the calculation*:
-- `100` appears in the denominator to get the actual percentage ratio from `SAFETY_DECAY`
-- For more information about other terms in this equation, refer to
-[Weak Subjectivity in Eth2.0](https://notes.ethereum.org/@adiasg/weak-subjectvity-eth2)
-
-A brief reference for what these values look like in practice:
+A brief reference for what these values look like in practice ([reference script](https://gist.github.com/adiasg/3aceab409b36aa9a9d9156c1baa3c248)):
 
-| `validator_count` | `weak_subjectivity_period` |
-| ---- | ---- |
-| 1024 | 268 |
-| 2048 | 281 |
-| 4096 | 307 |
-| 8192 | 358 |
-| 16384 | 460 |
-| 32768 | 665 |
-| 65536 | 1075 |
-| 131072 | 1894 |
-| 262144 | 3532 |
-| 524288 | 3532 |
+| Safety Decay | Avg. Val. Balance (ETH) | Val. Count | Weak Sub. Period (Epochs) |
+| ---- | ---- | ---- | ---- |
+| 10 | 28 | 32768 | 504 |
+| 10 | 28 | 65536 | 752 |
+| 10 | 28 | 131072 | 1248 |
+| 10 | 28 | 262144 | 2241 |
+| 10 | 28 | 524288 | 2241 |
+| 10 | 28 | 1048576 | 2241 |
+| 10 | 32 | 32768 | 665 |
+| 10 | 32 | 65536 | 1075 |
+| 10 | 32 | 131072 | 1894 |
+| 10 | 32 | 262144 | 3532 |
+| 10 | 32 | 524288 | 3532 |
+| 10 | 32 | 1048576 | 3532 |
 
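As a sanity check on the table above, the computation can be reproduced with plain integer arithmetic. The sketch below is not part of the spec; the constant values are mainnet phase 0 parameters, assumed here for illustration:

```python
# Standalone re-check of two table rows, using assumed mainnet phase 0
# constants. `ws_period` mirrors compute_weak_subjectivity_period above with
# (validator count N, avg balance t, max effective balance T, decay D) as
# explicit inputs.
MIN_VALIDATOR_WITHDRAWABILITY_DELAY = 256
MIN_PER_EPOCH_CHURN_LIMIT = 4
CHURN_LIMIT_QUOTIENT = 2**16
MAX_DEPOSITS = 16
SLOTS_PER_EPOCH = 32


def ws_period(N: int, t: int, T: int = 32, D: int = 10) -> int:
    delta = max(MIN_PER_EPOCH_CHURN_LIMIT, N // CHURN_LIMIT_QUOTIENT)
    Delta = MAX_DEPOSITS * SLOTS_PER_EPOCH
    period = MIN_VALIDATOR_WITHDRAWABILITY_DELAY
    if T * (200 + 3 * D) < t * (200 + 12 * D):
        churn = N * (t * (200 + 12 * D) - T * (200 + 3 * D)) // (600 * delta * (2 * t + T))
        top_ups = N * (200 + 3 * D) // (600 * Delta)
        period += max(churn, top_ups)
    else:
        period += 3 * N * D * t // (200 * Delta * (T - t))
    return period


# Safety decay 10, average balance 32 ETH, 32768 validators -> 665 epochs,
# and 28 ETH with 65536 validators -> 752 epochs, matching the table.
assert ws_period(N=32768, t=32) == 665
assert ws_period(N=65536, t=28) == 752
```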
 ## Weak Subjectivity Sync
 
@@ -101,17 +143,20 @@ If such a sync is not possible, the client should treat this as a critical and i
 ### Weak Subjectivity Sync Procedure
 
 1. Input a Weak Subjectivity Checkpoint as a CLI parameter in `block_root:epoch_number` format,
    where `block_root` (an "0x" prefixed 32-byte hex string) and `epoch_number` (an integer) represent a valid `Checkpoint`.
    Example of the format:
+
    ```
    0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544
    ```
-2. - *IF* `epoch_number > store.finalized_checkpoint.epoch`,
-   then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
-   Emit descriptive critical error if this assert fails, then exit client process.
+
+2. Check the weak subjectivity requirements:
+   - *IF* `epoch_number > store.finalized_checkpoint.epoch`,
+     then *ASSERT* during block sync that block with root `block_root` is in the sync path at epoch `epoch_number`.
+     Emit descriptive critical error if this assert fails, then exit client process.
    - *IF* `epoch_number <= store.finalized_checkpoint.epoch`,
      then *ASSERT* that the block in the canonical chain at epoch `epoch_number` has root `block_root`.
      Emit descriptive critical error if this assert fails, then exit client process.
 
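Parsing the CLI parameter from step 1 is straightforward. A minimal sketch; the function name and error handling are assumptions, not part of the spec:

```python
from typing import Tuple


def parse_ws_checkpoint(arg: str) -> Tuple[bytes, int]:
    """Parse '<0x-prefixed 32-byte root>:<epoch>' into (block_root, epoch)."""
    root_hex, _, epoch_str = arg.partition(":")
    if not root_hex.startswith("0x"):
        raise ValueError("block_root must be 0x-prefixed")
    block_root = bytes.fromhex(root_hex[2:])
    if len(block_root) != 32:
        raise ValueError("block_root must be exactly 32 bytes")
    return block_root, int(epoch_str)


# The example checkpoint from the procedure above parses cleanly:
root, epoch = parse_ws_checkpoint(
    "0x8584188b86a9296932785cc2827b925f9deebacce6d72ad8d53171fa046b43d9:9544"
)
assert epoch == 9544 and len(root) == 32
```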
 ### Checking for Stale Weak Subjectivity Checkpoint
 
@@ -120,6 +165,8 @@ To support this mechanism, the client needs to take the state at the Weak Subjec
 a CLI parameter input (or fetch the state associated with the input Weak Subjectivity Checkpoint from some source).
 The check can be implemented in the following way:
 
+#### `is_within_weak_subjectivity_period`
+
 ```python
 def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_checkpoint: Checkpoint) -> bool:
     # Clients may choose to validate the input state against the input Weak Subjectivity Checkpoint
@@ -133,4 +180,5 @@ def is_within_weak_subjectivity_period(store: Store, ws_state: BeaconState, ws_c
 ```
 
 ## Distributing Weak Subjectivity Checkpoints
+
 This section will be updated soon.
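A client would typically run the staleness check once at startup, before syncing from the checkpoint. A wiring sketch, not part of the spec; the function name, error message, and epoch values are assumptions:

```python
# Exit with a descriptive error if the configured checkpoint is stale, i.e.
# the current epoch is beyond the checkpoint epoch plus the ws period. The
# inputs would come from compute_weak_subjectivity_period and the local clock.
import sys


def check_ws_checkpoint_freshness(ws_state_epoch: int, ws_period: int, current_epoch: int) -> None:
    if current_epoch > ws_state_epoch + ws_period:
        sys.exit(
            f"Weak subjectivity checkpoint is stale: checkpoint epoch {ws_state_epoch} "
            f"+ ws period {ws_period} < current epoch {current_epoch}. "
            "Obtain a more recent checkpoint from a trusted source."
        )


# Example: a checkpoint from epoch 9544 with a 3532-epoch ws period is still
# usable at epoch 12000 but would be stale at epoch 14000.
check_ws_checkpoint_freshness(9544, 3532, 12000)    # OK
# check_ws_checkpoint_freshness(9544, 3532, 14000)  # would exit
```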
File diff suppressed because it is too large.
@@ -1,108 +0,0 @@
-# Ethereum 2.0 Phase 1 -- Beacon Chain Fork Choice
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-<!-- TOC -->
-<!-- START doctoc generated TOC please keep comment here to allow auto update -->
-<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
-
-- [Introduction](#introduction)
-  - [Updated data structures](#updated-data-structures)
-    - [Extended `Store`](#extended-store)
-  - [New data structures](#new-data-structures)
-    - [`ShardLatestMessage`](#shardlatestmessage)
-    - [`ShardStore`](#shardstore)
-  - [Updated helpers](#updated-helpers)
-    - [Updated `get_forkchoice_store`](#updated-get_forkchoice_store)
-    - [Updated `update_latest_messages`](#updated-update_latest_messages)
-
-<!-- END doctoc generated TOC please keep comment here to allow auto update -->
-<!-- /TOC -->
-
-## Introduction
-
-This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1.
-
-### Updated data structures
-
-#### Extended `Store`
-
-```python
-@dataclass
-class Store(object):
-    time: uint64
-    genesis_time: uint64
-    justified_checkpoint: Checkpoint
-    finalized_checkpoint: Checkpoint
-    best_justified_checkpoint: Checkpoint
-    blocks: Dict[Root, BeaconBlock] = field(default_factory=dict)
-    block_states: Dict[Root, BeaconState] = field(default_factory=dict)
-    checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict)
-    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
-    shard_stores: Dict[Shard, ShardStore] = field(default_factory=dict)
-```
-
-### New data structures
-
-#### `ShardLatestMessage`
-
-```python
-@dataclass(eq=True, frozen=True)
-class ShardLatestMessage(object):
-    epoch: Epoch
-    root: Root
-```
-
-#### `ShardStore`
-
-```python
-@dataclass
-class ShardStore:
-    shard: Shard
-    signed_blocks: Dict[Root, SignedShardBlock] = field(default_factory=dict)
-    block_states: Dict[Root, ShardState] = field(default_factory=dict)
-    latest_messages: Dict[ValidatorIndex, ShardLatestMessage] = field(default_factory=dict)
-```
-
-### Updated helpers
-
-#### Updated `get_forkchoice_store`
-
-```python
-def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) -> Store:
-    assert anchor_block.state_root == hash_tree_root(anchor_state)
-    anchor_root = hash_tree_root(anchor_block)
-    anchor_epoch = get_current_epoch(anchor_state)
-    justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
-    finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
-    return Store(
-        time=anchor_state.genesis_time + SECONDS_PER_SLOT * anchor_state.slot,
-        genesis_time=anchor_state.genesis_time,
-        justified_checkpoint=justified_checkpoint,
-        finalized_checkpoint=finalized_checkpoint,
-        best_justified_checkpoint=justified_checkpoint,
-        blocks={anchor_root: copy(anchor_block)},
-        block_states={anchor_root: anchor_state.copy()},
-        checkpoint_states={justified_checkpoint: anchor_state.copy()},
-        shard_stores={
-            Shard(shard): get_forkchoice_shard_store(anchor_state, Shard(shard))
-            for shard in range(get_active_shard_count(anchor_state))
-        }
-    )
-```
-
-#### Updated `update_latest_messages`
-
-```python
-def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
-    target = attestation.data.target
-    beacon_block_root = attestation.data.beacon_block_root
-    # TODO: separate shard chain vote
-    shard = attestation.data.shard
-    for i in attesting_indices:
-        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
-            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root)
-            shard_latest_message = ShardLatestMessage(epoch=target.epoch, root=attestation.data.shard_head_root)
-            store.shard_stores[shard].latest_messages[i] = shard_latest_message
-```
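The latest-message rule in the deleted `update_latest_messages` above -- a stored vote is displaced only by a strictly newer target epoch -- can be exercised in isolation. A toy sketch with illustrative data, not spec code:

```python
# Toy illustration of the latest-message rule: a vote only replaces a
# validator's previous vote when its target epoch is strictly newer.
latest_messages = {}  # validator index -> (target epoch, block root)


def on_vote(index: int, target_epoch: int, root: str) -> None:
    if index not in latest_messages or target_epoch > latest_messages[index][0]:
        latest_messages[index] = (target_epoch, root)


on_vote(7, 5, "0xaa")  # first message is stored
on_vote(7, 5, "0xbb")  # same epoch: ignored, not strictly newer
on_vote(7, 6, "0xcc")  # newer epoch: replaces the previous message
assert latest_messages[7] == (6, "0xcc")
```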
@@ -1,174 +0,0 @@
-# Minimal Light Client Design
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-<!-- TOC -->
-<!-- START doctoc generated TOC please keep comment here to allow auto update -->
-<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
-
-- [Introduction](#introduction)
-- [Custom types](#custom-types)
-- [Constants](#constants)
-- [Containers](#containers)
-  - [`LightClientUpdate`](#lightclientupdate)
-- [Helpers](#helpers)
-  - [`LightClientMemory`](#lightclientmemory)
-  - [`get_persistent_committee_pubkeys_and_balances`](#get_persistent_committee_pubkeys_and_balances)
-- [Light client state updates](#light-client-state-updates)
-- [Data overhead](#data-overhead)
-
-<!-- END doctoc generated TOC please keep comment here to allow auto update -->
-<!-- /TOC -->
-
-## Introduction
-
-Ethereum 2.0 is designed to be light client friendly. This allows low-resource clients such as mobile phones to access Ethereum 2.0 with reasonable safety and liveness. It also facilitates the development of "bridges" to external blockchains. This document suggests a minimal light client design for the beacon chain.
-
-## Custom types
-
-We define the following Python custom types for type hinting and readability:
-
-| Name | SSZ equivalent | Description |
-| - | - | - |
-| `CompactValidator` | `uint64` | compact representation of a validator for light clients |
-
-## Constants
-
-| Name | Value |
-| - | - |
-| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH` | `4` |
-| `BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX` | **TBD** |
-| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH` | `5` |
-| `PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX` | **TBD** |
-
-## Containers
-
-### `LightClientUpdate`
-
-```python
-class LightClientUpdate(Container):
-    # Shard block root (and authenticating signature data)
-    shard_block_root: Root
-    fork_version: Version
-    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
-    signature: BLSSignature
-    # Updated beacon header (and authenticating branch)
-    header: BeaconBlockHeader
-    header_branch: Vector[Bytes32, BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH]
-    # Updated period committee (and authenticating branch)
-    committee: CompactCommittee
-    committee_branch: Vector[Bytes32, PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT)]
-```
-
-## Helpers
-
-### `LightClientMemory`
-
-```python
-@dataclass
-class LightClientMemory(object):
-    shard: Shard  # Randomly initialized and retained forever
-    header: BeaconBlockHeader  # Beacon header which is not expected to revert
-    # period committees corresponding to the beacon header
-    previous_committee: CompactCommittee
-    current_committee: CompactCommittee
-    next_committee: CompactCommittee
-```
-
-### `get_persistent_committee_pubkeys_and_balances`
-
-```python
-def get_persistent_committee_pubkeys_and_balances(memory: LightClientMemory,
-                                                  epoch: Epoch) -> Tuple[Sequence[BLSPubkey], Sequence[uint64]]:
-    """
-    Return pubkeys and balances for the persistent committee at ``epoch``.
-    """
-    current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD
-    next_period = epoch // EPOCHS_PER_SHARD_PERIOD
-    assert next_period in (current_period, current_period + 1)
-    if next_period == current_period:
-        earlier_committee, later_committee = memory.previous_committee, memory.current_committee
-    else:
-        earlier_committee, later_committee = memory.current_committee, memory.next_committee
-
-    pubkeys = []
-    balances = []
-    for pubkey, compact_validator in zip(earlier_committee.pubkeys, earlier_committee.compact_validators):
-        index, slashed, balance = unpack_compact_validator(compact_validator)
-        if epoch % EPOCHS_PER_SHARD_PERIOD < index % EPOCHS_PER_SHARD_PERIOD:
-            pubkeys.append(pubkey)
-            balances.append(balance)
-    for pubkey, compact_validator in zip(later_committee.pubkeys, later_committee.compact_validators):
-        index, slashed, balance = unpack_compact_validator(compact_validator)
-        if epoch % EPOCHS_PER_SHARD_PERIOD >= index % EPOCHS_PER_SHARD_PERIOD:
-            pubkeys.append(pubkey)
-            balances.append(balance)
-    return pubkeys, balances
-```
-
-## Light client state updates
-
-The state of a light client is stored in a `memory` object of type `LightClientMemory`. To advance its state a light client requests an `update` object of type `LightClientUpdate` from the network by sending a request containing `(memory.shard, memory.header.slot, slot_range_end)` and calls `update_memory(memory, update)`.
-
-```python
-def update_memory(memory: LightClientMemory, update: LightClientUpdate) -> None:
-    # Verify the update does not skip a period
-    current_period = compute_epoch_at_slot(memory.header.slot) // EPOCHS_PER_SHARD_PERIOD
-    next_epoch = compute_epoch_of_shard_slot(update.header.slot)
-    next_period = next_epoch // EPOCHS_PER_SHARD_PERIOD
-    assert next_period in (current_period, current_period + 1)
-
-    # Verify update header against shard block root and header branch
-    assert is_valid_merkle_branch(
-        leaf=hash_tree_root(update.header),
-        branch=update.header_branch,
-        depth=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_DEPTH,
-        index=BEACON_CHAIN_ROOT_IN_SHARD_BLOCK_HEADER_INDEX,
-        root=update.shard_block_root,
-    )
-
-    # Verify persistent committee votes pass 2/3 threshold
-    pubkeys, balances = get_persistent_committee_pubkeys_and_balances(memory, next_epoch)
-    assert 3 * sum(filter(lambda i: update.aggregation_bits[i], balances)) > 2 * sum(balances)
-
-    # Verify shard attestations
-    pubkeys = filter(lambda i: update.aggregation_bits[i], pubkeys)
-    domain = compute_domain(DOMAIN_SHARD_ATTESTER, update.fork_version)
-    signing_root = compute_signing_root(update.shard_block_root, domain)
-    assert bls.FastAggregateVerify(pubkeys, signing_root, update.signature)
-
-    # Update period committees if entering a new period
-    if next_period == current_period + 1:
-        assert is_valid_merkle_branch(
-            leaf=hash_tree_root(update.committee),
-            branch=update.committee_branch,
-            depth=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_DEPTH + log_2(SHARD_COUNT),
-            index=PERIOD_COMMITTEE_ROOT_IN_BEACON_STATE_INDEX << log_2(SHARD_COUNT) + memory.shard,
-            root=hash_tree_root(update.header),
-        )
-        memory.previous_committee = memory.current_committee
-        memory.current_committee = memory.next_committee
-        memory.next_committee = update.committee
-
-    # Update header
-    memory.header = update.header
-```
-
-## Data overhead
-
-Once every `EPOCHS_PER_SHARD_PERIOD` epochs (~27 hours) a light client downloads a `LightClientUpdate` object:
-
-* `shard_block_root`: 32 bytes
-* `fork_version`: 4 bytes
-* `aggregation_bits`: 16 bytes
-* `signature`: 96 bytes
-* `header`: 8 + 32 + 32 + 32 + 96 = 200 bytes
-* `header_branch`: 4 * 32 = 128 bytes
-* `committee`: 128 * (48 + 8) = 7,168 bytes
-* `committee_branch`: (5 + 10) * 32 = 480 bytes
-
-The total overhead is 8,124 bytes, or ~0.083 bytes per second. The Bitcoin SPV equivalent is 80 bytes per ~560 seconds, or ~0.143 bytes per second. Various compression optimisations (similar to [these](https://github.com/RCasatta/compressedheaders)) are possible.
-
-A light client can choose to update the header (without updating the committee) more frequently than once every `EPOCHS_PER_SHARD_PERIOD` epochs at a cost of 32 + 4 + 16 + 96 + 200 + 128 = 476 bytes per update.
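The byte counts in the deleted "Data overhead" section above can be re-checked mechanically. A small sketch, not part of the spec; the ~27 hour period and the Bitcoin SPV figures are taken from the text:

```python
# Re-derive the 8,124-byte LightClientUpdate total and the bytes-per-second
# comparison against Bitcoin SPV headers (80 bytes per ~560 seconds).
sizes = {
    "shard_block_root": 32,
    "fork_version": 4,
    "aggregation_bits": 16,
    "signature": 96,
    "header": 8 + 32 + 32 + 32 + 96,    # 200 bytes
    "header_branch": 4 * 32,            # 128 bytes
    "committee": 128 * (48 + 8),        # 7,168 bytes
    "committee_branch": (5 + 10) * 32,  # 480 bytes
}
total = sum(sizes.values())
assert total == 8124

period_seconds = 27 * 60 * 60            # ~27 hours per update
print(round(total / period_seconds, 3))  # ~0.084 bytes per second
print(round(80 / 560, 3))                # Bitcoin SPV: ~0.143 bytes per second
```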
@@ -1,111 +0,0 @@
-# Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-<!-- START doctoc generated TOC please keep comment here to allow auto update -->
-<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
-
-- [Introduction](#introduction)
-- [Configuration](#configuration)
-- [Fork to Phase 1](#fork-to-phase-1)
-  - [Fork trigger](#fork-trigger)
-  - [Upgrading the state](#upgrading-the-state)
-
-<!-- END doctoc generated TOC please keep comment here to allow auto update -->
-
-## Introduction
-
-This document describes the process of moving from Phase 0 to Phase 1 of Ethereum 2.0.
-
-## Configuration
-
-Warning: this configuration is not definitive.
-
-| Name | Value |
-| - | - |
-| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
-| `PHASE_1_FORK_SLOT` | `Slot(0)` **TBD** |
-
-## Fork to Phase 1
-
-### Fork trigger
-
-TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc. may be part of the decision process to trigger the fork. For now we assume the condition will be triggered at slot `PHASE_1_FORK_SLOT`, where `PHASE_1_FORK_SLOT % SLOTS_PER_EPOCH == 0`.
-
-### Upgrading the state
-
-After `process_slots` of Phase 0 finishes, if `state.slot == PHASE_1_FORK_SLOT`, an irregular state change is made to upgrade to Phase 1.
-
-```python
-def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
-    epoch = get_current_epoch(pre)
-    post = BeaconState(
-        genesis_time=pre.genesis_time,
-        slot=pre.slot,
-        fork=Fork(
-            previous_version=pre.fork.current_version,
-            current_version=PHASE_1_FORK_VERSION,
-            epoch=epoch,
-        ),
-        # History
-        latest_block_header=pre.latest_block_header,
-        block_roots=pre.block_roots,
-        state_roots=pre.state_roots,
-        historical_roots=pre.historical_roots,
-        # Eth1
-        eth1_data=pre.eth1_data,
-        eth1_data_votes=pre.eth1_data_votes,
-        eth1_deposit_index=pre.eth1_deposit_index,
-        # Registry
-        validators=List[Validator, VALIDATOR_REGISTRY_LIMIT](
-            Validator(
-                pubkey=phase0_validator.pubkey,
-                withdrawal_credentials=phase0_validator.withdrawal_credentials,
-                effective_balance=phase0_validator.effective_balance,
-                slashed=phase0_validator.slashed,
-                activation_eligibility_epoch=phase0_validator.activation_eligibility_epoch,
-                activation_epoch=phase0_validator.activation_epoch,
-                exit_epoch=phase0_validator.exit_epoch,
-                withdrawable_epoch=phase0_validator.withdrawable_epoch,
-                next_custody_secret_to_reveal=get_custody_period_for_validator(ValidatorIndex(i), epoch),
-                all_custody_secrets_revealed_epoch=FAR_FUTURE_EPOCH,
-            ) for i, phase0_validator in enumerate(pre.validators)
-        ),
-        balances=pre.balances,
-        # Randomness
-        randao_mixes=pre.randao_mixes,
-        # Slashings
-        slashings=pre.slashings,
-        # Attestations
-        # previous_epoch_attestations is cleared on upgrade.
-        previous_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
-        # empty in pre state, since the upgrade is performed just after an epoch boundary.
-        current_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
-        # Finality
-        justification_bits=pre.justification_bits,
-        previous_justified_checkpoint=pre.previous_justified_checkpoint,
-        current_justified_checkpoint=pre.current_justified_checkpoint,
-        finalized_checkpoint=pre.finalized_checkpoint,
-        # Phase 1
-        current_epoch_start_shard=Shard(0),
-        shard_states=List[ShardState, MAX_SHARDS](
-            ShardState(
-                slot=compute_previous_slot(pre.slot),
-                gasprice=MIN_GASPRICE,
-                latest_block_root=Root(),
-            ) for i in range(INITIAL_ACTIVE_SHARDS)
-        ),
-        online_countdown=[ONLINE_PERIOD] * len(pre.validators),  # all online
-        current_light_committee=CompactCommittee(),  # computed after state creation
-        next_light_committee=CompactCommittee(),
-        # Custody game
-        exposed_derived_secrets=[()] * EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS,
-        # exposed_derived_secrets will fully default to zeroes
-    )
-    next_epoch = Epoch(epoch + 1)
-    post.current_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, epoch))
-    post.next_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, next_epoch))
-    return post
-```
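The fork-bump pattern in the deleted `upgrade_to_phase1` above is simple to isolate: the old `current_version` becomes `previous_version`, and the fork epoch records when the upgrade happened. A minimal sketch with simplified types; the `Fork` dataclass below is a stand-in for the SSZ container, not spec code:

```python
from dataclasses import dataclass


@dataclass
class Fork:
    previous_version: bytes
    current_version: bytes
    epoch: int


PHASE_1_FORK_VERSION = b"\x01\x00\x00\x00"


def bump_fork(pre: Fork, epoch: int) -> Fork:
    # The pre-fork current version is retained as previous_version so that
    # signatures from before the fork can still be domain-separated correctly.
    return Fork(
        previous_version=pre.current_version,
        current_version=PHASE_1_FORK_VERSION,
        epoch=epoch,
    )


genesis_fork = Fork(b"\x00\x00\x00\x00", b"\x00\x00\x00\x00", 0)
assert bump_fork(genesis_fork, 1024).previous_version == b"\x00\x00\x00\x00"
```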
@@ -1,178 +0,0 @@
-# Ethereum 2.0 Phase 1 -- Beacon Chain + Shard Chain Fork Choice
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-<!-- START doctoc generated TOC please keep comment here to allow auto update -->
-<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
-
-- [Introduction](#introduction)
-- [Fork choice](#fork-choice)
-  - [Helpers](#helpers)
-    - [`get_forkchoice_shard_store`](#get_forkchoice_shard_store)
-    - [`get_shard_latest_attesting_balance`](#get_shard_latest_attesting_balance)
-    - [`get_shard_head`](#get_shard_head)
-    - [`get_shard_ancestor`](#get_shard_ancestor)
-    - [`get_pending_shard_blocks`](#get_pending_shard_blocks)
-  - [Handlers](#handlers)
-    - [`on_shard_block`](#on_shard_block)
-
-<!-- END doctoc generated TOC please keep comment here to allow auto update -->
-
-## Introduction
-
-This document is the shard chain fork choice spec for part of Ethereum 2.0 Phase 1. It assumes the [beacon chain fork choice spec](./fork-choice.md).
-
-## Fork choice
-
-### Helpers
-
-#### `get_forkchoice_shard_store`
-
-```python
-def get_forkchoice_shard_store(anchor_state: BeaconState, shard: Shard) -> ShardStore:
-    return ShardStore(
-        shard=shard,
-        signed_blocks={
-            anchor_state.shard_states[shard].latest_block_root: SignedShardBlock(
-                message=ShardBlock(slot=compute_previous_slot(anchor_state.slot), shard=shard)
-            )
-        },
-        block_states={anchor_state.shard_states[shard].latest_block_root: anchor_state.copy().shard_states[shard]},
-    )
-```
-
-#### `get_shard_latest_attesting_balance`
-
-```python
-def get_shard_latest_attesting_balance(store: Store, shard: Shard, root: Root) -> Gwei:
-    shard_store = store.shard_stores[shard]
-    state = store.checkpoint_states[store.justified_checkpoint]
-    active_indices = get_active_validator_indices(state, get_current_epoch(state))
-    return Gwei(sum(
-        state.validators[i].effective_balance for i in active_indices
-        if (
-            i in shard_store.latest_messages
-            # TODO: check the latest message logic: currently, validator's previous vote of another shard
-            # would be ignored once their newer vote is accepted. Check if it makes sense.
-            and get_shard_ancestor(
-                store,
-                shard,
-                shard_store.latest_messages[i].root,
-                shard_store.signed_blocks[root].message.slot,
-            ) == root
-        )
-    ))
-```
-
-#### `get_shard_head`
-
-```python
-def get_shard_head(store: Store, shard: Shard) -> Root:
-    """
-    Execute the LMD-GHOST fork choice.
-    """
-    shard_store = store.shard_stores[shard]
-    beacon_head_root = get_head(store)
-    shard_head_state = store.block_states[beacon_head_root].shard_states[shard]
-    shard_head_root = shard_head_state.latest_block_root
-    shard_blocks = {
-        root: signed_shard_block.message for root, signed_shard_block in shard_store.signed_blocks.items()
-        if signed_shard_block.message.slot > shard_head_state.slot
-    }
-    while True:
-        # Find the valid child block roots
-        children = [
-            root for root, shard_block in shard_blocks.items()
-            if shard_block.shard_parent_root == shard_head_root
-        ]
-        if len(children) == 0:
-            return shard_head_root
-        # Sort by latest attesting balance with ties broken lexicographically
-        shard_head_root = max(
-            children, key=lambda root: (get_shard_latest_attesting_balance(store, shard, root), root)
-        )
-```
-
-#### `get_shard_ancestor`
-
-```python
-def get_shard_ancestor(store: Store, shard: Shard, root: Root, slot: Slot) -> Root:
-    shard_store = store.shard_stores[shard]
-    block = shard_store.signed_blocks[root].message
-    if block.slot > slot:
-        return get_shard_ancestor(store, shard, block.shard_parent_root, slot)
-    elif block.slot == slot:
-        return root
-    else:
-        # root is older than queried slot, thus a skip slot. Return most recent root prior to slot
-        return root
-```
-
-#### `get_pending_shard_blocks`
-
-```python
-def get_pending_shard_blocks(store: Store, shard: Shard) -> Sequence[SignedShardBlock]:
-    """
-    Return the canonical shard block branch that has not yet been crosslinked.
-    """
-    shard_store = store.shard_stores[shard]
-
-    beacon_head_root = get_head(store)
-    beacon_head_state = store.block_states[beacon_head_root]
-    latest_shard_block_root = beacon_head_state.shard_states[shard].latest_block_root
-
-    shard_head_root = get_shard_head(store, shard)
-    root = shard_head_root
-    signed_shard_blocks = []
-    while root != latest_shard_block_root:
-        signed_shard_block = shard_store.signed_blocks[root]
-        signed_shard_blocks.append(signed_shard_block)
-        root = signed_shard_block.message.shard_parent_root
-
-    signed_shard_blocks.reverse()
-    return signed_shard_blocks
-```
-
-### Handlers
-
-#### `on_shard_block`
-
-```python
-def on_shard_block(store: Store, signed_shard_block: SignedShardBlock) -> None:
-    shard_block = signed_shard_block.message
-    shard = shard_block.shard
-    shard_store = store.shard_stores[shard]
-
-    # Check shard parent exists
-    assert shard_block.shard_parent_root in shard_store.block_states
-    shard_parent_state = shard_store.block_states[shard_block.shard_parent_root]
-
-    # Check beacon parent exists
-    assert shard_block.beacon_parent_root in store.block_states
-    beacon_parent_state = store.block_states[shard_block.beacon_parent_root]
-
-    # Check that block is later than the finalized shard state slot (optimization to reduce calls to get_ancestor)
-    finalized_beacon_state = store.block_states[store.finalized_checkpoint.root]
-    finalized_shard_state = finalized_beacon_state.shard_states[shard]
-    assert shard_block.slot > finalized_shard_state.slot
-
-    # Check block is a descendant of the finalized block at the checkpoint finalized slot
-    finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)
-    assert (
-        get_ancestor(store, shard_block.beacon_parent_root, finalized_slot) == store.finalized_checkpoint.root
-    )
-
-    # Check the block is valid and compute the post-state
-    shard_state = shard_parent_state.copy()
-    shard_state_transition(shard_state, signed_shard_block, beacon_parent_state, validate_result=True)
-
-    # Add new block to the store
-    # Note: storing `SignedShardBlock` format for computing `ShardTransition.proposer_signature_aggregate`
-    shard_store.signed_blocks[hash_tree_root(shard_block)] = signed_shard_block
-
-    # Add new state for this block to the store
-    shard_store.block_states[hash_tree_root(shard_block)] = shard_state
-```
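The head-selection loop in the deleted `get_shard_head` above descends the block tree greedily, picking the child with the highest latest attesting balance and breaking ties lexicographically by root. A toy walk over a hand-built tree; all data below is made up for illustration:

```python
# Toy illustration of the greedy descent and the (balance, root) tie-break.
blocks = {           # root -> parent root
    "0xb1": "0xa0",
    "0xb2": "0xa0",
    "0xc1": "0xb2",
}
balances = {"0xb1": 64, "0xb2": 64, "0xc1": 32}  # latest attesting balance

head = "0xa0"
while True:
    children = [root for root, parent in blocks.items() if parent == head]
    if not children:
        break
    # Ties on balance (0xb1 vs 0xb2) resolve to the lexicographically larger root.
    head = max(children, key=lambda root: (balances[root], root))
assert head == "0xc1"
```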
@@ -1,145 +0,0 @@
-# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
-
-**Notice**: This document is a work-in-progress for researchers and implementers.
-
-## Table of contents
-
-<!-- START doctoc generated TOC please keep comment here to allow auto update -->
-<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
-
-- [Introduction](#introduction)
-- [Helper functions](#helper-functions)
-  - [Shard block verification functions](#shard-block-verification-functions)
-    - [`verify_shard_block_message`](#verify_shard_block_message)
-    - [`verify_shard_block_signature`](#verify_shard_block_signature)
-- [Shard state transition function](#shard-state-transition-function)
-- [Fraud proofs](#fraud-proofs)
-  - [Verifying the proof](#verifying-the-proof)
-
-<!-- END doctoc generated TOC please keep comment here to allow auto update -->
-
-## Introduction
-
-This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
-
-## Helper functions
-
-### Shard block verification functions
-
-#### `verify_shard_block_message`
-
-```python
-def verify_shard_block_message(beacon_parent_state: BeaconState,
-                               shard_parent_state: ShardState,
-                               block: ShardBlock) -> bool:
-    # Check `shard_parent_root` field
-    assert block.shard_parent_root == shard_parent_state.latest_block_root
-    # Check `beacon_parent_root` field
-    beacon_parent_block_header = beacon_parent_state.latest_block_header.copy()
-    if beacon_parent_block_header.state_root == Root():
-        beacon_parent_block_header.state_root = hash_tree_root(beacon_parent_state)
-    beacon_parent_root = hash_tree_root(beacon_parent_block_header)
-    assert block.beacon_parent_root == beacon_parent_root
-    # Check `slot` field
-    shard = block.shard
-    next_slot = Slot(block.slot + 1)
-    offset_slots = compute_offset_slots(get_latest_slot_for_shard(beacon_parent_state, shard), next_slot)
-    assert block.slot in offset_slots
-    # Check `proposer_index` field
-    assert block.proposer_index == get_shard_proposer_index(beacon_parent_state, block.slot, shard)
-    # Check `body` field
-    assert 0 < len(block.body) <= MAX_SHARD_BLOCK_SIZE
-    return True
-```
-
-#### `verify_shard_block_signature`
-
-```python
-def verify_shard_block_signature(beacon_parent_state: BeaconState,
-                                 signed_block: SignedShardBlock) -> bool:
-    proposer = beacon_parent_state.validators[signed_block.message.proposer_index]
-    domain = get_domain(beacon_parent_state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(signed_block.message.slot))
-    signing_root = compute_signing_root(signed_block.message, domain)
-    return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
-```
-
-## Shard state transition function
-
-The post-state corresponding to a pre-state `shard_state` and a signed block `signed_block` is defined as `shard_state_transition(shard_state, signed_block, beacon_parent_state)`, where `beacon_parent_state` is the parent beacon state of the `signed_block`. State transitions that trigger an unhandled exception (e.g. a failed `assert` or an out-of-range list access) are considered invalid. State transitions that cause a `uint64` overflow or underflow are also considered invalid.
-
-```python
-def shard_state_transition(shard_state: ShardState,
-                           signed_block: SignedShardBlock,
-                           beacon_parent_state: BeaconState,
-                           validate_result: bool = True) -> None:
-    assert verify_shard_block_message(beacon_parent_state, shard_state, signed_block.message)
-
-    if validate_result:
-        assert verify_shard_block_signature(beacon_parent_state, signed_block)
-
-    process_shard_block(shard_state, signed_block.message)
-```
-
-```python
-def process_shard_block(shard_state: ShardState,
-                        block: ShardBlock) -> None:
-    """
-    Update ``shard_state`` with shard ``block``.
-    """
-    shard_state.slot = block.slot
-    prev_gasprice = shard_state.gasprice
-    shard_block_length = len(block.body)
-    shard_state.gasprice = compute_updated_gasprice(prev_gasprice, uint64(shard_block_length))
-    if shard_block_length != 0:
-        shard_state.latest_block_root = hash_tree_root(block)
-```
-
-## Fraud proofs
-
-### Verifying the proof
-
-TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
-
-1. An on-time attestation `attestation` on some shard `shard` signing a `transition: ShardTransition`
-2. An index `offset_index` of a particular position to focus on
-3. The `transition: ShardTransition` itself
-4. The full body of the shard block `shard_block`
-5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
-6. The `subkey` to generate the custody bit
-
-Call the following function to verify the proof:
-
-```python
-def is_valid_fraud_proof(beacon_state: BeaconState,
-                         attestation: Attestation,
-                         offset_index: uint64,
-                         transition: ShardTransition,
-                         block: ShardBlock,
-                         subkey: BLSPubkey,
-                         beacon_parent_block: BeaconBlock) -> bool:
-    # 1. Check if `custody_bits[offset_index][j] != generate_custody_bit(subkey, block_contents)` for any `j`.
-    custody_bits = attestation.custody_bits_blocks
-    for j in range(len(custody_bits[offset_index])):
-        if custody_bits[offset_index][j] != generate_custody_bit(subkey, block):
-            return True
-
-    # 2. Check if the shard state transition result is wrong between
-    # `transition.shard_states[offset_index - 1]` to `transition.shard_states[offset_index]`.
-    if offset_index == 0:
-        shard_states = beacon_parent_block.body.shard_transitions[attestation.data.shard].shard_states
-        shard_state = shard_states[len(shard_states) - 1]
-    else:
-        shard_state = transition.shard_states[offset_index - 1]  # Not doing the actual state updates here.
-
-    process_shard_block(shard_state, block)
-    if shard_state != transition.shard_states[offset_index]:
-        return True
-
-    return False
-```
-
-```python
-def generate_custody_bit(subkey: BLSPubkey, block: ShardBlock) -> bool:
-    # TODO
-    ...
-```
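The second check in the deleted `is_valid_fraud_proof` above boils down to recompute-and-compare: re-run the claimed state transition locally and flag fraud if the claimed post-state differs. A toy sketch of that idea with stand-in types; `compute_updated_gasprice` is defined elsewhere, so the gasprice field is omitted here:

```python
from dataclasses import dataclass


@dataclass
class ToyShardState:
    slot: int
    latest_block_root: str


def toy_process_shard_block(state: ToyShardState, slot: int, block_root: str, body_length: int) -> None:
    # Mirrors process_shard_block above: non-empty blocks update the root.
    state.slot = slot
    if body_length != 0:
        state.latest_block_root = block_root


pre = ToyShardState(slot=9, latest_block_root="0xaa")
claimed_post = ToyShardState(slot=10, latest_block_root="0xaa")  # claims root unchanged

recomputed = ToyShardState(pre.slot, pre.latest_block_root)
toy_process_shard_block(recomputed, slot=10, block_root="0xbb", body_length=100)

is_fraud = recomputed != claimed_post
assert is_fraud  # the non-empty block must have updated latest_block_root
```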
@ -1,562 +0,0 @@
|
||||||
# Ethereum 2.0 Phase 1 -- Honest Validator
|
|
||||||
|
|
||||||
**Notice**: This document is a work-in-progress for researchers and implementers. This is an accompanying document to [Ethereum 2.0 Phase 1](./), which describes the expected actions of a "validator" participating in the Ethereum 2.0 Phase 1 protocol.
|
|
||||||
|
|
||||||
## Table of contents
|
|
||||||
|
|
||||||
<!-- TOC -->
|
|
||||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
|
||||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
|
||||||
|
|
||||||
- [Introduction](#introduction)
|
|
||||||
- [Prerequisites](#prerequisites)
|
|
||||||
- [Constants](#constants)
|
|
||||||
- [Misc](#misc)
|
|
||||||
- [Becoming a validator](#becoming-a-validator)
|
|
||||||
- [Beacon chain validator assignments](#beacon-chain-validator-assignments)
|
|
||||||
- [Lookahead](#lookahead)
|
|
||||||
- [Beacon chain responsibilities](#beacon-chain-responsibilities)
|
|
||||||
- [Block proposal](#block-proposal)
|
|
||||||
- [Preparing for a `BeaconBlock`](#preparing-for-a-beaconblock)
|
|
||||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
|
||||||
- [Custody slashings](#custody-slashings)
|
|
||||||
- [Custody key reveals](#custody-key-reveals)
|
|
||||||
- [Early derived secret reveals](#early-derived-secret-reveals)
|
|
||||||
- [Shard transitions](#shard-transitions)
|
|
||||||
- [Light client fields](#light-client-fields)
|
|
||||||
- [Packaging into a `SignedBeaconBlock`](#packaging-into-a-signedbeaconblock)
|
|
||||||
- [Attesting](#attesting)
|
|
||||||
- [`FullAttestationData`](#fullattestationdata)
|
|
||||||
- [`FullAttestation`](#fullattestation)
|
|
||||||
- [Timing](#timing)
|
|
||||||
- [Attestation data](#attestation-data)
|
|
||||||
- [Shard head root](#shard-head-root)
|
|
||||||
- [Shard transition](#shard-transition)
|
|
||||||
- [Construct attestation](#construct-attestation)
|
|
||||||
- [Attestation Aggregation](#attestation-aggregation)
|
|
||||||
- [Broadcast aggregate](#broadcast-aggregate)
|
|
||||||
- [`AggregateAndProof`](#aggregateandproof)
|
|
||||||
- [`SignedAggregateAndProof`](#signedaggregateandproof)
|
|
||||||
- [Light client committee](#light-client-committee)
|
|
||||||
- [Preparation](#preparation)
|
|
||||||
- [Light client vote](#light-client-vote)
|
|
||||||
- [Light client vote data](#light-client-vote-data)
|
|
||||||
- [`LightClientVoteData`](#lightclientvotedata)
|
|
||||||
- [Construct vote](#construct-vote)
|
|
||||||
- [`LightClientVote`](#lightclientvote)
|
|
||||||
- [Broadcast](#broadcast)
|
|
||||||
- [Light client vote aggregation](#light-client-vote-aggregation)
|
|
||||||
- [Aggregation selection](#aggregation-selection)
|
|
||||||
- [Construct aggregate](#construct-aggregate)
|
|
||||||
- [Broadcast aggregate](#broadcast-aggregate-1)
|
|
||||||
- [`LightAggregateAndProof`](#lightaggregateandproof)
|
|
||||||
- [`SignedLightAggregateAndProof`](#signedlightaggregateandproof)
|
|
||||||
- [How to avoid slashing](#how-to-avoid-slashing)
|
|
||||||
- [Custody slashing](#custody-slashing)
|
|
||||||
|
|
||||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
|
||||||
<!-- /TOC -->
|
|
||||||
|
|
||||||
## Introduction
|
|
||||||
|
|
||||||
This document represents the expected behavior of an "honest validator" with respect to Phase 1 of the Ethereum 2.0 protocol. This document does not distinguish between a "node" (i.e. the functionality of following and reading the beacon chain) and a "validator client" (i.e. the functionality of actively participating in consensus). The separation of concerns between these (potentially) two pieces of software is left as a design decision that is out of scope.
|
|
||||||
|
|
||||||
A validator is an entity that participates in the consensus of the Ethereum 2.0 protocol. This is an optional role for users in which they can post ETH as collateral and verify and attest to the validity of blocks to seek financial returns in exchange for building and securing the protocol. This is similar to proof-of-work networks in which miners provide collateral in the form of hardware/hash-power to seek returns in exchange for building and securing the protocol.
|
|
||||||
|
|
||||||
## Prerequisites
|
|
||||||
|
|
||||||
This document is an extension of the [Phase 0 -- Validator](../phase0/validator.md). All behaviors and definitions defined in the Phase 0 doc carry over unless explicitly noted or overridden.
|
|
||||||
|
|
||||||
All terminology, constants, functions, and protocol mechanics defined in the [Phase 1 -- The Beacon Chain](./beacon-chain.md) and [Phase 1 -- Custody Game](./custody-game.md) docs are requisite for this document and used throughout. Please see the Phase 1 docs before continuing and use as a reference throughout.
|
|
||||||
|
|
||||||
## Constants
|
|
||||||
|
|
||||||
See constants from [Phase 0 validator guide](../phase0/validator.md#constants).
|
|
||||||
|
|
||||||
### Misc
|
|
||||||
|
|
||||||
| Name | Value | Unit | Duration |
|
|
||||||
| - | - | :-: | :-: |
|
|
||||||
| `TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT` | `2**3` (= 8) | validators | |
|
|
||||||
| `LIGHT_CLIENT_PREPARATION_EPOCHS` | `2**2` (= 4) | epochs | |
|
|
||||||
|
|
||||||
## Becoming a validator
|
|
||||||
|
|
||||||
Becoming a validator in Phase 1 is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details.
|
|
||||||
|
|
||||||
## Beacon chain validator assignments
|
|
||||||
|
|
||||||
Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details.
|
|
||||||
|
|
||||||
### Lookahead
|
|
||||||
|
|
||||||
Lookahead for beacon committee assignments operates in the same manner as Phase 0, but committee members must join a shard block pubsub topic in addition to the committee attestation topic.
|
|
||||||
|
|
||||||
Specifically _after_ finding stable peers of attestation subnets (see Phase 0) a validator should:
|
|
||||||
* Let `shard = compute_shard_from_committee_index(state, committee_index, slot)`
|
|
||||||
* Subscribe to the pubsub topic `shard_{shard}_block` (attestation subnet peers should have this topic available).
|
|
||||||
|
|
||||||
TODO: For now, the `state` we pass to `compute_shard_from_committee_index` is the current state without considering `len(state.shard_states)`, i.e., the result from `get_active_shard_count(state)` changes. We should fix it when we have shard count update logic.
|
|
||||||
|
|
||||||
## Beacon chain responsibilities
|
|
||||||
|
|
||||||
A validator has two primary responsibilities to the beacon chain: [proposing blocks](#block-proposal) and [creating attestations](#attesting). Proposals happen infrequently, whereas attestations should be created once per epoch.
|
|
||||||
|
|
||||||
These responsibilities are largely unchanged from Phase 0, but utilize the updated `SignedBeaconBlock`, `BeaconBlock`, `BeaconBlockBody`, `Attestation`, and `AttestationData` definitions found in Phase 1. Below notes only the additional and modified behavior with respect to Phase 0.
|
|
||||||
|
|
||||||
Phase 1 adds light client committees and associated responsibilities, discussed [below](#light-client-committee).
|
|
||||||
|
|
||||||
### Block proposal
|
|
||||||
|
|
||||||
#### Preparing for a `BeaconBlock`
|
|
||||||
|
|
||||||
`slot`, `proposer_index`, `parent_root`, `state_root` fields are unchanged.
|
|
||||||
|
|
||||||
#### Constructing the `BeaconBlockBody`
|
|
||||||
|
|
||||||
`randao_reveal`, `eth1_data`, and `graffiti` are unchanged.
|
|
||||||
|
|
||||||
`proposer_slashings`, `deposits`, and `voluntary_exits` are unchanged.
|
|
||||||
|
|
||||||
`attester_slashings` and `attestations` operate exactly as in Phase 0, but with new definitations of `AttesterSlashing` and `Attestation`, along with modified validation conditions found in `process_attester_slashing` and `process_attestation`.
|
|
||||||
|
|
||||||
##### Custody slashings

Up to `MAX_CUSTODY_SLASHINGS`, [`CustodySlashing`](./custody-game.md#custodyslashing) objects can be included in the `block`. The custody slashings must satisfy the verification conditions found in [custody slashings processing](./custody-game.md#custody-slashings). The validator receives a small "whistleblower" reward for each custody slashing included (THIS IS NOT CURRENTLY THE CASE BUT PROBABLY SHOULD BE).

##### Custody key reveals

Up to `MAX_CUSTODY_KEY_REVEALS`, [`CustodyKeyReveal`](./custody-game.md#custodykeyreveal) objects can be included in the `block`. The custody key reveals must satisfy the verification conditions found in [custody key reveal processing](./custody-game.md#custody-key-reveals). The validator receives a small reward for each custody key reveal included.

##### Early derived secret reveals

Up to `MAX_EARLY_DERIVED_SECRET_REVEALS`, [`EarlyDerivedSecretReveal`](./custody-game.md#earlyderivedsecretreveal) objects can be included in the `block`. The early derived secret reveals must satisfy the verification conditions found in [early derived secret reveal processing](./custody-game.md#early-derived-secret-reveals). The validator receives a small "whistleblower" reward for each early derived secret reveal included.

##### Shard transitions

Exactly `MAX_SHARDS` [`ShardTransition`](./beacon-chain.md#shardtransition) objects are included in the block. Default each to an empty `ShardTransition()`. Then for each committee assigned to the slot with an associated `committee_index` and `shard`, set `shard_transitions[shard] = full_transitions[winning_root]` if the committee had enough weight to form a crosslink this slot.

Specifically:

* Call `shards, winning_roots = get_shard_winning_roots(state, block.body.attestations)`
* Let `full_transitions` be a dictionary mapping from the `shard_transition_root`s found in `attestations` to the corresponding full `ShardTransition`
* Then for each `shard` and `winning_root` in `zip(shards, winning_roots)` set `shard_transitions[shard] = full_transitions[winning_root]` (a sketch of this assembly follows the function below)

*Note*: The `state` passed into `get_shard_winning_roots` must be transitioned to the slot of `block.slot` to run accurately due to the internal use of `get_online_validator_indices` and `is_on_time_attestation`.

```python
def get_shard_winning_roots(state: BeaconState,
                            attestations: Sequence[Attestation]) -> Tuple[Sequence[Shard], Sequence[Root]]:
    shards = []
    winning_roots = []
    online_indices = get_online_validator_indices(state)
    on_time_attestation_slot = compute_previous_slot(state.slot)
    committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(on_time_attestation_slot))
    for committee_index in map(CommitteeIndex, range(committee_count)):
        shard = compute_shard_from_committee_index(state, committee_index, on_time_attestation_slot)
        # All attestations in the block for this committee/shard that are "on time"
        shard_attestations = [
            attestation for attestation in attestations
            if is_on_time_attestation(state, attestation.data) and attestation.data.index == committee_index
        ]
        committee = get_beacon_committee(state, on_time_attestation_slot, committee_index)

        # Loop over all shard transition roots, looking for a winning root
        shard_transition_roots = set(a.data.shard_transition_root for a in shard_attestations)  # non-duplicate
        for shard_transition_root in sorted(shard_transition_roots):
            transition_attestations = [
                a for a in shard_attestations
                if a.data.shard_transition_root == shard_transition_root
            ]
            transition_participants: Set[ValidatorIndex] = set()
            for attestation in transition_attestations:
                participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
                transition_participants = transition_participants.union(participants)

            enough_online_stake = (
                get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
                get_total_balance(state, online_indices.intersection(committee)) * 2
            )
            if enough_online_stake:
                shards.append(shard)
                winning_roots.append(shard_transition_root)
                break

    return shards, winning_roots
```

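Putting the list above into code, a minimal sketch (not part of the spec; `full_transitions` is assumed to be pre-built from the gossiped `FullAttestation`s):

```python
def build_shard_transitions(state: BeaconState,
                            attestations: Sequence[Attestation],
                            full_transitions: Dict[Root, ShardTransition]) -> Sequence[ShardTransition]:
    # Default every shard to an empty transition, then fill in the winners.
    shard_transitions = [ShardTransition() for _ in range(MAX_SHARDS)]
    shards, winning_roots = get_shard_winning_roots(state, attestations)
    for shard, winning_root in zip(shards, winning_roots):
        shard_transitions[shard] = full_transitions[winning_root]
    return shard_transitions
```
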
##### Light client fields

First retrieve `best_aggregate` from `get_best_light_client_aggregate(block, aggregates)` where `aggregates` is a list of valid aggregated `LightClientVote`s for the previous slot.

Then:

* Set `light_client_bits = best_aggregate.aggregation_bits`
* Set `light_client_signature = best_aggregate.signature`

```python
def get_best_light_client_aggregate(block: BeaconBlock,
                                    aggregates: Sequence[LightClientVote]) -> LightClientVote:
    viable_aggregates = [
        aggregate for aggregate in aggregates
        if (
            aggregate.data.slot == compute_previous_slot(block.slot)
            and aggregate.data.beacon_block_root == block.parent_root
        )
    ]

    return max(
        viable_aggregates,
        # Ties broken lexicographically by hash_tree_root
        key=lambda a: (len([i for i in a.aggregation_bits if i == 1]), hash_tree_root(a)),
        default=LightClientVote(),
    )
```

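For example (illustrative only; assumes the Phase 1 `BeaconBlockBody` fields of those names):

```python
best_aggregate = get_best_light_client_aggregate(block, aggregates)
block.body.light_client_bits = best_aggregate.aggregation_bits
block.body.light_client_signature = best_aggregate.signature
```
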
#### Packaging into a `SignedBeaconBlock`

Packaging into a `SignedBeaconBlock` is unchanged from Phase 0.

### Attesting

A validator is expected to create, sign, and broadcast an attestation during each epoch.

Assignments and the core of this duty are unchanged from Phase 0. There are a few additional fields related to the assigned shard chain.

The `Attestation` and `AttestationData` defined in the [Phase 1 Beacon Chain spec](./beacon-chain.md) utilize `shard_transition_root: Root` rather than a full `ShardTransition`. For the purposes of the validator and p2p layer, a modified `FullAttestationData` and containing `FullAttestation` are used to send the accompanying `ShardTransition` in its entirety. Note that due to the properties of SSZ `hash_tree_root`, the root and signatures of `AttestationData` and `FullAttestationData` are equivalent.

#### `FullAttestationData`

```python
class FullAttestationData(Container):
    slot: Slot
    index: CommitteeIndex
    # LMD GHOST vote
    beacon_block_root: Root
    # FFG vote
    source: Checkpoint
    target: Checkpoint
    # Current-slot shard block root
    shard_head_root: Root
    # Full shard transition
    shard_transition: ShardTransition
```

#### `FullAttestation`

```python
class FullAttestation(Container):
    aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
    data: FullAttestationData
    signature: BLSSignature
```

#### Timing

Note the timing of when to create/broadcast is altered from Phase 0.

A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid `BeaconBlock` from the expected beacon block proposer and a valid `ShardBlock` from the expected shard block proposer for the assigned `slot` or (b) one-half of the `slot` has transpired (`SECONDS_PER_SLOT / 2` seconds after the start of `slot`) -- whichever comes _first_.

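A minimal sketch of this rule; `received_expected_blocks` and `seconds_into_slot` are assumed inputs from the client's block processing and local clock, not spec functions:

```python
def should_broadcast_attestation(received_expected_blocks: bool, seconds_into_slot: int) -> bool:
    # Broadcast as soon as both expected blocks have arrived,
    # or once half of the slot has transpired -- whichever comes first.
    return received_expected_blocks or seconds_into_slot >= SECONDS_PER_SLOT // 2
```
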
#### Attestation data

`attestation_data` is constructed in the same manner as Phase 0 but uses `FullAttestationData` with the addition of two fields -- `shard_head_root` and `shard_transition`.

- Let `head_block` be the result of running the fork choice during the assigned slot.
- Let `head_state` be the state of `head_block` processed through any empty slots up to the assigned slot using `process_slots(state, slot)`.
- Let `shard_head_block` be the result of running the fork choice on the assigned shard chain during the assigned slot.
- Let `shard_blocks` be the shard blocks in the chain starting immediately _after_ the most recent crosslink (`head_state.shard_transitions[shard].latest_block_root`) up to the `shard_head_block` (i.e. the value of the shard fork choice store of `get_pending_shard_blocks(store, shard_store)`).

*Note*: We assume that the fork choice only follows branches with valid `offset_slots` with respect to the most recent beacon state shard transition for the queried shard.

##### Shard head root

If `attestation_data.slot == GENESIS_SLOT`, set `attestation_data.shard_head_root = Root()`. Otherwise, set `attestation_data.shard_head_root = hash_tree_root(shard_head_block)`.

##### Shard transition

Set `shard_transition` to the value returned by `get_shard_transition(head_state, shard, shard_blocks)`.

```python
def get_shard_transition_fields(
    beacon_state: BeaconState,
    shard: Shard,
    shard_blocks: Sequence[SignedShardBlock],
) -> Tuple[Sequence[uint64], Sequence[Root], Sequence[ShardState]]:
    shard_block_lengths = []  # type: PyList[uint64]
    shard_data_roots = []  # type: PyList[Root]
    shard_states = []  # type: PyList[ShardState]

    shard_state = beacon_state.shard_states[shard]
    shard_block_slots = [shard_block.message.slot for shard_block in shard_blocks]
    offset_slots = compute_offset_slots(
        get_latest_slot_for_shard(beacon_state, shard),
        Slot(beacon_state.slot + 1),
    )
    for slot in offset_slots:
        if slot in shard_block_slots:
            shard_block = shard_blocks[shard_block_slots.index(slot)]
            shard_data_roots.append(hash_tree_root(shard_block.message.body))
        else:
            shard_block = SignedShardBlock(message=ShardBlock(slot=slot, shard=shard))
            shard_data_roots.append(Root())
        shard_state = shard_state.copy()
        process_shard_block(shard_state, shard_block.message)
        shard_states.append(shard_state)
        shard_block_lengths.append(uint64(len(shard_block.message.body)))

    return shard_block_lengths, shard_data_roots, shard_states
```

```python
def get_shard_transition(beacon_state: BeaconState,
                         shard: Shard,
                         shard_blocks: Sequence[SignedShardBlock]) -> ShardTransition:
    # NOTE: We currently set `PHASE_1_FORK_SLOT` to `GENESIS_SLOT` for test vectors.
    if beacon_state.slot == GENESIS_SLOT:
        return ShardTransition()

    offset_slots = compute_offset_slots(
        get_latest_slot_for_shard(beacon_state, shard),
        Slot(beacon_state.slot + 1),
    )
    shard_block_lengths, shard_data_roots, shard_states = (
        get_shard_transition_fields(beacon_state, shard, shard_blocks)
    )

    if len(shard_blocks) > 0:
        proposer_signatures = [shard_block.signature for shard_block in shard_blocks]
        proposer_signature_aggregate = bls.Aggregate(proposer_signatures)
    else:
        proposer_signature_aggregate = NO_SIGNATURE

    return ShardTransition(
        start_slot=offset_slots[0],
        shard_block_lengths=shard_block_lengths,
        shard_data_roots=shard_data_roots,
        shard_states=shard_states,
        proposer_signature_aggregate=proposer_signature_aggregate,
    )
```

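Assembling the full data is then mechanical. An illustrative helper (not a spec function) combining the Phase 0 fields with the two additions:

```python
def get_full_attestation_data(attestation_data: AttestationData,
                              shard_head_root: Root,
                              shard_transition: ShardTransition) -> FullAttestationData:
    # Copy the Phase 0 fields and attach the shard-related additions.
    return FullAttestationData(
        slot=attestation_data.slot,
        index=attestation_data.index,
        beacon_block_root=attestation_data.beacon_block_root,
        source=attestation_data.source,
        target=attestation_data.target,
        shard_head_root=shard_head_root,
        shard_transition=shard_transition,
    )
```
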
#### Construct attestation

Next, the validator creates `attestation`, a `FullAttestation` as defined above.

`attestation.data`, `attestation.aggregation_bits`, and `attestation.signature` are unchanged from Phase 0. But safety/validity in signing the message is premised upon calculation of the "custody bit" [TODO].

### Attestation aggregation

Some validators are selected to locally aggregate attestations with a similar `attestation_data` to their constructed `attestation` for the assigned `slot`.

Aggregation selection and the core of this duty are largely unchanged from Phase 0. Any additional components or changes are noted.

#### Broadcast aggregate

Note the timing of when to broadcast aggregates is altered in Phase 1+.

If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) three-fourths of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 3 / 4` seconds after the start of `slot`.

##### `AggregateAndProof`

`AggregateAndProof` is unchanged other than the contained `Attestation`.

```python
class AggregateAndProof(Container):
    aggregator_index: ValidatorIndex
    aggregate: Attestation
    selection_proof: BLSSignature
```

##### `SignedAggregateAndProof`

`SignedAggregateAndProof` is unchanged other than the contained `AggregateAndProof`.

```python
class SignedAggregateAndProof(Container):
    message: AggregateAndProof
    signature: BLSSignature
```

### Light client committee

In addition to the core beacon chain responsibilities, Phase 1 adds an additional role -- the Light Client Committee -- to aid in light client functionality.

Validators serve on the light client committee for `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs, and committee assignments are known `LIGHT_CLIENT_COMMITTEE_PERIOD` epochs in advance.

#### Preparation

When `get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == LIGHT_CLIENT_COMMITTEE_PERIOD - LIGHT_CLIENT_PREPARATION_EPOCHS`, each validator must check if they are in the next period's light client committee by calling `is_in_next_light_client_committee()`.

If the validator is in the next light client committee, they must join the `light_client_votes` pubsub topic to begin duties at the start of the next period.

```python
def is_in_next_light_client_committee(state: BeaconState, index: ValidatorIndex) -> bool:
    next_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
    return index in next_committee
```

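An illustrative preparation check at the boundary epoch; `subscribe` is a hypothetical gossip-layer call and `validator_index` is assumed in scope:

```python
current_epoch = get_current_epoch(state)
preparation_boundary = LIGHT_CLIENT_COMMITTEE_PERIOD - LIGHT_CLIENT_PREPARATION_EPOCHS
if current_epoch % LIGHT_CLIENT_COMMITTEE_PERIOD == preparation_boundary:
    if is_in_next_light_client_committee(state, validator_index):
        subscribe("light_client_votes")  # hypothetical gossip-layer call
```
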
#### Light client vote

During a period of epochs that the validator is a part of the light client committee (`validator_index in get_light_client_committee(state, epoch)`), the validator creates and broadcasts a `LightClientVote` at each slot.

A validator should create and broadcast the `light_client_vote` to the `light_client_votes` pubsub topic when either (a) the validator has received a valid block from the expected block proposer for the current `slot` or (b) one-third of the `slot` has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.

- Let `light_client_committee = get_light_client_committee(state, compute_epoch_at_slot(slot))`

##### Light client vote data

First the validator constructs `light_client_vote_data`, a [`LightClientVoteData`](#lightclientvotedata) object.

* Let `head_block` be the result of running the fork choice during the assigned slot.
* Set `light_client_vote_data.slot = slot`.
* Set `light_client_vote_data.beacon_block_root = hash_tree_root(head_block)`.

###### `LightClientVoteData`

```python
class LightClientVoteData(Container):
    slot: Slot
    beacon_block_root: Root
```

##### Construct vote

Then the validator constructs `light_client_vote`, a [`LightClientVote`](#lightclientvote) object (a sketch tying these steps together follows the container definition below).

* Set `light_client_vote.data = light_client_vote_data`.
* Set `light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where the bit of the index of the validator in the `light_client_committee` is set to `0b1` and all other bits are set to `0b0`.
* Set `light_client_vote.signature = vote_signature` where `vote_signature` is obtained from:

```python
def get_light_client_vote_signature(state: BeaconState,
                                    light_client_vote_data: LightClientVoteData,
                                    privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(light_client_vote_data.slot))
    signing_root = compute_signing_root(light_client_vote_data, domain)
    return bls.Sign(privkey, signing_root)
```

###### `LightClientVote`

```python
class LightClientVote(Container):
    data: LightClientVoteData
    aggregation_bits: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]
    signature: BLSSignature
```

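A minimal sketch of the construction (illustrative; the function name is not part of the spec):

```python
def build_light_client_vote(state: BeaconState,
                            validator_index: ValidatorIndex,
                            light_client_vote_data: LightClientVoteData,
                            privkey: int) -> LightClientVote:
    committee = get_light_client_committee(state, compute_epoch_at_slot(light_client_vote_data.slot))
    # Single-bit vector with only this validator's committee position set.
    aggregation_bits = Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]()
    aggregation_bits[committee.index(validator_index)] = 0b1
    return LightClientVote(
        data=light_client_vote_data,
        aggregation_bits=aggregation_bits,
        signature=get_light_client_vote_signature(state, light_client_vote_data, privkey),
    )
```
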
##### Broadcast

Finally, the validator broadcasts `light_client_vote` to the `light_client_votes` pubsub topic.

#### Light client vote aggregation

Some validators in the light client committee are selected to locally aggregate light client votes with a similar `light_client_vote_data` to their constructed `light_client_vote` for the assigned `slot`.

#### Aggregation selection

A validator is selected to aggregate based upon the return value of `is_light_client_aggregator()`.

```python
def get_light_client_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
    domain = get_domain(state, DOMAIN_LIGHT_SELECTION_PROOF, compute_epoch_at_slot(slot))
    signing_root = compute_signing_root(slot, domain)
    return bls.Sign(privkey, signing_root)
```

```python
def is_light_client_aggregator(state: BeaconState, slot: Slot, slot_signature: BLSSignature) -> bool:
    committee = get_light_client_committee(state, compute_epoch_at_slot(slot))
    modulo = max(1, len(committee) // TARGET_LIGHT_CLIENT_AGGREGATORS_PER_SLOT)
    return bytes_to_uint64(hash(slot_signature)[0:8]) % modulo == 0
```

#### Construct aggregate

If the validator is selected to aggregate (`is_light_client_aggregator()`), they construct an aggregate light client vote via the following.

Collect `light_client_votes` seen via gossip during the `slot` that have an equivalent `light_client_vote_data` to that constructed by the validator, and create an `aggregate_light_client_vote: LightClientVote` with the following fields (see the sketch after the signature helper below).

* Set `aggregate_light_client_vote.data = light_client_vote_data` where `light_client_vote_data` is the `LightClientVoteData` object that is the same for each individual light client vote being aggregated.
* Set `aggregate_light_client_vote.aggregation_bits` to be a `Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]`, where each bit set from each individual light client vote is set to `0b1`.
* Set `aggregate_light_client_vote.signature = aggregate_light_client_signature` where `aggregate_light_client_signature` is obtained from `get_aggregate_light_client_signature`.

```python
def get_aggregate_light_client_signature(light_client_votes: Sequence[LightClientVote]) -> BLSSignature:
    signatures = [light_client_vote.signature for light_client_vote in light_client_votes]
    return bls.Aggregate(signatures)
```

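An illustrative aggregation helper (not a spec function; all votes are assumed to share the same `data`):

```python
def build_aggregate_light_client_vote(light_client_votes: Sequence[LightClientVote]) -> LightClientVote:
    # Union the participation bits of all individual votes.
    aggregation_bits = Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]()
    for vote in light_client_votes:
        for i in range(LIGHT_CLIENT_COMMITTEE_SIZE):
            if vote.aggregation_bits[i]:
                aggregation_bits[i] = 0b1
    return LightClientVote(
        data=light_client_votes[0].data,
        aggregation_bits=aggregation_bits,
        signature=get_aggregate_light_client_signature(light_client_votes),
    )
```
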
#### Broadcast aggregate

If the validator is selected to aggregate (`is_light_client_aggregator`), then they broadcast their best aggregate light client vote as a `SignedLightAggregateAndProof` to the global aggregate light client vote channel (`aggregate_light_client_votes`) two-thirds of the way through the `slot` -- that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.

Selection proofs are provided in `LightAggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.

`LightAggregateAndProof` messages are signed by the aggregator and broadcast inside of `SignedLightAggregateAndProof` objects to prevent a class of DoS attacks and message forgeries.

First, `light_aggregate_and_proof = get_light_aggregate_and_proof(state, validator_index, aggregate_light_client_vote, privkey)` is constructed.

```python
def get_light_aggregate_and_proof(state: BeaconState,
                                  aggregator_index: ValidatorIndex,
                                  aggregate: LightClientVote,
                                  privkey: int) -> LightAggregateAndProof:
    return LightAggregateAndProof(
        aggregator_index=aggregator_index,
        aggregate=aggregate,
        selection_proof=get_light_client_slot_signature(state, aggregate.data.slot, privkey),
    )
```

Then `signed_light_aggregate_and_proof = SignedLightAggregateAndProof(message=light_aggregate_and_proof, signature=signature)` is constructed and broadcast, where `signature` is obtained from:

```python
def get_light_aggregate_and_proof_signature(state: BeaconState,
                                            aggregate_and_proof: LightAggregateAndProof,
                                            privkey: int) -> BLSSignature:
    aggregate = aggregate_and_proof.aggregate
    domain = get_domain(state, DOMAIN_LIGHT_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot))
    signing_root = compute_signing_root(aggregate_and_proof, domain)
    return bls.Sign(privkey, signing_root)
```

##### `LightAggregateAndProof`

```python
class LightAggregateAndProof(Container):
    aggregator_index: ValidatorIndex
    aggregate: LightClientVote
    selection_proof: BLSSignature
```

##### `SignedLightAggregateAndProof`

```python
class SignedLightAggregateAndProof(Container):
    message: LightAggregateAndProof
    signature: BLSSignature
```

## How to avoid slashing

Proposer and Attester slashings described in Phase 0 remain in place with the addition of the following.

### Custody slashing

To avoid custody slashings, the attester must never sign any shard transition for which the custody bit is one. The custody bit is computed using the custody secret:

```python
def get_custody_secret(state: BeaconState,
                       validator_index: ValidatorIndex,
                       privkey: int,
                       epoch: Epoch=None) -> BLSSignature:
    if epoch is None:
        epoch = get_current_epoch(state)
    period = get_custody_period_for_validator(validator_index, epoch)
    epoch_to_sign = get_randao_epoch_for_custody_period(period, validator_index)
    domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
    signing_root = compute_signing_root(Epoch(epoch_to_sign), domain)
    return bls.Sign(privkey, signing_root)
```

Note that the valid custody secret is always the one for the **attestation target epoch**, not to be confused with the epoch in which the shard block was generated. While they are the same most of the time, getting this wrong at custody epoch boundaries would result in a custody slashing.

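For example (illustrative; `attestation`, `validator_index`, and `privkey` are assumed in scope):

```python
# Derive the secret for the attestation *target* epoch, not the shard block's epoch.
custody_secret = get_custody_secret(
    state,
    validator_index,
    privkey,
    epoch=attestation.data.target.epoch,
)
```
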
@@ -0,0 +1,779 @@

# Ethereum 2.0 Sharding -- Beacon Chain changes

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Custom types](#custom-types)
- [Constants](#constants)
- [Configuration](#configuration)
  - [Misc](#misc)
  - [Shard block configs](#shard-block-configs)
  - [Precomputed size verification points](#precomputed-size-verification-points)
  - [Gwei values](#gwei-values)
  - [Time parameters](#time-parameters)
  - [Domain types](#domain-types)
- [Updated containers](#updated-containers)
  - [`AttestationData`](#attestationdata)
  - [`BeaconBlockBody`](#beaconblockbody)
  - [`BeaconState`](#beaconstate)
- [New containers](#new-containers)
  - [`DataCommitment`](#datacommitment)
  - [`ShardBlobBodySummary`](#shardblobbodysummary)
  - [`ShardBlobHeader`](#shardblobheader)
  - [`SignedShardBlobHeader`](#signedshardblobheader)
  - [`PendingShardHeader`](#pendingshardheader)
  - [`ShardBlobReference`](#shardblobreference)
  - [`SignedShardBlobReference`](#signedshardblobreference)
  - [`ShardProposerSlashing`](#shardproposerslashing)
- [Helper functions](#helper-functions)
  - [Misc](#misc-1)
    - [`next_power_of_two`](#next_power_of_two)
    - [`compute_previous_slot`](#compute_previous_slot)
    - [`compute_updated_gasprice`](#compute_updated_gasprice)
    - [`compute_committee_source_epoch`](#compute_committee_source_epoch)
  - [Beacon state accessors](#beacon-state-accessors)
    - [Updated `get_committee_count_per_slot`](#updated-get_committee_count_per_slot)
    - [`get_active_shard_count`](#get_active_shard_count)
    - [`get_shard_committee`](#get_shard_committee)
    - [`compute_proposer_index`](#compute_proposer_index)
    - [`get_shard_proposer_index`](#get_shard_proposer_index)
    - [`get_start_shard`](#get_start_shard)
    - [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
    - [`compute_committee_index_from_shard`](#compute_committee_index_from_shard)
  - [Block processing](#block-processing)
    - [Operations](#operations)
  - [New Attestation processing](#new-attestation-processing)
    - [Updated `process_attestation`](#updated-process_attestation)
    - [`update_pending_votes`](#update_pending_votes)
    - [`process_shard_header`](#process_shard_header)
    - [Shard Proposer slashings](#shard-proposer-slashings)
  - [Epoch transition](#epoch-transition)
    - [Pending headers](#pending-headers)
    - [Shard epoch increment](#shard-epoch-increment)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document describes the extensions made to the Phase 0 design of The Beacon Chain to support data sharding,
based on the ideas [here](https://hackmd.io/G-Iy5jqyT7CXWEz8Ssos8g) and more broadly [here](https://arxiv.org/abs/1809.09044),
using KZG10 commitments to commit to data to remove any need for fraud proofs (and hence, safety-critical synchrony assumptions) in the design.

## Custom types

We define the following Python custom types for type hinting and readability:

| Name | SSZ equivalent | Description |
| - | - | - |
| `Shard` | `uint64` | A shard number |
| `BLSCommitment` | `Bytes48` | A G1 curve point |
| `BLSPoint` | `uint256` | A number `x` in the range `0 <= x < MODULUS` |

## Constants

The following values are (non-configurable) constants used throughout the specification.

| Name | Value | Notes |
| - | - | - |
| `PRIMITIVE_ROOT_OF_UNITY` | `5` | Primitive root of unity of the BLS12_381 (inner) modulus |
| `DATA_AVAILABILITY_INVERSE_CODING_RATE` | `2**1` (= 2) | Factor by which samples are extended for data availability encoding |
| `POINTS_PER_SAMPLE` | `uint64(2**3)` (= 8) | 31 * 8 = 248 bytes |
| `MODULUS` | `0x73eda753299d7d483339d80809a1d80553bda402fffe5bfeffffffff00000001` | Curve order of BLS12_381 |

## Configuration

### Misc

| Name | Value | Notes |
| - | - | - |
| `MAX_SHARDS` | `uint64(2**10)` (= 1,024) | Theoretical max shard count (used to determine data structure sizes) |
| `INITIAL_ACTIVE_SHARDS` | `uint64(2**6)` (= 64) | Initial shard count |
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `uint64(2**3)` (= 8) | Gasprice may decrease/increase by at most exp(1 / this value) *per epoch* |
| `MAX_SHARD_HEADERS_PER_SHARD` | `4` | |
| `MAX_SHARD_PROPOSER_SLASHINGS` | `2**4` (= 16) | Maximum amount of shard proposer slashing operations per block |

### Shard block configs

| Name | Value | Notes |
| - | - | - |
| `MAX_SAMPLES_PER_BLOCK` | `uint64(2**11)` (= 2,048) | 248 * 2,048 = 507,904 bytes |
| `TARGET_SAMPLES_PER_BLOCK` | `uint64(2**10)` (= 1,024) | 248 * 1,024 = 253,952 bytes |

### Precomputed size verification points

| Name | Value |
| - | - |
| `G1_SETUP` | Type `List[G1]`. The G1-side trusted setup `[G, G*s, G*s**2....]`; note that the first point is the generator. |
| `G2_SETUP` | Type `List[G2]`. The G2-side trusted setup `[G, G*s, G*s**2....]` |
| `ROOT_OF_UNITY` | `pow(PRIMITIVE_ROOT_OF_UNITY, (MODULUS - 1) // int(MAX_SAMPLES_PER_BLOCK * POINTS_PER_SAMPLE), MODULUS)` |

### Gwei values

| Name | Value | Unit | Description |
| - | - | - | - |
| `MAX_GASPRICE` | `Gwei(2**33)` (= 8,589,934,592) | Gwei | Max gasprice charged for a TARGET-sized shard block |
| `MIN_GASPRICE` | `Gwei(2**3)` (= 8) | Gwei | Min gasprice charged for a TARGET-sized shard block |

### Time parameters

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |

### Domain types

| Name | Value |
| - | - |
| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` |
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` |

## Updated containers

The following containers have updated definitions to support Sharding.

### `AttestationData`

```python
class AttestationData(Container):
    slot: Slot
    index: CommitteeIndex
    # LMD GHOST vote
    beacon_block_root: Root
    # FFG vote
    source: Checkpoint
    target: Checkpoint
    # Shard header root
    shard_header_root: Root  # [New in Sharding]
```

### `BeaconBlockBody`

```python
class BeaconBlockBody(merge.BeaconBlockBody):  # [extends The Merge block body]
    shard_proposer_slashings: List[ShardProposerSlashing, MAX_SHARD_PROPOSER_SLASHINGS]
    shard_headers: List[SignedShardBlobHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD]
```

### `BeaconState`

```python
class BeaconState(merge.BeaconState):  # [extends The Merge state]
    # [Updated fields]
    previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
    current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
    # [New fields]
    previous_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH]
    current_epoch_pending_shard_headers: List[PendingShardHeader, MAX_SHARDS * MAX_SHARD_HEADERS_PER_SHARD * SLOTS_PER_EPOCH]
    grandparent_epoch_confirmed_commitments: Vector[Vector[DataCommitment, SLOTS_PER_EPOCH], MAX_SHARDS]
    shard_gasprice: uint64
    current_epoch_start_shard: Shard
```

## New containers

The shard data itself is network-layer only, and can be found in the [P2P specification](./p2p-interface.md).
The beacon chain registers just the commitments of the shard data.

### `DataCommitment`

```python
class DataCommitment(Container):
    # KZG10 commitment to the data
    point: BLSCommitment
    # Length of the data in samples
    length: uint64
```

### `ShardBlobBodySummary`

```python
class ShardBlobBodySummary(Container):
    # The actual data commitment
    commitment: DataCommitment
    # Proof that the degree < commitment.length
    degree_proof: BLSCommitment
    # Hash-tree-root as summary of the data field
    data_root: Root
    # Latest block root of the Beacon Chain, before shard_blob.slot
    beacon_block_root: Root
```

### `ShardBlobHeader`

```python
class ShardBlobHeader(Container):
    # Slot and shard that this header is intended for
    slot: Slot
    shard: Shard
    body_summary: ShardBlobBodySummary
    # Proposer of the shard-blob
    proposer_index: ValidatorIndex
```

### `SignedShardBlobHeader`

```python
class SignedShardBlobHeader(Container):
    message: ShardBlobHeader
    signature: BLSSignature
```

### `PendingShardHeader`

```python
class PendingShardHeader(Container):
    # Slot and shard that this header is intended for
    slot: Slot
    shard: Shard
    # KZG10 commitment to the data
    commitment: DataCommitment
    # hash_tree_root of the ShardHeader (stored so that attestations can be checked against it)
    root: Root
    # Who voted for the header
    votes: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
    # Has this header been confirmed?
    confirmed: boolean
```

### `ShardBlobReference`

```python
class ShardBlobReference(Container):
    # Slot and shard that this reference is intended for
    slot: Slot
    shard: Shard
    # Hash-tree-root of commitment data
    body_root: Root
    # Proposer of the shard-blob
    proposer_index: ValidatorIndex
```

### `SignedShardBlobReference`

```python
class SignedShardBlobReference(Container):
    message: ShardBlobReference
    signature: BLSSignature
```

### `ShardProposerSlashing`

```python
class ShardProposerSlashing(Container):
    signed_reference_1: SignedShardBlobReference
    signed_reference_2: SignedShardBlobReference
```

## Helper functions

### Misc

#### `next_power_of_two`

```python
def next_power_of_two(x: int) -> int:
    return 2 ** ((x - 1).bit_length())
```

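For example (illustrative):

```python
assert next_power_of_two(1) == 1  # (0).bit_length() == 0
assert next_power_of_two(5) == 8  # (4).bit_length() == 3
assert next_power_of_two(8) == 8  # already a power of two
```
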
#### `compute_previous_slot`

```python
def compute_previous_slot(slot: Slot) -> Slot:
    if slot > 0:
        return Slot(slot - 1)
    else:
        return Slot(0)
```

#### `compute_updated_gasprice`

```python
def compute_updated_gasprice(prev_gasprice: Gwei, shard_block_length: uint64, adjustment_quotient: uint64) -> Gwei:
    if shard_block_length > TARGET_SAMPLES_PER_BLOCK:
        delta = max(1, prev_gasprice * (shard_block_length - TARGET_SAMPLES_PER_BLOCK)
                    // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
        return min(prev_gasprice + delta, MAX_GASPRICE)
    else:
        delta = max(1, prev_gasprice * (TARGET_SAMPLES_PER_BLOCK - shard_block_length)
                    // TARGET_SAMPLES_PER_BLOCK // adjustment_quotient)
        return max(prev_gasprice, MIN_GASPRICE + delta) - delta
```

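An illustrative sanity check of this EIP-1559-style adjustment (assumes `state` and `epoch` are in scope; the quotient mirrors the one used in `charge_confirmed_header_fees` below):

```python
prev = Gwei(1_000_000)
quotient = uint64(get_active_shard_count(state, epoch) * SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT)
raised = compute_updated_gasprice(prev, MAX_SAMPLES_PER_BLOCK, quotient)  # oversized block: price rises
lowered = compute_updated_gasprice(prev, uint64(0), quotient)             # empty block: price falls
assert lowered < prev < raised
```
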
#### `compute_committee_source_epoch`

```python
def compute_committee_source_epoch(epoch: Epoch, period: uint64) -> Epoch:
    """
    Return the source epoch for computing the committee.
    """
    source_epoch = Epoch(epoch - epoch % period)
    if source_epoch >= period:
        source_epoch -= period  # `period` epochs lookahead
    return source_epoch
```

### Beacon state accessors

#### Updated `get_committee_count_per_slot`

```python
def get_committee_count_per_slot(state: BeaconState, epoch: Epoch) -> uint64:
    """
    Return the number of committees in each slot for the given ``epoch``.
    """
    return max(uint64(1), min(
        get_active_shard_count(state, epoch),
        uint64(len(get_active_validator_indices(state, epoch))) // SLOTS_PER_EPOCH // TARGET_COMMITTEE_SIZE,
    ))
```

#### `get_active_shard_count`

```python
def get_active_shard_count(state: BeaconState, epoch: Epoch) -> uint64:
    """
    Return the number of active shards.
    Note that this puts an upper bound on the number of committees per slot.
    """
    return INITIAL_ACTIVE_SHARDS
```

#### `get_shard_committee`

```python
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
    """
    Return the shard committee of the given ``epoch`` of the given ``shard``.
    """
    source_epoch = compute_committee_source_epoch(epoch, SHARD_COMMITTEE_PERIOD)
    active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
    seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
    return compute_committee(
        indices=active_validator_indices,
        seed=seed,
        index=shard,
        count=get_active_shard_count(beacon_state, epoch),
    )
```

#### `compute_proposer_index`

Updated version to get a proposer index that will only allow proposers with a certain minimum balance,
ensuring that the balance is always sufficient to cover gas costs.

```python
def compute_proposer_index(beacon_state: BeaconState,
                           indices: Sequence[ValidatorIndex],
                           seed: Bytes32,
                           min_effective_balance: Gwei = Gwei(0)) -> ValidatorIndex:
    """
    Return from ``indices`` a random index sampled by effective balance.
    """
    assert len(indices) > 0
    MAX_RANDOM_BYTE = 2**8 - 1
    i = uint64(0)
    total = uint64(len(indices))
    while True:
        candidate_index = indices[compute_shuffled_index(i % total, total, seed)]
        random_byte = hash(seed + uint_to_bytes(uint64(i // 32)))[i % 32]
        effective_balance = beacon_state.validators[candidate_index].effective_balance
        if effective_balance <= min_effective_balance:
            i += 1  # advance the counter before skipping, to avoid looping on the same candidate
            continue
        if effective_balance * MAX_RANDOM_BYTE >= MAX_EFFECTIVE_BALANCE * random_byte:
            return candidate_index
        i += 1
```

#### `get_shard_proposer_index`

```python
def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard) -> ValidatorIndex:
    """
    Return the proposer's index of shard block at ``slot``.
    """
    epoch = compute_epoch_at_slot(slot)
    committee = get_shard_committee(beacon_state, epoch, shard)
    seed = hash(get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER) + uint_to_bytes(slot))

    # Proposer must have sufficient balance to pay for worst case fee burn
    EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION = (
        EFFECTIVE_BALANCE_INCREMENT - EFFECTIVE_BALANCE_INCREMENT
        * HYSTERESIS_DOWNWARD_MULTIPLIER // HYSTERESIS_QUOTIENT
    )
    min_effective_balance = (
        beacon_state.shard_gasprice * MAX_SAMPLES_PER_BLOCK // TARGET_SAMPLES_PER_BLOCK
        + EFFECTIVE_BALANCE_MAX_DOWNWARD_DEVIATION
    )
    return compute_proposer_index(beacon_state, committee, seed, min_effective_balance)
```

#### `get_start_shard`

```python
def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
    """
    Return the start shard at ``slot``.
    """
    current_epoch_start_slot = compute_start_slot_at_epoch(get_current_epoch(state))
    shard = state.current_epoch_start_shard
    if slot > current_epoch_start_slot:
        # Current epoch or the next epoch lookahead
        for _slot in range(current_epoch_start_slot, slot):
            committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
            active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
            shard = (shard + committee_count) % active_shard_count
    elif slot < current_epoch_start_slot:
        # Previous epoch
        for _slot in list(range(slot, current_epoch_start_slot))[::-1]:
            committee_count = get_committee_count_per_slot(state, compute_epoch_at_slot(Slot(_slot)))
            active_shard_count = get_active_shard_count(state, compute_epoch_at_slot(Slot(_slot)))
            # Ensure positive
            shard = (shard + active_shard_count - committee_count) % active_shard_count
    return Shard(shard)
```

#### `compute_shard_from_committee_index`

```python
def compute_shard_from_committee_index(state: BeaconState, slot: Slot, index: CommitteeIndex) -> Shard:
    active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot))
    return Shard((index + get_start_shard(state, slot)) % active_shards)
```

#### `compute_committee_index_from_shard`

```python
def compute_committee_index_from_shard(state: BeaconState, slot: Slot, shard: Shard) -> CommitteeIndex:
    active_shards = get_active_shard_count(state, compute_epoch_at_slot(slot))
    return CommitteeIndex((active_shards + shard - get_start_shard(state, slot)) % active_shards)
```

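These two accessors are inverses of each other for a fixed `state` and `slot`. An illustrative round-trip check (assumes a populated `state` and a valid `slot`):

```python
index = CommitteeIndex(0)
shard = compute_shard_from_committee_index(state, slot, index)
assert compute_committee_index_from_shard(state, slot, shard) == index
```
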
### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    process_operations(state, block.body)  # [Modified in Sharding]
    process_execution_payload(state, block.body)  # [New in Merge]
```

#### Operations

```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # Verify that outstanding deposits are processed up to the maximum number of deposits
    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)

    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
            fn(state, operation)

    for_ops(body.proposer_slashings, process_proposer_slashing)
    for_ops(body.attester_slashings, process_attester_slashing)
    # New shard proposer slashing processing
    for_ops(body.shard_proposer_slashings, process_shard_proposer_slashing)
    # Limit is dynamic based on active shard count
    assert len(body.shard_headers) <= MAX_SHARD_HEADERS_PER_SHARD * get_active_shard_count(state, get_current_epoch(state))
    for_ops(body.shard_headers, process_shard_header)
    # New attestation processing
    for_ops(body.attestations, process_attestation)
    for_ops(body.deposits, process_deposit)
    for_ops(body.voluntary_exits, process_voluntary_exit)
```

### New Attestation processing

#### Updated `process_attestation`

```python
def process_attestation(state: BeaconState, attestation: Attestation) -> None:
    phase0.process_attestation(state, attestation)
    update_pending_votes(state, attestation)
```

#### `update_pending_votes`

```python
def update_pending_votes(state: BeaconState, attestation: Attestation) -> None:
    # Find and update the PendingShardHeader object, invalid block if pending header not in state
    if compute_epoch_at_slot(attestation.data.slot) == get_current_epoch(state):
        pending_headers = state.current_epoch_pending_shard_headers
    else:
        pending_headers = state.previous_epoch_pending_shard_headers
    pending_header = None
    for header in pending_headers:
        if header.root == attestation.data.shard_header_root:
            pending_header = header
    assert pending_header is not None
    assert pending_header.slot == attestation.data.slot
    assert pending_header.shard == compute_shard_from_committee_index(
        state,
        attestation.data.slot,
        attestation.data.index,
    )
    for i in range(len(pending_header.votes)):
        pending_header.votes[i] = pending_header.votes[i] or attestation.aggregation_bits[i]

    # Check if the PendingShardHeader is eligible for expedited confirmation
    # Requirement 1: nothing else confirmed
    all_candidates = [
        c for c in pending_headers if
        (c.slot, c.shard) == (pending_header.slot, pending_header.shard)
    ]
    if True in [c.confirmed for c in all_candidates]:
        return

    # Requirement 2: >= 2/3 of balance attesting
    participants = get_attesting_indices(state, attestation.data, pending_header.votes)
    participants_balance = get_total_balance(state, participants)
    full_committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
    full_committee_balance = get_total_balance(state, set(full_committee))
    if participants_balance * 3 >= full_committee_balance * 2:
        pending_header.confirmed = True
```

#### `process_shard_header`

```python
def process_shard_header(state: BeaconState,
                         signed_header: SignedShardBlobHeader) -> None:
    header = signed_header.message
    # Verify the header is not 0, and not from the future.
    assert Slot(0) < header.slot <= state.slot
    header_epoch = compute_epoch_at_slot(header.slot)
    # Verify that the header is within the processing time window
    assert header_epoch in [get_previous_epoch(state), get_current_epoch(state)]
    # Verify that the shard is active
    assert header.shard < get_active_shard_count(state, header_epoch)
    # Verify that the block root matches,
    # to ensure the header will only be included in this specific Beacon Chain sub-tree.
    assert header.body_summary.beacon_block_root == get_block_root_at_slot(state, header.slot - 1)
    # Verify proposer
    assert header.proposer_index == get_shard_proposer_index(state, header.slot, header.shard)
    # Verify signature
    signing_root = compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSER))
    assert bls.Verify(state.validators[header.proposer_index].pubkey, signing_root, signed_header.signature)

    # Verify the length by verifying the degree.
    body_summary = header.body_summary
    if body_summary.commitment.length == 0:
        assert body_summary.degree_proof == G1_SETUP[0]
    assert (
        bls.Pairing(body_summary.degree_proof, G2_SETUP[0])
        == bls.Pairing(body_summary.commitment.point, G2_SETUP[-body_summary.commitment.length])
    )

    # Get the correct pending header list
    if header_epoch == get_current_epoch(state):
        pending_headers = state.current_epoch_pending_shard_headers
    else:
        pending_headers = state.previous_epoch_pending_shard_headers

    header_root = hash_tree_root(header)
    # Check that this header is not yet in the pending list
    assert header_root not in [pending_header.root for pending_header in pending_headers]

    # Include it in the pending list
    index = compute_committee_index_from_shard(state, header.slot, header.shard)
    committee_length = len(get_beacon_committee(state, header.slot, index))
    pending_headers.append(PendingShardHeader(
        slot=header.slot,
        shard=header.shard,
        commitment=body_summary.commitment,
        root=header_root,
        votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
        confirmed=False,
    ))
```

The degree proof works as follows. For a block `B` with length `l` (so `l` values in `[0...l - 1]`, seen as a polynomial `B(X)` which takes these values),
the length proof is the commitment to the polynomial `B(X) * X**(MAX_DEGREE + 1 - l)`,
where `MAX_DEGREE` is the maximum power of `s` available in the setup, which is `MAX_DEGREE = len(G2_SETUP) - 1`.
The goal is to ensure that a proof can only be constructed if `deg(B) < l` (there are no hidden higher-order terms in the polynomial, which would thwart reconstruction):
the pairing check accepts only if the proof commits to `B(X) * X**(MAX_DEGREE + 1 - l)`, and that commitment is only computable from the setup when `deg(B) + MAX_DEGREE + 1 - l <= MAX_DEGREE`, i.e. `deg(B) < l`.

#### Shard Proposer slashings

```python
def process_shard_proposer_slashing(state: BeaconState, proposer_slashing: ShardProposerSlashing) -> None:
    reference_1 = proposer_slashing.signed_reference_1.message
    reference_2 = proposer_slashing.signed_reference_2.message

    # Verify header slots match
    assert reference_1.slot == reference_2.slot
    # Verify header shards match
    assert reference_1.shard == reference_2.shard
    # Verify header proposer indices match
    assert reference_1.proposer_index == reference_2.proposer_index
    # Verify the headers are different (i.e. different body)
    assert reference_1 != reference_2
    # Verify the proposer is slashable
    proposer = state.validators[reference_1.proposer_index]
    assert is_slashable_validator(proposer, get_current_epoch(state))
    # Verify signatures
    for signed_header in (proposer_slashing.signed_reference_1, proposer_slashing.signed_reference_2):
        domain = get_domain(state, DOMAIN_SHARD_PROPOSER, compute_epoch_at_slot(signed_header.message.slot))
        signing_root = compute_signing_root(signed_header.message, domain)
        assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)

    slash_validator(state, reference_1.proposer_index)
```

### Epoch transition

This epoch transition overrides the Merge epoch transition:

```python
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)
    process_rewards_and_penalties(state)
    process_registry_updates(state)

    process_slashings(state)

    # Sharding
    process_pending_headers(state)
    charge_confirmed_header_fees(state)
    reset_pending_headers(state)

    # Final updates
    # Phase 0
    process_eth1_data_reset(state)
    process_effective_balance_updates(state)
    process_slashings_reset(state)
    process_randao_mixes_reset(state)
    process_historical_roots_update(state)
    process_participation_record_updates(state)

    process_shard_epoch_increment(state)
```

#### Pending headers

```python
def process_pending_headers(state: BeaconState) -> None:
    # Pending header processing applies to the previous epoch.
    # Skip if `GENESIS_EPOCH` because no prior epoch to process.
    if get_current_epoch(state) == GENESIS_EPOCH:
        return

    previous_epoch = get_previous_epoch(state)
    previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
    for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
        for shard_index in range(get_active_shard_count(state, previous_epoch)):
            shard = Shard(shard_index)
            # Pending headers for this (slot, shard) combo
            candidates = [
                c for c in state.previous_epoch_pending_shard_headers
                if (c.slot, c.shard) == (slot, shard)
            ]
            # If any candidates already confirmed, skip
            if True in [c.confirmed for c in candidates]:
                continue

            # The entire committee (and its balance)
            index = compute_committee_index_from_shard(state, slot, shard)
            full_committee = get_beacon_committee(state, slot, index)
            # The set of voters who voted for each header (and their total balances)
            voting_sets = [
                set(v for i, v in enumerate(full_committee) if c.votes[i])
                for c in candidates
            ]
            voting_balances = [
                get_total_balance(state, voters)
                for voters in voting_sets
            ]
            # Get the index with the most total balance voting for them.
            # NOTE: if two choices get exactly the same voting balance,
            # the candidate earlier in the list wins
            if max(voting_balances) > 0:
                winning_index = voting_balances.index(max(voting_balances))
            else:
                # If no votes, zero wins
                winning_index = [c.root for c in candidates].index(Root())
            candidates[winning_index].confirmed = True
    for slot_index in range(SLOTS_PER_EPOCH):
        for shard in range(MAX_SHARDS):
            state.grandparent_epoch_confirmed_commitments[shard][slot_index] = DataCommitment()
    confirmed_headers = [candidate for candidate in state.previous_epoch_pending_shard_headers if candidate.confirmed]
    for header in confirmed_headers:
        state.grandparent_epoch_confirmed_commitments[header.shard][header.slot % SLOTS_PER_EPOCH] = header.commitment
```

```python
|
||||||
|
def charge_confirmed_header_fees(state: BeaconState) -> None:
|
||||||
|
new_gasprice = state.shard_gasprice
|
||||||
|
previous_epoch = get_previous_epoch(state)
|
||||||
|
adjustment_quotient = (
|
||||||
|
get_active_shard_count(state, previous_epoch)
|
||||||
|
* SLOTS_PER_EPOCH * GASPRICE_ADJUSTMENT_COEFFICIENT
|
||||||
|
)
|
||||||
|
previous_epoch_start_slot = compute_start_slot_at_epoch(previous_epoch)
|
||||||
|
for slot in range(previous_epoch_start_slot, previous_epoch_start_slot + SLOTS_PER_EPOCH):
|
||||||
|
for shard_index in range(get_active_shard_count(state, previous_epoch)):
|
||||||
|
shard = Shard(shard_index)
|
||||||
|
confirmed_candidates = [
|
||||||
|
c for c in state.previous_epoch_pending_shard_headers
|
||||||
|
if (c.slot, c.shard, c.confirmed) == (slot, shard, True)
|
||||||
|
]
|
||||||
|
if not any(confirmed_candidates):
|
||||||
|
continue
|
||||||
|
candidate = confirmed_candidates[0]
|
||||||
|
|
||||||
|
# Charge EIP 1559 fee
|
||||||
|
proposer = get_shard_proposer_index(state, slot, shard)
|
||||||
|
fee = (
|
||||||
|
(state.shard_gasprice * candidate.commitment.length)
|
||||||
|
// TARGET_SAMPLES_PER_BLOCK
|
||||||
|
)
|
||||||
|
decrease_balance(state, proposer, fee)
|
||||||
|
|
||||||
|
# Track updated gas price
|
||||||
|
new_gasprice = compute_updated_gasprice(
|
||||||
|
new_gasprice,
|
||||||
|
candidate.commitment.length,
|
||||||
|
adjustment_quotient,
|
||||||
|
)
|
||||||
|
state.shard_gasprice = new_gasprice
|
||||||
|
```
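To make the fee charge concrete, here is a worked example with made-up numbers (the `TARGET_SAMPLES_PER_BLOCK` value below is only an assumed placeholder, not the spec constant):

```python
# Hypothetical values, for illustration only.
shard_gasprice = 8                # Gwei per sample (assumed)
commitment_length = 2048          # samples in the confirmed commitment (assumed)
TARGET_SAMPLES_PER_BLOCK = 1024   # assumed target size
fee = (shard_gasprice * commitment_length) // TARGET_SAMPLES_PER_BLOCK
assert fee == 16  # a commitment at 2x the target pays 2x the per-target gasprice
```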

```python
def reset_pending_headers(state: BeaconState) -> None:
    state.previous_epoch_pending_shard_headers = state.current_epoch_pending_shard_headers
    state.current_epoch_pending_shard_headers = []
    # Add dummy "empty" PendingShardHeader (the default vote if no shard header is available)
    next_epoch = get_current_epoch(state) + 1
    next_epoch_start_slot = compute_start_slot_at_epoch(next_epoch)
    for slot in range(next_epoch_start_slot, next_epoch_start_slot + SLOTS_PER_EPOCH):
        for index in range(get_committee_count_per_slot(state, next_epoch)):
            committee_index = CommitteeIndex(index)
            shard = compute_shard_from_committee_index(state, slot, committee_index)
            committee_length = len(get_beacon_committee(state, slot, committee_index))
            state.current_epoch_pending_shard_headers.append(PendingShardHeader(
                slot=slot,
                shard=shard,
                commitment=DataCommitment(),
                root=Root(),
                votes=Bitlist[MAX_VALIDATORS_PER_COMMITTEE]([0] * committee_length),
                confirmed=False,
            ))
```

#### Shard epoch increment

```python
def process_shard_epoch_increment(state: BeaconState) -> None:
    # Update current_epoch_start_shard
    state.current_epoch_start_shard = get_start_shard(state, Slot(state.slot + 1))
```

@ -0,0 +1,135 @@
# Ethereum 2.0 Sharding -- Network specification

**Notice**: This document is a work-in-progress for researchers and implementers.

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [New containers](#new-containers)
  - [ShardBlobBody](#shardblobbody)
  - [ShardBlob](#shardblob)
  - [SignedShardBlob](#signedshardblob)
- [Gossip domain](#gossip-domain)
  - [Topics and messages](#topics-and-messages)
    - [Shard blobs: `shard_blob_{shard}`](#shard-blobs-shard_blob_shard)
    - [Shard header: `shard_header`](#shard-header-shard_header)
    - [Shard proposer slashing: `shard_proposer_slashing`](#shard-proposer-slashing-shard_proposer_slashing)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->


## Introduction

The specification of these changes continues in the same format as the [Phase0](../phase0/p2p-interface.md) and
[Altair](../altair/p2p-interface.md) network specifications, and assumes them as prerequisites.
The adjustments and additions for shards are outlined in this document.

## New containers

### ShardBlobBody

```python
class ShardBlobBody(Container):
    # The actual data commitment
    commitment: DataCommitment
    # Proof that the degree < commitment.length
    degree_proof: BLSCommitment
    # The actual data. Should match the commitment and degree proof.
    data: List[BLSPoint, POINTS_PER_SAMPLE * MAX_SAMPLES_PER_BLOCK]
    # Latest block root of the Beacon Chain, before shard_blob.slot
    beacon_block_root: Root
```

The user MUST always verify that the commitments in the `body` are valid for the `data` in the `body`.

### ShardBlob

```python
class ShardBlob(Container):
    # Slot and shard that this blob is intended for
    slot: Slot
    shard: Shard
    body: ShardBlobBody
    # Proposer of the shard-blob
    proposer_index: ValidatorIndex
```

This is the expanded form of the `ShardBlobHeader` type.

### SignedShardBlob

```python
class SignedShardBlob(Container):
    message: ShardBlob
    signature: BLSSignature
```

## Gossip domain

### Topics and messages

Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are:

| Name                      | Message Type            |
|---------------------------|-------------------------|
| `shard_blob_{shard}`      | `SignedShardBlob`       |
| `shard_header`            | `SignedShardHeader`     |
| `shard_proposer_slashing` | `ShardProposerSlashing` |

The [DAS network specification](./das-p2p.md) defines additional topics.

#### Shard blobs: `shard_blob_{shard}`

Shard block data, in the form of a `SignedShardBlob`, is published to the `shard_blob_{shard}` subnets.

The following validations MUST pass before forwarding the `signed_blob` (with inner `message` as `blob`) on the horizontal subnet or creating samples for it.
- _[REJECT]_ `blob.shard` MUST match the topic `{shard}` parameter (and is thus within the valid shard index range).
- _[IGNORE]_ The `blob` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
  i.e. validate that `blob.slot <= current_slot`
  (a client MAY queue future blobs for processing at the appropriate slot).
- _[IGNORE]_ The `blob` is new enough to still be processed --
  i.e. validate that `compute_epoch_at_slot(blob.slot) >= get_previous_epoch(state)`
- _[IGNORE]_ The blob is the first blob with a valid signature received for the `(blob.proposer_index, blob.slot, blob.shard)` combination.
- _[REJECT]_ The blob is well-formatted and not too large, as already enforced by the SSZ list-limit.
- _[REJECT]_ The `blob.body.data` MUST NOT contain any point `p >= MODULUS`. Although it is a `uint256`, not the full 256-bit range is valid.
- _[REJECT]_ The proposer signature, `signed_blob.signature`, is valid with respect to the `proposer_index` pubkey.
- _[REJECT]_ The blob is proposed by the expected `proposer_index` for the blob's slot
  in the context of the current shuffling (defined by `blob.body.beacon_block_root`/`slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling,
  the blob MAY be queued for later processing while proposers for the blob's branch are calculated --
  in such a case _do not_ `REJECT`, instead `IGNORE` this message.
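
A minimal sketch of how a client might encode the two slot-related `IGNORE` conditions above (the helper shape and the queueing decision are illustrative, not normative; `compute_epoch_at_slot` and `get_previous_epoch` are the spec functions referenced above):

```python
# Illustrative only: returns the gossip verdict for the slot checks.
def validate_blob_slot(blob, state, current_slot):
    if blob.slot > current_slot:  # with MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance in practice
        return "IGNORE"  # a client MAY queue the blob for the appropriate slot
    if compute_epoch_at_slot(blob.slot) < get_previous_epoch(state):
        return "IGNORE"  # too old to still be processed
    return "CONTINUE"  # proceed to the signature and proposer checks
```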

#### Shard header: `shard_header`

Shard header data, in the form of a `SignedShardBlobHeader`, is published to the global `shard_header` subnet.

The following validations MUST pass before forwarding the `signed_shard_header` (with inner `message` as `header`) on the network.
- _[IGNORE]_ The `header` is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) --
  i.e. validate that `header.slot <= current_slot`
  (a client MAY queue future headers for processing at the appropriate slot).
- _[IGNORE]_ The `header` is new enough to still be processed --
  i.e. validate that `compute_epoch_at_slot(header.slot) >= get_previous_epoch(state)`
- _[IGNORE]_ The header is the first header with a valid signature received for the `(header.proposer_index, header.slot, header.shard)` combination.
- _[REJECT]_ The proposer signature, `signed_shard_header.signature`, is valid with respect to the `proposer_index` pubkey.
- _[REJECT]_ The header is proposed by the expected `proposer_index` for the block's slot
  in the context of the current shuffling (defined by `header.body_summary.beacon_block_root`/`slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling,
  the block MAY be queued for later processing while proposers for the block's branch are calculated --
  in such a case _do not_ `REJECT`, instead `IGNORE` this message.

#### Shard proposer slashing: `shard_proposer_slashing`

Shard proposer slashings, in the form of `ShardProposerSlashing`, are published to the global `shard_proposer_slashing` topic.

The following validations MUST pass before forwarding the `shard_proposer_slashing` on to the network.
- _[IGNORE]_ The shard proposer slashing is the first valid shard proposer slashing received
  for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`.
  The `slot` and `shard` are ignored; there are no per-shard slashings.
- _[REJECT]_ All of the conditions within `process_shard_proposer_slashing` pass validation.

@ -75,7 +75,8 @@ For convenience we alias:

* `bit` to `boolean`
* `byte` to `uint8` (this is a basic type)
* `BytesN` to `Vector[byte, N]` (this is *not* a basic type)
* `BytesN` and `ByteVector[N]` to `Vector[byte, N]` (this is *not* a basic type)
* `ByteList[N]` to `List[byte, N]`
* `null`: `{}`

### Default values

@ -1,40 +0,0 @@
from inspect import getmembers, isfunction
from typing import Any, Iterable

from gen_base.gen_typing import TestCase


def generate_from_tests(runner_name: str, handler_name: str, src: Any,
                        fork_name: str, bls_active: bool = True) -> Iterable[TestCase]:
    """
    Generate a list of test cases by running tests from the given src in generator-mode.
    :param runner_name: to categorize the test in general as.
    :param handler_name: to categorize the test specialization as.
    :param src: to retrieve tests from (discovered using inspect.getmembers).
    :param fork_name: to run tests against particular phase and/or fork.
        (if multiple forks are applicable, indicate the last fork)
    :param bls_active: optional, to override BLS switch preference. Defaults to True.
    :return: an iterable of test cases.
    """
    fn_names = [
        name for (name, _) in getmembers(src, isfunction)
        if name.startswith('test_')
    ]
    print("generating test vectors from tests source: %s" % src.__name__)
    for name in fn_names:
        tfn = getattr(src, name)

        # strip off the `test_`
        case_name = name
        if case_name.startswith('test_'):
            case_name = case_name[5:]

        yield TestCase(
            fork_name=fork_name,
            runner_name=runner_name,
            handler_name=handler_name,
            suite_name='pyspec_tests',
            case_name=case_name,
            # TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
            case_fn=lambda: tfn(generator_mode=True, phase=fork_name, bls_active=bls_active)
        )

@ -1,3 +0,0 @@
ruamel.yaml==0.16.5
eth-utils==1.6.0
pytest>=4.4

@ -1,11 +0,0 @@
from distutils.core import setup


setup(
    name='gen_helpers',
    packages=['gen_base', 'gen_from_tests'],
    install_requires=[
        "ruamel.yaml==0.16.5",
        "eth-utils==1.6.0",
        "pytest>=4.4",
    ]
)

@ -7,27 +7,30 @@ With this executable spec,
test-generators can easily create test-vectors for client implementations,
and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.

## Building

To build the pyspec: `python setup.py build`
(or `pip install .`, but beware that ignored files will still be copied over to a temporary dir, due to pip issue 2195).
This outputs the build files to the `./build/lib/eth2spec/...` dir, and can't be used for local test running. Instead, use the dev-install as described below.

## Dev Install

All the dynamic parts of the spec are automatically built with `python setup.py pyspecdev`.
First, create a `venv` and install the developer dependencies (`test` and `lint` extras):
Unlike the regular install, this outputs spec files to their original source location, instead of build output only.

Alternatively, you can build a sub-set of the pyspec with the distutil command:
```shell
```bash
make install_test
python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```

## Py-tests
All the dynamic parts of the spec are built with:

After installing, you can install the optional dependencies for testing and linting.
```shell
With makefile: `make install_test`.
(venv) python setup.py pyspecdev
Or manually: run `pip install .[testing]` and `pip install .[linting]`.
```

Unlike the regular install, this outputs spec files to their intended source location,
to enable debuggers to navigate between packages and generated code, without fragile directory linking.

By default, when installing the `eth2spec` as package in non-develop mode,
the distutils implementation of the `setup` runs `build`, which is extended to run the same `pyspec` work,
but outputs into the standard `./build/lib` output.
This enables the `eth2.0-specs` repository to be installed like any other python package.
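
For example, after a dev install the spec modules can be loaded and re-configured from Python; a sketch (the `./configs` path and the `minimal` preset name are assumptions based on the repository layout), mirroring what the test generators in this repository do:

```python
from importlib import reload
from eth2spec.config import config_util
from eth2spec.phase0 import spec

# Load the minimal preset, then reload the spec module so the new constants apply.
config_util.prepare_config("./configs", "minimal")
reload(spec)
print(spec.SLOTS_PER_EPOCH)
```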

## Py-tests

These tests are not intended for client-consumption.
These tests are testing the spec itself, to verify consistency and provide feedback on modifications of the spec.

@ -39,20 +42,32 @@ However, most of the tests can be run in generator-mode, to output test vectors

Run `make test` from the root of the specs repository (after running `make install_test` if you have not before).

Note that the `make` commands run through the build steps: it runs the `build` output, not the local package source files.

#### Manual

From the repository root:
See `Dev install` for test pre-requisites.

Install venv and install:
Tests are built for `pytest`.
```bash

python3 -m venv venv
Caveats:
. venv/bin/activate
- Working directory must be `./tests/core/pyspec`. The work-directory is important to locate eth2 configuration files.
python setup.py pyspecdev
- Run `pytest` as module. It avoids environment differences, and the behavior is different too:
  `pytest` as module adds the current directory to the `sys.path`

Full test usage, with explicit configuration for illustration of options usage:
```shell
(venv) python -m pytest --config=minimal eth2spec
```

Run the test command from the `tests/core/pyspec` directory:
Or, to run a specific test file, specify the full path:
```shell
(venv) python -m pytest --config=minimal ./eth2spec/test/phase0/block_processing/test_process_attestation.py
```
pytest --config=minimal eth2spec

Or, to run a specific test function (specify the `eth2spec` module, or the script path if the keyword is ambiguous):
```shell
(venv) python -m pytest --config=minimal -k test_success_multi_proposer_index_iterations eth2spec
```

Options:

@ -64,6 +79,12 @@ Options:

Run `make open_cov` from the root of the specs repository after running `make test` to open the html code coverage report.

### Advanced

Building spec files from any markdown sources, to a custom location:
```bash
(venv) python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
```

## Contributing

@ -1 +1 @@
1.0.0
1.1.0-alpha.3

@ -17,8 +17,8 @@ def apply_constants_config(spec_globals: Dict[str, Any], warn_if_unknown: bool =
        # Keep the same type as the default value indicates (which may be an SSZ basic type subclass, e.g. 'Gwei')
        spec_globals[k] = spec_globals[k].__class__(v)
    else:
        # Note: Phase 0 spec will not know the phase 1 config values.
        # Note: The phase 0 spec will not warn if Altair or later config values are applied.
        # Yet, during debugging you can enable explicit warnings.
        # During debugging you can enable explicit warnings.
        if warn_if_unknown:
            print(f"WARNING: unknown config key: '{k}' with value: '{v}'")

@ -54,8 +54,7 @@ def load_config_file(configs_dir: str, presets_name: str) -> Dict[str, Any]:
            out[k] = [int(item) if item.isdigit() else item for item in v]
        elif isinstance(v, str) and v.startswith("0x"):
            out[k] = bytes.fromhex(v[2:])
        elif k == "CONFIG_NAME":
            out[k] = str(v)
        else:
            out[k] = int(v)
    out['CONFIG_NAME'] = presets_name
    return out

@ -4,7 +4,7 @@

A util to quickly write new test suite generators with.

See [Generators documentation](../../generators/README.md) for integration details.
See [Generators documentation](../../../../generators/README.md) for integration details.

Options:

@ -43,8 +43,8 @@ The yielding pattern is:
3 value style: `yield <key name> <kind name> <value>`.

Test part output kinds:
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz` file.
- `ssz`: value is expected to be a `bytes`, and the raw data is written to a `<key name>.ssz_snappy` file.
- `data`: value is expected to be any python object that can be dumped as YAML. Output is written to `<key name>.yaml`
- `data`: value is expected to be any Python object that can be dumped as YAML. Output is written to `<key name>.yaml`
- `meta`: these key-value pairs are collected into a dict, and then collectively written to a metadata
  file named `meta.yaml`, if anything is yielded with `meta` empty.

@ -1,3 +1,5 @@
import os
import shutil
import argparse
from pathlib import Path
import sys

@ -8,11 +10,13 @@ from ruamel.yaml import (
    YAML,
)

from gen_base.gen_typing import TestProvider
from snappy import compress

from eth2spec.test import context
from eth2spec.test.exceptions import SkippedTest

from .gen_typing import TestProvider


# Flag that the runner does NOT run test via pytest
context.is_pytest = False

@ -100,8 +104,11 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
    yaml = YAML(pure=True)
    yaml.default_flow_style = None

    log_file = Path(output_dir) / 'testgen_error_log.txt'

    print(f"Generating tests into {output_dir}")
    print(f"Reading configs from {args.configs_path}")
    print(f'Error log file: {log_file}')

    configs = args.config_list
    if configs is None:

@ -119,18 +126,32 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):

        print(f"generating tests with config '{config_name}' ...")
        for test_case in tprov.make_cases():
            case_dir = Path(output_dir) / Path(config_name) / Path(test_case.fork_name) \
            case_dir = (
                / Path(test_case.runner_name) / Path(test_case.handler_name) \
                Path(output_dir) / Path(config_name) / Path(test_case.fork_name)
                / Path(test_case.suite_name) / Path(test_case.case_name)
                / Path(test_case.runner_name) / Path(test_case.handler_name)
                / Path(test_case.suite_name) / Path(test_case.case_name)
            )
            incomplete_tag_file = case_dir / "INCOMPLETE"

            if case_dir.exists():
                if not args.force:
                if not args.force and not incomplete_tag_file.exists():
                    print(f'Skipping already existing test: {case_dir}')
                    continue
                print(f'Warning, output directory {case_dir} already exist,'
                else:
                    f' old files are not deleted but will be overwritten when a new version is produced')
                    print(f'Warning, output directory {case_dir} already exist,'
                          f' old files will be deleted and it will generate test vector files with the latest version')
                    # Clear the existing case_dir folder
                    shutil.rmtree(case_dir)

            print(f'Generating test: {case_dir}')

            written_part = False

            # Add `INCOMPLETE` tag file to indicate that the test generation has not completed.
            case_dir.mkdir(parents=True, exist_ok=True)
            with incomplete_tag_file.open("w") as f:
                f.write("\n")

            try:
                def output_part(out_kind: str, name: str, fn: Callable[[Path, ], None]):
                    # make sure the test case directory is created before any test part is written.

@ -140,7 +161,6 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
            except IOError as e:
                sys.exit(f'Error when dumping test "{case_dir}", part "{name}", kind "{out_kind}": {e}')

        written_part = False
        meta = dict()

        try:

@ -154,6 +174,7 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):
                    output_part("ssz", name, dump_ssz_fn(data, name, file_mode))
        except SkippedTest as e:
            print(e)
            shutil.rmtree(case_dir)
            continue

        # Once all meta data is collected (if any), write it to a meta data file.

@ -163,10 +184,22 @@ def run_generator(generator_name, test_providers: Iterable[TestProvider]):

            if not written_part:
                print(f"test case {case_dir} did not produce any test case parts")

        except Exception as e:
            print(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
            traceback.print_exc()
            # Write to log file
            with log_file.open("a+") as f:
                f.write(f"ERROR: failed to generate vector(s) for test {case_dir}: {e}")
                traceback.print_exc(file=f)
                f.write('\n')
        else:
            # If no written_part, the only file was incomplete_tag_file. Clear the existing case_dir folder.
            if not written_part:
                shutil.rmtree(case_dir)
            else:
                # Only remove `INCOMPLETE` tag file
                os.remove(incomplete_tag_file)

        print(f"completed {generator_name}")


@ -180,7 +213,8 @@ def dump_yaml_fn(data: Any, name: str, file_mode: str, yaml_encoder: YAML):

def dump_ssz_fn(data: AnyStr, name: str, file_mode: str):
    def dump(case_path: Path):
        out_path = case_path / Path(name + '.ssz')
        out_path = case_path / Path(name + '.ssz_snappy')
        compressed = compress(data)
        with out_path.open(file_mode + 'b') as f:  # write in raw binary mode
            f.write(data)
            f.write(compressed)
    return dump
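
Since test parts are now written snappy-compressed, a consumer decompresses before SSZ-decoding. A minimal read-back sketch, assuming the same `python-snappy` dependency used above:

```python
from pathlib import Path
from snappy import uncompress

def load_ssz_snappy(case_dir: Path, name: str) -> bytes:
    # Inverse of dump_ssz_fn: raw SSZ bytes, snappy-compressed, stored as `<name>.ssz_snappy`.
    with (case_dir / (name + '.ssz_snappy')).open('rb') as f:
        return uncompress(f.read())
```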
@ -0,0 +1,106 @@
from importlib import reload, import_module
from inspect import getmembers, isfunction
from typing import Any, Callable, Dict, Iterable, Optional

from eth2spec.config import config_util
from eth2spec.utils import bls
from eth2spec.test.helpers.constants import ALL_CONFIGS, TESTGEN_FORKS
from eth2spec.test.helpers.typing import SpecForkName, ConfigName

from eth2spec.gen_helpers.gen_base import gen_runner
from eth2spec.gen_helpers.gen_base.gen_typing import TestCase, TestProvider


def generate_from_tests(runner_name: str, handler_name: str, src: Any,
                        fork_name: SpecForkName, bls_active: bool = True,
                        phase: Optional[str] = None) -> Iterable[TestCase]:
    """
    Generate a list of test cases by running tests from the given src in generator-mode.
    :param runner_name: to categorize the test in general as.
    :param handler_name: to categorize the test specialization as.
    :param src: to retrieve tests from (discovered using inspect.getmembers).
    :param fork_name: the folder name for these tests.
        (if multiple forks are applicable, indicate the last fork)
    :param bls_active: optional, to override BLS switch preference. Defaults to True.
    :param phase: optional, to run tests against a particular spec version. Defaults to the `fork_name` value.
    :return: an iterable of test cases.
    """
    fn_names = [
        name for (name, _) in getmembers(src, isfunction)
        if name.startswith('test_')
    ]

    if phase is None:
        phase = fork_name

    print("generating test vectors from tests source: %s" % src.__name__)
    for name in fn_names:
        tfn = getattr(src, name)

        # strip off the `test_`
        case_name = name
        if case_name.startswith('test_'):
            case_name = case_name[5:]

        yield TestCase(
            fork_name=fork_name,
            runner_name=runner_name,
            handler_name=handler_name,
            suite_name='pyspec_tests',
            case_name=case_name,
            # TODO: with_all_phases and other per-phase tooling, should be replaced with per-fork equivalent.
            case_fn=lambda: tfn(generator_mode=True, phase=phase, bls_active=bls_active)
        )


def get_provider(create_provider_fn: Callable[[SpecForkName, str, str, ConfigName], TestProvider],
                 config_name: ConfigName,
                 fork_name: SpecForkName,
                 all_mods: Dict[str, Dict[str, str]]) -> Iterable[TestProvider]:
    for key, mod_name in all_mods[fork_name].items():
        yield create_provider_fn(
            fork_name=fork_name,
            handler_name=key,
            tests_src_mod_name=mod_name,
            config_name=config_name,
        )


def get_create_provider_fn(
        runner_name: str, config_name: ConfigName, specs: Iterable[Any]
) -> Callable[[SpecForkName, str, str, ConfigName], TestProvider]:
    def prepare_fn(configs_path: str) -> str:
        config_util.prepare_config(configs_path, config_name)
        for spec in specs:
            reload(spec)
        bls.use_milagro()
        return config_name

    def create_provider(fork_name: SpecForkName, handler_name: str,
                        tests_src_mod_name: str, config_name: ConfigName) -> TestProvider:
        def cases_fn() -> Iterable[TestCase]:
            tests_src = import_module(tests_src_mod_name)
            return generate_from_tests(
                runner_name=runner_name,
                handler_name=handler_name,
                src=tests_src,
                fork_name=fork_name,
            )

        return TestProvider(prepare=prepare_fn, make_cases=cases_fn)
    return create_provider


def run_state_test_generators(runner_name: str, specs: Iterable[Any], all_mods: Dict[str, Dict[str, str]]) -> None:
    """
    Generate all available state tests of `TESTGEN_FORKS` forks of `ALL_CONFIGS` configs of the given runner.
    """
    for config_name in ALL_CONFIGS:
        for fork_name in TESTGEN_FORKS:
            if fork_name in all_mods:
                gen_runner.run_generator(runner_name, get_provider(
                    create_provider_fn=get_create_provider_fn(runner_name, config_name, specs),
                    config_name=config_name,
                    fork_name=fork_name,
                    all_mods=all_mods,
                ))
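
A hypothetical generator entry point wired through `run_state_test_generators` (the test module paths in `all_mods` are illustrative, not actual repository paths):

```python
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.altair import spec as spec_altair
from eth2spec.test.helpers.constants import PHASE0, ALTAIR

# fork -> handler name -> module to discover `test_*` functions in.
all_mods = {
    PHASE0: {'sanity': 'eth2spec.test.phase0.sanity.test_blocks'},
    ALTAIR: {'sanity': 'eth2spec.test.altair.sanity.test_blocks'},
}

if __name__ == "__main__":
    run_state_test_generators(runner_name="sanity", specs=(spec_phase0, spec_altair), all_mods=all_mods)
```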
@ -0,0 +1,409 @@
from collections import Counter
import random
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.block_processing import run_block_processing_to
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    transition_to,
    next_epoch,
)
from eth2spec.test.helpers.constants import (
    MAINNET, MINIMAL,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    expect_assertion_error,
    with_altair_and_later,
    with_configs,
    spec_state_test,
    always_bls,
)
from eth2spec.utils.hash_function import hash


def run_sync_committee_processing(spec, state, block, expect_exception=False):
    """
    Processes everything up to the sync committee work, then runs the sync committee work in isolation, and
    produces a pre-state and post-state (None if exception) specifically for sync-committee processing changes.
    """
    # process up to the sync committee work
    call = run_block_processing_to(spec, state, block, 'process_sync_committee')
    yield 'pre', state
    yield 'sync_aggregate', block.body.sync_aggregate
    if expect_exception:
        expect_assertion_error(lambda: call(state, block))
        yield 'post', None
    else:
        call(state, block)
        yield 'post', state


def get_committee_indices(spec, state, duplicates=False):
    """
    This utility function allows the caller to ensure there are or are not
    duplicate validator indices in the returned committee based on
    the boolean ``duplicates``.
    """
    state = state.copy()
    current_epoch = spec.get_current_epoch(state)
    randao_index = current_epoch % spec.EPOCHS_PER_HISTORICAL_VECTOR
    while True:
        committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
        if duplicates:
            if len(committee) != len(set(committee)):
                return committee
        else:
            if len(committee) == len(set(committee)):
                return committee
        state.randao_mixes[randao_index] = hash(state.randao_mixes[randao_index])


@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_missing_participant(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    rng = random.Random(2020)
    random_participant = rng.choice(committee)

    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one participant whose signature was included.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[index != random_participant for index in committee],
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,  # full committee signs
        )
    )
    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_extra_participant(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    rng = random.Random(3030)
    random_participant = rng.choice(committee)

    block = build_empty_block_for_next_slot(spec, state)
    # Exclude one signature even though the block claims the entire committee participated.
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            [index for index in committee if index != random_participant],
        )
    )

    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


def compute_sync_committee_inclusion_reward(spec, state, participant_index, committee, committee_bits):
    total_active_increments = spec.get_total_active_balance(state) // spec.EFFECTIVE_BALANCE_INCREMENT
    total_base_rewards = spec.Gwei(spec.get_base_reward_per_increment(state) * total_active_increments)
    max_epoch_rewards = spec.Gwei(total_base_rewards * spec.SYNC_REWARD_WEIGHT // spec.WEIGHT_DENOMINATOR)
    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
    max_slot_rewards = spec.Gwei(max_epoch_rewards * len(included_indices) // len(committee) // spec.SLOTS_PER_EPOCH)

    # Compute the participant and proposer sync rewards
    committee_effective_balance = sum([state.validators[index].effective_balance for index in included_indices])
    committee_effective_balance = max(spec.EFFECTIVE_BALANCE_INCREMENT, committee_effective_balance)
    effective_balance = state.validators[participant_index].effective_balance
    return spec.Gwei(max_slot_rewards * effective_balance // committee_effective_balance)


def compute_sync_committee_participant_reward(spec, state, participant_index, committee, committee_bits):
    included_indices = [index for index, bit in zip(committee, committee_bits) if bit]
    multiplicities = Counter(included_indices)

    inclusion_reward = compute_sync_committee_inclusion_reward(
        spec, state, participant_index, committee, committee_bits,
    )
    return spec.Gwei(inclusion_reward * multiplicities[participant_index])


def compute_sync_committee_proposer_reward(spec, state, committee, committee_bits):
    proposer_reward = 0
    for index, bit in zip(committee, committee_bits):
        if not bit:
            continue
        inclusion_reward = compute_sync_committee_inclusion_reward(
            spec, state, index, committee, committee_bits,
        )
        proposer_reward_denominator = (
            (spec.WEIGHT_DENOMINATOR - spec.PROPOSER_WEIGHT)
            * spec.WEIGHT_DENOMINATOR
            // spec.PROPOSER_WEIGHT
        )
        proposer_reward += spec.Gwei((inclusion_reward * spec.WEIGHT_DENOMINATOR) // proposer_reward_denominator)
    return proposer_reward


def validate_sync_committee_rewards(spec, pre_state, post_state, committee, committee_bits, proposer_index):
    for index in range(len(post_state.validators)):
        reward = 0
        if index in committee:
            reward += compute_sync_committee_participant_reward(
                spec,
                pre_state,
                index,
                committee,
                committee_bits,
            )

        if proposer_index == index:
            reward += compute_sync_committee_proposer_reward(
                spec,
                pre_state,
                committee,
                committee_bits,
            )

        assert post_state.balances[index] == pre_state.balances[index] + reward


def run_successful_sync_committee_test(spec, state, committee, committee_bits):
    pre_state = state.copy()

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=committee_bits,
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            [index for index, bit in zip(committee, committee_bits) if bit],
        )
    )

    yield from run_sync_committee_processing(spec, state, block)

    validate_sync_committee_rewards(
        spec,
        pre_state,
        state,
        committee,
        committee_bits,
        block.proposer_index,
    )


@with_altair_and_later
@with_configs([MINIMAL], reason="to create nonduplicate committee")
@spec_state_test
def test_sync_committee_rewards_nonduplicate_committee(spec, state):
    committee = get_committee_indices(spec, state, duplicates=False)
    committee_size = len(committee)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

    # Preconditions of this test case
    assert active_validator_count >= spec.SYNC_COMMITTEE_SIZE
    assert committee_size == len(set(committee))

    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)


@with_altair_and_later
@with_configs([MAINNET], reason="to create duplicate committee")
@spec_state_test
def test_sync_committee_rewards_duplicate_committee(spec, state):
    committee = get_committee_indices(spec, state, duplicates=True)
    committee_size = len(committee)
    committee_bits = [True] * committee_size
    active_validator_count = len(spec.get_active_validator_indices(state, spec.get_current_epoch(state)))

    # Preconditions of this test case
    assert active_validator_count < spec.SYNC_COMMITTEE_SIZE
    assert committee_size > len(set(committee))

    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)


@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_not_full_participants(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    rng = random.Random(1010)
    committee_bits = [rng.choice([True, False]) for _ in committee]

    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)


@with_altair_and_later
@spec_state_test
@always_bls
def test_sync_committee_rewards_empty_participants(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    committee_bits = [False for _ in committee]

    yield from run_successful_sync_committee_test(spec, state, committee, committee_bits)


@with_altair_and_later
@spec_state_test
@always_bls
def test_invalid_signature_past_block(spec, state):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))

    blocks = []
    for _ in range(2):
        # NOTE: need to transition twice to move beyond the degenerate case at genesis
        block = build_empty_block_for_next_slot(spec, state)
        # Valid sync committee signature here...
        block.body.sync_aggregate = spec.SyncAggregate(
            sync_committee_bits=[True] * len(committee),
            sync_committee_signature=compute_aggregate_sync_committee_signature(
                spec,
                state,
                block.slot - 1,
                committee,
            )
        )

        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(signed_block)

    invalid_block = build_empty_block_for_next_slot(spec, state)
    # Invalid signature from a slot other than the previous
    invalid_block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            invalid_block.slot - 2,
            committee,
        )
    )

    yield from run_sync_committee_processing(spec, state, invalid_block, expect_exception=True)


@with_altair_and_later
@with_configs([MINIMAL], reason="to produce different committee sets")
@spec_state_test
@always_bls
def test_invalid_signature_previous_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_sync_committee = state.next_sync_committee

    epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    # Use the previous sync committee to produce the signature.
    pubkeys = [validator.pubkey for validator in state.validators]
    # Ensure that the pubkey sets are different.
    assert set(old_sync_committee.pubkeys) != set(state.current_sync_committee.pubkeys)
    committee = [pubkeys.index(pubkey) for pubkey in old_sync_committee.pubkeys]

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee,
        )
    )

    yield from run_sync_committee_processing(spec, state, block, expect_exception=True)


@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_valid_signature_future_committee(spec, state):
    # NOTE: the `state` provided is at genesis and the process to select
    # sync committees currently returns the same committee for the first and second
    # periods at genesis.
    # To get a distinct committee so we can generate an "old" signature, we need to advance
    # 2 EPOCHS_PER_SYNC_COMMITTEE_PERIOD periods.
    current_epoch = spec.get_current_epoch(state)
    old_current_sync_committee = state.current_sync_committee
    old_next_sync_committee = state.next_sync_committee

    epoch_in_future_sync_committee_period = current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    slot_in_future_sync_committee_period = epoch_in_future_sync_committee_period * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_future_sync_committee_period)

    sync_committee = state.current_sync_committee

    expected_sync_committee = spec.get_sync_committee(state, epoch_in_future_sync_committee_period)

    assert sync_committee == expected_sync_committee
    assert sync_committee != old_current_sync_committee
    assert sync_committee != old_next_sync_committee

    pubkeys = [validator.pubkey for validator in state.validators]
    committee_indices = [pubkeys.index(pubkey) for pubkey in sync_committee.pubkeys]

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[True] * len(committee_indices),
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            committee_indices,
        )
    )

    yield from run_sync_committee_processing(spec, state, block)


@with_altair_and_later
@spec_state_test
def test_sync_committee_is_only_computed_at_epoch_boundary(spec, state):
    """
    Sync committees can only be computed at sync committee period boundaries.
    Ensure a client respects the committee in the state (assumed to be derived
    in the correct way).
    """
    current_epoch = spec.get_current_epoch(state)

    # use a "synthetic" committee to simulate the situation
    # where ``spec.get_sync_committee`` at the sync committee
    # period epoch boundary would have diverged some epochs into the
    # period; ``aggregate_pubkey`` is not relevant to this test
    pubkeys = []
    committee_indices = []
    i = 0
    active_validator_count = len(spec.get_active_validator_indices(state, current_epoch))
    while len(pubkeys) < spec.SYNC_COMMITTEE_SIZE:
        v = state.validators[i % active_validator_count]
        if spec.is_active_validator(v, current_epoch):
            pubkeys.append(v.pubkey)
            committee_indices.append(i)
        i += 1

    synthetic_committee = spec.SyncCommittee(pubkeys=pubkeys, aggregate_pubkey=spec.BLSPubkey())
    state.current_sync_committee = synthetic_committee

    assert spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD > 3
    for _ in range(3):
        next_epoch(spec, state)

    committee = get_committee_indices(spec, state)
    assert committee != committee_indices
    committee_size = len(committee_indices)
    committee_bits = [True] * committee_size

    yield from run_successful_sync_committee_test(spec, state, committee_indices, committee_bits)
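
To sanity-check the proposer reward arithmetic in `compute_sync_committee_proposer_reward`: with the Altair weights assumed here (`WEIGHT_DENOMINATOR = 64`, `PROPOSER_WEIGHT = 8`), the proposer receives exactly one seventh of each included participant's inclusion reward:

```python
WEIGHT_DENOMINATOR = 64  # assumed Altair value
PROPOSER_WEIGHT = 8      # assumed Altair value

proposer_reward_denominator = (WEIGHT_DENOMINATOR - PROPOSER_WEIGHT) * WEIGHT_DENOMINATOR // PROPOSER_WEIGHT
assert proposer_reward_denominator == 448
inclusion_reward = 7000  # hypothetical per-participant reward, in Gwei
assert (inclusion_reward * WEIGHT_DENOMINATOR) // proposer_reward_denominator == 1000  # i.e. reward // 7
```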
@ -0,0 +1,80 @@
from eth2spec.test.context import (
    always_bls,
    spec_state_test,
    spec_test,
    with_altair_and_later,
    with_configs,
    with_custom_state,
    single_phase,
    misc_balances,
)
from eth2spec.test.helpers.constants import MINIMAL
from eth2spec.test.helpers.state import transition_to
from eth2spec.test.helpers.epoch_processing import (
    run_epoch_processing_with,
)


#
# Note:
# Calculating sync committees requires pubkey aggregation, thus all tests are generated with `always_bls`
#

def run_sync_committees_progress_test(spec, state):
    first_sync_committee = state.current_sync_committee
    second_sync_committee = state.next_sync_committee

    current_period = spec.get_current_epoch(state) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    next_period = current_period + 1
    next_period_start_epoch = next_period * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    next_period_start_slot = next_period_start_epoch * spec.SLOTS_PER_EPOCH
    end_slot_of_current_period = next_period_start_slot - 1
    transition_to(spec, state, end_slot_of_current_period)

    # Ensure assignments have not changed:
    assert state.current_sync_committee == first_sync_committee
    assert state.next_sync_committee == second_sync_committee

    yield from run_epoch_processing_with(spec, state, 'process_sync_committee_updates')

    # Can compute the third committee having computed final balances in the last epoch
    # of this `EPOCHS_PER_SYNC_COMMITTEE_PERIOD`
    current_epoch = spec.get_current_epoch(state)
    third_sync_committee = spec.get_sync_committee(state, current_epoch + 2 * spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)

    assert state.current_sync_committee == second_sync_committee
    assert state.next_sync_committee == third_sync_committee


@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_genesis(spec, state):
    # Genesis epoch period has an exceptional case
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH

    yield from run_sync_committees_progress_test(spec, state)


@with_altair_and_later
@spec_state_test
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_not_genesis(spec, state):
    # Transition out of the genesis epoch period to test the non-exceptional case
    assert spec.get_current_epoch(state) == spec.GENESIS_EPOCH
    slot_in_next_period = state.slot + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
    transition_to(spec, state, slot_in_next_period)

    yield from run_sync_committees_progress_test(spec, state)


@with_altair_and_later
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@single_phase
@always_bls
@with_configs([MINIMAL], reason="too slow")
def test_sync_committees_progress_misc_balances(spec, state):
    yield from run_sync_committees_progress_test(spec, state)
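
The slot arithmetic in `run_sync_committees_progress_test` is easy to check with small assumed constants (e.g. `EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 8` and `SLOTS_PER_EPOCH = 8`, minimal-style values used here only for illustration):

```python
EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 8  # assumed minimal-style value
SLOTS_PER_EPOCH = 8                   # assumed minimal-style value

current_epoch = 3
current_period = current_epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD
next_period_start_epoch = (current_period + 1) * EPOCHS_PER_SYNC_COMMITTEE_PERIOD
end_slot_of_current_period = next_period_start_epoch * SLOTS_PER_EPOCH - 1
assert (current_period, next_period_start_epoch, end_slot_of_current_period) == (0, 8, 63)
```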
@@ -0,0 +1,119 @@
from eth2spec.test.context import (
    with_phases,
    with_custom_state,
    with_configs,
    spec_test, with_state,
    low_balances, misc_balances, large_validator_set,
)
from eth2spec.test.utils import with_meta_tags
from eth2spec.test.helpers.constants import (
    PHASE0, ALTAIR,
    MINIMAL,
)
from eth2spec.test.helpers.state import (
    next_epoch,
    next_epoch_via_block,
)


ALTAIR_FORK_TEST_META_TAGS = {
    'fork': 'altair',
}


def run_fork_test(post_spec, pre_state):
    yield 'pre', pre_state

    post_state = post_spec.upgrade_to_altair(pre_state)

    # Stable fields
    stable_fields = [
        'genesis_time', 'genesis_validators_root', 'slot',
        # History
        'latest_block_header', 'block_roots', 'state_roots', 'historical_roots',
        # Eth1
        'eth1_data', 'eth1_data_votes', 'eth1_deposit_index',
        # Registry
        'validators', 'balances',
        # Randomness
        'randao_mixes',
        # Slashings
        'slashings',
        # Finality
        'justification_bits', 'previous_justified_checkpoint', 'current_justified_checkpoint', 'finalized_checkpoint',
    ]
    for field in stable_fields:
        assert getattr(pre_state, field) == getattr(post_state, field)

    # Modified fields
    modified_fields = ['fork']
    for field in modified_fields:
        assert getattr(pre_state, field) != getattr(post_state, field)

    assert pre_state.fork.current_version == post_state.fork.previous_version
    assert post_state.fork.current_version == post_spec.ALTAIR_FORK_VERSION
    assert post_state.fork.epoch == post_spec.get_current_epoch(post_state)

    yield 'post', post_state
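# NOTE: the ('pre', state) / ('post', state) pairs yielded by `run_fork_test`
# double as consensus test vectors when this suite is run through the test
# generators; under plain pytest the yields are consumed and only the
# assertions matter.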


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_base_state(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_next_epoch(spec, phases, state):
    next_epoch(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_next_epoch_with_block(spec, phases, state):
    next_epoch_via_block(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@spec_test
@with_state
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_many_next_epoch(spec, phases, state):
    for _ in range(3):
        next_epoch(spec, state)
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_low_balances(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_misc_balances(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)


@with_phases(phases=[PHASE0], other_phases=[ALTAIR])
@with_configs([MINIMAL],
              reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated")
@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@spec_test
@with_meta_tags(ALTAIR_FORK_TEST_META_TAGS)
def test_fork_random_large_validator_set(spec, phases, state):
    yield from run_fork_test(phases[ALTAIR], state)
@@ -0,0 +1,100 @@
import random
from eth2spec.test.helpers.state import (
    state_transition_and_sign_block,
    next_epoch,
    next_epoch_via_block,
)
from eth2spec.test.helpers.block import (
    build_empty_block_for_next_slot,
    build_empty_block,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.context import (
    with_altair_and_later,
    spec_state_test,
)


def run_sync_committee_sanity_test(spec, state, fraction_full=1.0):
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    participants = random.sample(committee, int(len(committee) * fraction_full))

    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    block.body.sync_aggregate = spec.SyncAggregate(
        sync_committee_bits=[index in participants for index in committee],
        sync_committee_signature=compute_aggregate_sync_committee_signature(
            spec,
            state,
            block.slot - 1,
            participants,
        )
    )
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
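# NOTE: the aggregate signature is computed over `block.slot - 1` because a
# sync aggregate included in a block attests to the beacon block root of the
# previous slot.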


@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee(spec, state):
    next_epoch(spec, state)
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)


@with_altair_and_later
@spec_state_test
def test_full_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=1.0)


@with_altair_and_later
@spec_state_test
def test_half_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.5)


@with_altair_and_later
@spec_state_test
def test_empty_sync_committee_committee_genesis(spec, state):
    yield from run_sync_committee_sanity_test(spec, state, fraction_full=0.0)


@with_altair_and_later
@spec_state_test
def test_inactivity_scores(spec, state):
    for _ in range(spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 2):
        next_epoch_via_block(spec, state)

    assert spec.is_in_inactivity_leak(state)
    previous_inactivity_scores = state.inactivity_scores.copy()

    yield 'pre', state

    # Block transition to next epoch
    block = build_empty_block(spec, state, slot=state.slot + spec.SLOTS_PER_EPOCH)
    signed_block = state_transition_and_sign_block(spec, state, block)

    yield 'blocks', [signed_block]
    yield 'post', state
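    # While the chain is in an inactivity leak, each non-participating
    # validator's inactivity score grows by INACTIVITY_SCORE_BIAS per epoch;
    # the empty block above carries no attestations, so every score is
    # expected to increase by exactly one bias unit.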

    for pre, post in zip(previous_inactivity_scores, state.inactivity_scores):
        assert post == pre + spec.INACTIVITY_SCORE_BIAS
@@ -0,0 +1,35 @@
from eth2spec.test.context import (
    spec_state_test,
    with_phases,
)
from eth2spec.test.helpers.constants import ALTAIR
from eth2spec.test.helpers.merkle import build_proof


@with_phases([ALTAIR])
@spec_state_test
def test_next_sync_committee_tree(spec, state):
    state.next_sync_committee = spec.SyncCommittee(
        pubkeys=[state.validators[i].pubkey for i in range(spec.SYNC_COMMITTEE_SIZE)]
    )
    next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
    assert spec.is_valid_merkle_branch(
        leaf=state.next_sync_committee.hash_tree_root(),
        branch=next_sync_committee_branch,
        depth=spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX),
        index=spec.get_subtree_index(spec.NEXT_SYNC_COMMITTEE_INDEX),
        root=state.hash_tree_root(),
    )
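# NOTE: `NEXT_SYNC_COMMITTEE_INDEX` is a generalized index into the
# BeaconState Merkle tree: `floorlog2(gindex)` is the depth of the proof and
# `get_subtree_index(gindex)` drops the leading bit to recover the leaf
# position at that depth.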


@with_phases([ALTAIR])
@spec_state_test
def test_finality_root_tree(spec, state):
    finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)
    assert spec.is_valid_merkle_branch(
        leaf=state.finalized_checkpoint.root,
        branch=finality_branch,
        depth=spec.floorlog2(spec.FINALIZED_ROOT_INDEX),
        index=spec.get_subtree_index(spec.FINALIZED_ROOT_INDEX),
        root=state.hash_tree_root(),
    )
@@ -0,0 +1,218 @@
from eth2spec.test.context import (
    spec_state_test,
    with_configs,
    with_phases,
)
from eth2spec.test.helpers.attestations import next_epoch_with_attestations
from eth2spec.test.helpers.block import (
    build_empty_block,
    build_empty_block_for_next_slot,
)
from eth2spec.test.helpers.constants import (
    ALTAIR,
    MINIMAL,
)
from eth2spec.test.helpers.state import (
    next_slots,
    state_transition_and_sign_block,
)
from eth2spec.test.helpers.sync_committee import (
    compute_aggregate_sync_committee_signature,
)
from eth2spec.test.helpers.merkle import build_proof


@with_phases([ALTAIR])
@spec_state_test
def test_process_light_client_update_not_updated(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=set(),
    )

    # Block at slot 1 doesn't increase sync committee period, so it won't update snapshot
    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)
    block_header = spec.BeaconBlockHeader(
        slot=signed_block.message.slot,
        proposer_index=signed_block.message.proposer_index,
        parent_root=signed_block.message.parent_root,
        state_root=signed_block.message.state_root,
        body_root=signed_block.message.body.hash_tree_root(),
    )
    # Sync committee signing the header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block.slot,
        committee,
    )
    next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]

    # Ensure that finality checkpoint is genesis
    assert state.finalized_checkpoint.epoch == 0
    # Finality is unchanged
    finality_header = spec.BeaconBlockHeader()
    finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]

    update = spec.LightClientUpdate(
        header=block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=finality_header,
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    assert len(store.valid_updates) == 1
    assert store.valid_updates.pop() == update
    assert store.snapshot == pre_snapshot
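# NOTE: the update above carries no finality proof and does not advance the
# sync committee period, so `process_light_client_update` only queues it in
# `store.valid_updates`; the snapshot itself must remain unchanged.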


@with_phases([ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_process_light_client_update_timeout(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=set(),
    )

    # Forward to next sync committee period
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD))
    snapshot_period = spec.compute_epoch_at_slot(pre_snapshot.header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert snapshot_period + 1 == update_period

    block = build_empty_block_for_next_slot(spec, state)
    signed_block = state_transition_and_sign_block(spec, state, block)
    block_header = spec.BeaconBlockHeader(
        slot=signed_block.message.slot,
        proposer_index=signed_block.message.proposer_index,
        parent_root=signed_block.message.parent_root,
        state_root=signed_block.message.state_root,
        body_root=signed_block.message.body.hash_tree_root(),
    )

    # Sync committee signing the finalized_block_header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block_header.slot,
        committee,
        block_root=spec.Root(block_header.hash_tree_root()),
    )

    # Sync committee is updated
    next_sync_committee_branch = build_proof(state.get_backing(), spec.NEXT_SYNC_COMMITTEE_INDEX)
    # Finality is unchanged
    finality_header = spec.BeaconBlockHeader()
    finality_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.FINALIZED_ROOT_INDEX))]

    update = spec.LightClientUpdate(
        header=block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=finality_header,
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    # snapshot has been updated
    assert len(store.valid_updates) == 0
    assert store.snapshot.header == update.header
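# NOTE: here the store's snapshot is older than the update timeout (one full
# sync committee period), so `process_light_client_update` applies the best
# available update directly rather than waiting for a finalized header.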


@with_phases([ALTAIR])
@spec_state_test
@with_configs([MINIMAL], reason="too slow")
def test_process_light_client_update_finality_updated(spec, state):
    pre_snapshot = spec.LightClientSnapshot(
        header=spec.BeaconBlockHeader(),
        current_sync_committee=state.current_sync_committee,
        next_sync_committee=state.next_sync_committee,
    )
    store = spec.LightClientStore(
        snapshot=pre_snapshot,
        valid_updates=set(),
    )

    # Change finality
    blocks = []
    next_slots(spec, state, spec.SLOTS_PER_EPOCH * 2)
    for epoch in range(3):
        prev_state, new_blocks, state = next_epoch_with_attestations(spec, state, True, True)
        blocks += new_blocks
    # Ensure that finality checkpoint has changed
    assert state.finalized_checkpoint.epoch == 3
    # Ensure that it's same period
    snapshot_period = spec.compute_epoch_at_slot(pre_snapshot.header.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    update_period = spec.compute_epoch_at_slot(state.slot) // spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    assert snapshot_period == update_period

    # Updated sync_committee and finality
    next_sync_committee_branch = [spec.Bytes32() for _ in range(spec.floorlog2(spec.NEXT_SYNC_COMMITTEE_INDEX))]
    finalized_block_header = blocks[spec.SLOTS_PER_EPOCH - 1].message
    assert finalized_block_header.slot == spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)
    assert finalized_block_header.hash_tree_root() == state.finalized_checkpoint.root
    finality_branch = build_proof(state.get_backing(), spec.FINALIZED_ROOT_INDEX)

    # Build block header
    block = build_empty_block(spec, state)
    block_header = spec.BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=state.hash_tree_root(),
        body_root=block.body.hash_tree_root(),
    )

    # Sync committee signing the finalized_block_header
    committee = spec.get_sync_committee_indices(state, spec.get_current_epoch(state))
    sync_committee_bits = [True] * len(committee)
    sync_committee_signature = compute_aggregate_sync_committee_signature(
        spec,
        state,
        block_header.slot,
        committee,
        block_root=spec.Root(block_header.hash_tree_root()),
    )

    update = spec.LightClientUpdate(
        header=finalized_block_header,
        next_sync_committee=state.next_sync_committee,
        next_sync_committee_branch=next_sync_committee_branch,
        finality_header=block_header,  # block_header is the signed header
        finality_branch=finality_branch,
        sync_committee_bits=sync_committee_bits,
        sync_committee_signature=sync_committee_signature,
        fork_version=state.fork.current_version,
    )

    spec.process_light_client_update(store, update, state.slot, state.genesis_validators_root)

    # snapshot has been updated
    assert len(store.valid_updates) == 0
    assert store.snapshot.header == update.header
@@ -0,0 +1,165 @@
import random
from collections import defaultdict
from eth2spec.utils.ssz.ssz_typing import Bitvector
from eth2spec.test.helpers.block import build_empty_block
from eth2spec.test.helpers.keys import pubkey_to_privkey
from eth2spec.test.helpers.state import transition_to
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls
from eth2spec.test.context import (
    with_altair_and_later,
    with_state,
)


rng = random.Random(1337)


def ensure_assignments_in_sync_committee(
    spec, state, epoch, sync_committee, active_pubkeys
):
    assert len(sync_committee.pubkeys) >= 3
    some_pubkeys = rng.sample(sync_committee.pubkeys, 3)
    for pubkey in some_pubkeys:
        validator_index = active_pubkeys.index(pubkey)
        assert spec.is_assigned_to_sync_committee(state, epoch, validator_index)
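# NOTE: spot-checking three members sampled with a fixed-seed RNG keeps the
# test cheap (and reproducible) compared to asserting the assignment of all
# SYNC_COMMITTEE_SIZE members.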


@with_altair_and_later
@with_state
def test_is_assigned_to_sync_committee(phases, spec, state):
    epoch = spec.get_current_epoch(state)
    validator_indices = spec.get_active_validator_indices(state, epoch)
    validator_count = len(validator_indices)

    query_epoch = epoch + 1
    next_query_epoch = query_epoch + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD
    active_pubkeys = [state.validators[index].pubkey for index in validator_indices]

    ensure_assignments_in_sync_committee(
        spec, state, query_epoch, state.current_sync_committee, active_pubkeys
    )
    ensure_assignments_in_sync_committee(
        spec, state, next_query_epoch, state.next_sync_committee, active_pubkeys
    )

    sync_committee_pubkeys = set(
        list(state.current_sync_committee.pubkeys)
        + list(state.next_sync_committee.pubkeys)
    )
    disqualified_pubkeys = set(
        filter(lambda key: key not in sync_committee_pubkeys, active_pubkeys)
    )
    # NOTE: only check `disqualified_pubkeys` if SYNC_COMMITTEE_SIZE < validator count
    if disqualified_pubkeys:
        sample_size = 3
        assert validator_count >= sample_size
        some_pubkeys = rng.sample(disqualified_pubkeys, sample_size)
        for pubkey in some_pubkeys:
            validator_index = active_pubkeys.index(pubkey)
            is_current = spec.is_assigned_to_sync_committee(
                state, query_epoch, validator_index
            )
            is_next = spec.is_assigned_to_sync_committee(
                state, next_query_epoch, validator_index
            )
            is_current_or_next = is_current or is_next
            assert not is_current_or_next


def _get_sync_committee_signature(
    spec,
    state,
    target_slot,
    target_block_root,
    subcommittee_index,
    index_in_subcommittee,
):
    subcommittee_size = spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
    sync_committee_index = (
        subcommittee_index * subcommittee_size + index_in_subcommittee
    )
    pubkey = state.current_sync_committee.pubkeys[sync_committee_index]
    privkey = pubkey_to_privkey[pubkey]

    domain = spec.get_domain(
        state,
        spec.DOMAIN_SYNC_COMMITTEE,
    )
    signing_data = spec.compute_signing_root(target_block_root, domain)
    return bls.Sign(privkey, spec.hash_tree_root(signing_data))
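# NOTE: the message is signed under DOMAIN_SYNC_COMMITTEE. Since
# `compute_signing_root` already returns a 32-byte root, taking its
# `hash_tree_root` is an identity operation, so the signature is over the
# standard signing root.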


@only_with_bls()
@with_altair_and_later
@with_state
def test_process_sync_committee_contributions(phases, spec, state):
    # skip over slots at genesis
    transition_to(spec, state, state.slot + 3)

    # build a block and attempt to assemble a sync aggregate
    # from some sync committee contributions
    block = build_empty_block(spec, state)
    previous_slot = state.slot - 1
    target_block_root = spec.get_block_root_at_slot(state, previous_slot)
    aggregation_bits = Bitvector[
        spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT
    ]()
    aggregation_index = 0
    aggregation_bits[aggregation_index] = True

    contributions = [
        spec.SyncCommitteeContribution(
            slot=block.slot,
            beacon_block_root=target_block_root,
            subcommittee_index=i,
            aggregation_bits=aggregation_bits,
            signature=_get_sync_committee_signature(
                spec, state, previous_slot, target_block_root, i, aggregation_index
            ),
        )
        for i in range(spec.SYNC_COMMITTEE_SUBNET_COUNT)
    ]

    # ensure the block has an empty sync aggregate...
    empty_sync_aggregate = spec.SyncAggregate()
    empty_sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
    assert block.body.sync_aggregate == empty_sync_aggregate
    spec.process_sync_committee_contributions(block, set(contributions))

    # and that after processing, it is no longer empty
    assert len(block.body.sync_aggregate.sync_committee_bits) != 0
    assert (
        block.body.sync_aggregate.sync_committee_signature != spec.G2_POINT_AT_INFINITY
    )
    # moreover, ensure the sync aggregate is valid if the block is accepted
    spec.process_block(state, block)


def _validator_index_for_pubkey(state, pubkey):
    return list(map(lambda v: v.pubkey, state.validators)).index(pubkey)


def _subnet_for_sync_committee_index(spec, i):
    return i // (spec.SYNC_COMMITTEE_SIZE // spec.SYNC_COMMITTEE_SUBNET_COUNT)
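# (For example, with SYNC_COMMITTEE_SIZE = 512 and
# SYNC_COMMITTEE_SUBNET_COUNT = 4, each subnet covers 512 // 4 = 128 members:
# committee indices 0..127 map to subnet 0, 128..255 to subnet 1, and so on.)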


@with_altair_and_later
@with_state
def test_compute_subnets_for_sync_committee(state, spec, phases):
    some_sync_committee_members = list(
        (
            _subnet_for_sync_committee_index(spec, i),
            pubkey,
        )
        for i, pubkey in enumerate(state.current_sync_committee.pubkeys)
    )

    expected_subnets_by_pubkey = defaultdict(list)
    for (subnet, pubkey) in some_sync_committee_members:
        expected_subnets_by_pubkey[pubkey].append(subnet)

    for _, pubkey in some_sync_committee_members:
        validator_index = _validator_index_for_pubkey(state, pubkey)
        subnets = spec.compute_subnets_for_sync_committee(state, validator_index)
        expected_subnets = expected_subnets_by_pubkey[pubkey]
        assert subnets == expected_subnets
@@ -1,15 +1,20 @@
 import pytest
 from eth2spec.phase0 import spec as spec_phase0
-from eth2spec.phase1 import spec as spec_phase1
+from eth2spec.altair import spec as spec_altair
+from eth2spec.merge import spec as spec_merge
 from eth2spec.utils import bls
 
 from .exceptions import SkippedTest
+from .helpers.constants import (
+    PHASE0, ALTAIR, MERGE,
+    ALL_PHASES, FORKS_BEFORE_ALTAIR, FORKS_BEFORE_MERGE,
+)
 from .helpers.genesis import create_genesis_state
 from .utils import vector_test, with_meta_tags
 
 from random import Random
-from typing import Any, Callable, NewType, Sequence, TypedDict, Protocol
+from typing import Any, Callable, Sequence, TypedDict, Protocol
 
 from lru import LRU
@@ -18,20 +23,8 @@ from importlib import reload
 def reload_specs():
     reload(spec_phase0)
-    reload(spec_phase1)
+    reload(spec_altair)
+    reload(spec_merge)
 
-# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
-
-SpecForkName = NewType("SpecForkName", str)
-ConfigName = NewType("ConfigName", str)
-
-PHASE0 = SpecForkName('phase0')
-PHASE1 = SpecForkName('phase1')
-ALL_PHASES = (PHASE0, PHASE1)
-
-MAINNET = ConfigName('mainnet')
-MINIMAL = ConfigName('minimal')
-
 # TODO: currently phases are defined as python modules.
@@ -46,31 +39,27 @@ class SpecPhase0(Spec):
     ...
 
 
-class SpecPhase1(Spec):
-    def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
-        ...
+class SpecAltair(Spec):
+    ...
+
+
+class SpecMerge(Spec):
+    ...
 
 
 # add transfer, bridge, etc. as the spec evolves
 class SpecForks(TypedDict, total=False):
     PHASE0: SpecPhase0
-    PHASE1: SpecPhase1
+    ALTAIR: SpecAltair
+    MERGE: SpecMerge
 
 
 def _prepare_state(balances_fn: Callable[[Any], Sequence[int]], threshold_fn: Callable[[Any], int],
                    spec: Spec, phases: SpecForks):
-    p0 = phases[PHASE0]
-    balances = balances_fn(p0)
-    activation_threshold = threshold_fn(p0)
-
-    state = create_genesis_state(spec=p0, validator_balances=balances,
-                                 activation_threshold=activation_threshold)
-    if spec.fork == PHASE1:
-        # TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
-        # Decide based on performance/consistency results later.
-        state = phases[PHASE1].upgrade_to_phase1(state)
+    phase = phases[spec.fork]
+    balances = balances_fn(phase)
+    activation_threshold = threshold_fn(phase)
+    state = create_genesis_state(spec=phase, validator_balances=balances,
+                                 activation_threshold=activation_threshold)
     return state
@@ -83,8 +72,7 @@ def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
         def entry(*args, spec: Spec, phases: SpecForks, **kw):
             # make a key for the state
-            # genesis fork version separates configs during test-generation runtime.
-            key = (spec.fork, spec.GENESIS_FORK_VERSION, spec.__file__, balances_fn, threshold_fn)
+            key = (spec.fork, spec.CONFIG_NAME, spec.__file__, balances_fn, threshold_fn)
             global _custom_state_cache_dict
             if key not in _custom_state_cache_dict:
                 state = _prepare_state(balances_fn, threshold_fn, spec, phases)
@@ -324,25 +312,38 @@ def with_phases(phases, other_phases=None):
                 return None
             run_phases = [phase]
 
+            if PHASE0 not in run_phases and ALTAIR not in run_phases and MERGE not in run_phases:
+                dump_skipping_message("none of the recognized phases are executable, skipping test.")
+                return None
+
             available_phases = set(run_phases)
             if other_phases is not None:
-                available_phases += set(other_phases)
+                available_phases |= set(other_phases)
 
-            # TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
-            # A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
+            # TODO: test state is dependent on phase0 but is immediately transitioned to later phases.
+            # A new state-creation helper for later phases may be in place, and then tests can run without phase0
            available_phases.add(PHASE0)
 
+            # Populate all phases for multi-phase tests
             phase_dir = {}
             if PHASE0 in available_phases:
                 phase_dir[PHASE0] = spec_phase0
-            if PHASE1 in available_phases:
-                phase_dir[PHASE1] = spec_phase1
+            if ALTAIR in available_phases:
+                phase_dir[ALTAIR] = spec_altair
+            if MERGE in available_phases:
+                phase_dir[MERGE] = spec_merge
 
-            # return is ignored whenever multiple phases are ran. If
+            # return is ignored whenever multiple phases are ran.
+            # This return is for test generators to emit python generators (yielding test vector outputs)
             if PHASE0 in run_phases:
                 ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
-            if PHASE1 in run_phases:
-                ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
+            if ALTAIR in run_phases:
                ret = fn(spec=spec_altair, phases=phase_dir, *args, **kw)
+            if MERGE in run_phases:
+                ret = fn(spec=spec_merge, phases=phase_dir, *args, **kw)
+
+            # TODO: merge, sharding, custody_game and das are not executable yet.
+            # Tests that specify these features will not run, and get ignored for these specific phases.
             return ret
         return wrapper
     return decorator
@@ -364,15 +365,21 @@ def with_configs(configs, reason=None):
     return decorator
 
 
-def only_full_crosslink(fn):
-    def is_full_crosslink(spec, state):
-        epoch = spec.compute_epoch_at_slot(state.slot)
-        return spec.get_committee_count_per_slot(state, epoch) >= spec.get_active_shard_count(state)
-
-    def wrapper(*args, spec: Spec, state: Any, **kw):
-        # TODO: update condition to "phase1+" if we have phase2
-        if spec.fork == PHASE1 and not is_full_crosslink(spec, state):
-            dump_skipping_message("only for full crosslink")
-            return None
-        return fn(*args, spec=spec, state=state, **kw)
-    return wrapper
+def is_post_altair(spec):
+    if spec.fork == MERGE:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_ALTAIR:
+        return False
+    return True
+
+
+def is_post_merge(spec):
+    if spec.fork == ALTAIR:  # TODO: remove parallel Altair-Merge condition after rebase.
+        return False
+    if spec.fork in FORKS_BEFORE_MERGE:
+        return False
+    return True
+
+
+with_altair_and_later = with_phases([ALTAIR])  # TODO: include Merge, but not until Merge work is rebased.
+with_merge_and_later = with_phases([MERGE])
@@ -1,9 +1,9 @@
 from eth2spec.test.context import (
-    PHASE0,
-    with_all_phases_except,
+    with_phases,
     spec_state_test,
     always_bls,
 )
+from eth2spec.test.helpers.constants import CUSTODY_GAME
 from eth2spec.test.helpers.state import transition_to
 from eth2spec.test.helpers.attestations import (
     run_attestation_processing,
@@ -12,7 +12,7 @@ from eth2spec.test.helpers.attestations import (
 )
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_on_time_success(spec, state):
@@ -23,7 +23,7 @@ def test_on_time_success(spec, state):
     yield from run_attestation_processing(spec, state, attestation)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_late_success(spec, state):
@@ -6,14 +6,16 @@ from eth2spec.test.helpers.custody import (
 from eth2spec.test.helpers.attestations import (
     get_valid_on_time_attestation,
 )
+from eth2spec.test.helpers.constants import (
+    CUSTODY_GAME,
+    MINIMAL,
+)
 from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
 from eth2spec.test.context import (
-    PHASE0,
-    MINIMAL,
     expect_assertion_error,
     disable_process_reveal_deadlines,
     spec_state_test,
-    with_all_phases_except,
+    with_phases,
     with_configs,
 )
 from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
@@ -68,7 +70,7 @@ def run_custody_chunk_response_processing(spec, state, custody_response, valid=True):
     yield 'post', state
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @with_configs([MINIMAL], reason="too slow")
 @disable_process_reveal_deadlines
@@ -92,7 +94,7 @@ def test_challenge_appended(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -118,7 +120,7 @@ def test_challenge_empty_element_replaced(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -144,7 +146,7 @@ def test_duplicate_challenge(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge, valid=False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -172,7 +174,7 @@ def test_second_challenge(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge1)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -197,7 +199,7 @@ def test_multiple_epochs_custody(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -222,7 +224,7 @@ def test_many_epochs_custody(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -243,7 +245,7 @@ def test_off_chain_attestation(spec, state):
     yield from run_chunk_challenge_processing(spec, state, challenge)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -275,7 +277,7 @@ def test_custody_response(spec, state):
     yield from run_custody_chunk_response_processing(spec, state, custody_response)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -306,7 +308,7 @@ def test_custody_response_chunk_index_2(spec, state):
     yield from run_custody_chunk_response_processing(spec, state, custody_response)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -338,7 +340,7 @@ def test_custody_response_multiple_epochs(spec, state):
     yield from run_custody_chunk_response_processing(spec, state, custody_response)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -1,7 +1,7 @@
+from eth2spec.test.helpers.constants import CUSTODY_GAME
 from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
 from eth2spec.test.context import (
-    PHASE0,
-    with_all_phases_except,
+    with_phases,
     spec_state_test,
     expect_assertion_error,
     always_bls,
@@ -39,7 +39,7 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=True):
     yield 'post', state
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_success(spec, state):
@@ -49,7 +49,7 @@ def test_success(spec, state):
     yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_reveal_too_early(spec, state):
@@ -58,7 +58,7 @@ def test_reveal_too_early(spec, state):
     yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_wrong_period(spec, state):
@@ -67,7 +67,7 @@ def test_wrong_period(spec, state):
     yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_late_reveal(spec, state):
@@ -77,7 +77,7 @@ def test_late_reveal(spec, state):
     yield from run_custody_key_reveal_processing(spec, state, custody_key_reveal)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_double_reveal(spec, state):
@@ -5,13 +5,15 @@ from eth2spec.test.helpers.custody import (
 from eth2spec.test.helpers.attestations import (
     get_valid_on_time_attestation,
 )
+from eth2spec.test.helpers.constants import (
+    CUSTODY_GAME,
+    MINIMAL,
+)
 from eth2spec.test.helpers.keys import privkeys
 from eth2spec.utils.ssz.ssz_typing import ByteList
 from eth2spec.test.helpers.state import get_balance, transition_to
 from eth2spec.test.context import (
-    PHASE0,
-    MINIMAL,
-    with_all_phases_except,
+    with_phases,
     spec_state_test,
     expect_assertion_error,
     disable_process_reveal_deadlines,
@@ -112,7 +114,7 @@ def run_standard_custody_slashing_test(spec,
     yield from run_custody_slashing_processing(spec, state, slashing, valid=valid, correct=correct)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -120,7 +122,7 @@ def test_custody_slashing(spec, state):
     yield from run_standard_custody_slashing_test(spec, state)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -128,7 +130,7 @@ def test_incorrect_custody_slashing(spec, state):
     yield from run_standard_custody_slashing_test(spec, state, correct=False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -136,7 +138,7 @@ def test_multiple_epochs_custody(spec, state):
     yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 3)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -144,7 +146,7 @@ def test_many_epochs_custody(spec, state):
     yield from run_standard_custody_slashing_test(spec, state, shard_lateness=spec.SLOTS_PER_EPOCH * 5)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @disable_process_reveal_deadlines
 @with_configs([MINIMAL], reason="too slow")
@@ -1,8 +1,8 @@
+from eth2spec.test.helpers.constants import CUSTODY_GAME
 from eth2spec.test.helpers.custody import get_valid_early_derived_secret_reveal
 from eth2spec.test.helpers.state import next_epoch_via_block, get_balance
 from eth2spec.test.context import (
-    PHASE0,
-    with_all_phases_except,
+    with_phases,
     spec_state_test,
     expect_assertion_error,
     always_bls,
@@ -41,7 +41,7 @@ def run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, valid=True):
     yield 'post', state
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_success(spec, state):
@@ -50,7 +50,7 @@ def test_success(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @never_bls
 def test_reveal_from_current_epoch(spec, state):
@@ -59,7 +59,7 @@ def test_reveal_from_current_epoch(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @never_bls
 def test_reveal_from_past_epoch(spec, state):
@@ -69,7 +69,7 @@ def test_reveal_from_past_epoch(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_reveal_with_custody_padding(spec, state):
@@ -81,7 +81,7 @@ def test_reveal_with_custody_padding(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @always_bls
 def test_reveal_with_custody_padding_minus_one(spec, state):
@@ -93,7 +93,7 @@ def test_reveal_with_custody_padding_minus_one(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, True)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @never_bls
 def test_double_reveal(spec, state):
@@ -114,7 +114,7 @@ def test_double_reveal(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal2, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @never_bls
 def test_revealer_is_slashed(spec, state):
@@ -124,7 +124,7 @@ def test_revealer_is_slashed(spec, state):
     yield from run_early_derived_secret_reveal_processing(spec, state, randao_key_reveal, False)
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @never_bls
 def test_far_future_epoch(spec, state):
@@ -7,16 +7,18 @@ from eth2spec.test.helpers.attestations import (
 )
 from eth2spec.test.helpers.state import transition_to, transition_to_valid_shard_slot
 from eth2spec.test.context import (
-    PHASE0,
-    MINIMAL,
     spec_state_test,
-    with_all_phases_except,
+    with_phases,
     with_configs,
 )
 from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
-from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
+from eth2spec.test.helpers.constants import (
+    CUSTODY_GAME,
+    MINIMAL,
+)
+from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
 
-from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
+from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import (
     run_chunk_challenge_processing,
 )
@@ -25,7 +27,7 @@ def run_process_challenge_deadlines(spec, state):
     yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
 
 
-@with_all_phases_except([PHASE0])
+@with_phases([CUSTODY_GAME])
 @spec_state_test
 @with_configs([MINIMAL], reason="too slow")
 def test_validator_slashed_after_chunk_challenge(spec, state):
@ -1,5 +1,5 @@
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.helpers.constants import (
|
||||||
PHASE0,
|
CUSTODY_GAME,
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.custody import (
|
from eth2spec.test.helpers.custody import (
|
||||||
get_valid_chunk_challenge,
|
get_valid_chunk_challenge,
|
||||||
|
@ -12,24 +12,26 @@ from eth2spec.test.helpers.attestations import (
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot
|
from eth2spec.test.helpers.state import next_epoch_via_block, transition_to, transition_to_valid_shard_slot
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.context import (
|
||||||
with_all_phases_except,
|
with_phases,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
)
|
)
|
||||||
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
|
from eth2spec.test.phase0.block_processing.test_process_attestation import run_attestation_processing
|
||||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||||
|
|
||||||
from eth2spec.test.phase1.block_processing.test_process_chunk_challenge import (
|
from eth2spec.test.custody_game.block_processing.test_process_chunk_challenge import (
|
||||||
run_chunk_challenge_processing,
|
run_chunk_challenge_processing,
|
||||||
run_custody_chunk_response_processing,
|
run_custody_chunk_response_processing,
|
||||||
)
|
)
|
||||||
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
|
from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import (
|
||||||
|
run_custody_key_reveal_processing,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def run_process_custody_final_updates(spec, state):
|
def run_process_custody_final_updates(spec, state):
|
||||||
yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates')
|
yield from run_epoch_processing_with(spec, state, 'process_custody_final_updates')
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_withdrawal_delay(spec, state):
|
def test_validator_withdrawal_delay(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
@ -42,7 +44,7 @@ def test_validator_withdrawal_delay(spec, state):
|
||||||
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
assert state.validators[0].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
|
def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
@ -67,7 +69,7 @@ def test_validator_withdrawal_reenable_after_custody_reveal(spec, state):
|
||||||
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
assert state.validators[0].withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
|
def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
@ -116,7 +118,7 @@ def test_validator_withdrawal_suspend_after_chunk_challenge(spec, state):
|
||||||
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
assert state.validators[validator_index].withdrawable_epoch == spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state):
|
def test_validator_withdrawal_resume_after_chunk_challenge_response(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
|
@ -3,21 +3,25 @@ from eth2spec.test.helpers.custody import (
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.state import transition_to
|
from eth2spec.test.helpers.state import transition_to
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.context import (
|
||||||
PHASE0,
|
with_phases,
|
||||||
MINIMAL,
|
|
||||||
with_all_phases_except,
|
|
||||||
with_configs,
|
with_configs,
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
)
|
)
|
||||||
from eth2spec.test.phase0.epoch_processing.run_epoch_process_base import run_epoch_processing_with
|
from eth2spec.test.helpers.constants import (
|
||||||
from eth2spec.test.phase1.block_processing.test_process_custody_key_reveal import run_custody_key_reveal_processing
|
CUSTODY_GAME,
|
||||||
|
MINIMAL,
|
||||||
|
)
|
||||||
|
from eth2spec.test.helpers.epoch_processing import run_epoch_processing_with
|
||||||
|
from eth2spec.test.custody_game.block_processing.test_process_custody_key_reveal import (
|
||||||
|
run_custody_key_reveal_processing,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def run_process_challenge_deadlines(spec, state):
|
def run_process_challenge_deadlines(spec, state):
|
||||||
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
|
yield from run_epoch_processing_with(spec, state, 'process_challenge_deadlines')
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@with_configs([MINIMAL], reason="too slow")
|
@with_configs([MINIMAL], reason="too slow")
|
||||||
def test_validator_slashed_after_reveal_deadline(spec, state):
|
def test_validator_slashed_after_reveal_deadline(spec, state):
|
||||||
|
@ -37,7 +41,7 @@ def test_validator_slashed_after_reveal_deadline(spec, state):
|
||||||
assert state.validators[0].slashed == 1
|
assert state.validators[0].slashed == 1
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@with_configs([MINIMAL], reason="too slow")
|
@with_configs([MINIMAL], reason="too slow")
|
||||||
def test_validator_not_slashed_after_reveal(spec, state):
|
def test_validator_not_slashed_after_reveal(spec, state):
|
|
@ -1,14 +1,16 @@
|
||||||
from typing import Dict, Sequence
|
from typing import Dict, Sequence
|
||||||
|
|
||||||
from eth2spec.test.context import (
|
from eth2spec.test.context import (
|
||||||
PHASE0, MINIMAL,
|
with_phases,
|
||||||
with_all_phases_except,
|
|
||||||
spec_state_test,
|
spec_state_test,
|
||||||
only_full_crosslink,
|
|
||||||
with_configs,
|
with_configs,
|
||||||
)
|
)
|
||||||
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
|
from eth2spec.test.helpers.attestations import get_valid_on_time_attestation
|
||||||
from eth2spec.test.helpers.block import build_empty_block
|
from eth2spec.test.helpers.block import build_empty_block
|
||||||
|
from eth2spec.test.helpers.constants import (
|
||||||
|
CUSTODY_GAME,
|
||||||
|
MINIMAL,
|
||||||
|
)
|
||||||
from eth2spec.test.helpers.custody import (
|
from eth2spec.test.helpers.custody import (
|
||||||
get_custody_slashable_test_vector,
|
get_custody_slashable_test_vector,
|
||||||
get_valid_chunk_challenge,
|
get_valid_chunk_challenge,
|
||||||
|
@ -41,99 +43,13 @@ def run_beacon_block(spec, state, block, valid=True):
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
|
|
||||||
#
|
|
||||||
# Beacon block with non-empty shard transitions
|
|
||||||
#
|
|
||||||
|
|
||||||
|
|
||||||
def run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard, valid=True):
|
|
||||||
transition_to(spec, state, state.slot + target_len_offset_slot)
|
|
||||||
|
|
||||||
body = get_sample_shard_block_body(spec, is_max=True)
|
|
||||||
shard_block = build_shard_block(spec, state, shard, body=body, slot=state.slot, signed=True)
|
|
||||||
shard_block_dict: Dict[spec.Shard, Sequence[spec.SignedShardBlock]] = {shard: [shard_block]}
|
|
||||||
|
|
||||||
shard_transitions = get_shard_transitions(spec, state, shard_block_dict)
|
|
||||||
attestations = [
|
|
||||||
get_valid_on_time_attestation(
|
|
||||||
spec,
|
|
||||||
state,
|
|
||||||
index=committee_index,
|
|
||||||
shard_transition=shard_transitions[shard],
|
|
||||||
signed=True,
|
|
||||||
)
|
|
||||||
for shard in shard_block_dict.keys()
|
|
||||||
]
|
|
||||||
|
|
||||||
beacon_block = build_empty_block(spec, state, slot=state.slot + 1)
|
|
||||||
beacon_block.body.attestations = attestations
|
|
||||||
beacon_block.body.shard_transitions = shard_transitions
|
|
||||||
|
|
||||||
pre_gasprice = state.shard_states[shard].gasprice
|
|
||||||
pre_shard_states = state.shard_states.copy()
|
|
||||||
yield 'pre', state.copy()
|
|
||||||
|
|
||||||
if not valid:
|
|
||||||
state_transition_and_sign_block(spec, state, beacon_block, expect_fail=True)
|
|
||||||
yield 'block', beacon_block
|
|
||||||
yield 'post', None
|
|
||||||
return
|
|
||||||
|
|
||||||
signed_beacon_block = state_transition_and_sign_block(spec, state, beacon_block)
|
|
||||||
yield 'block', signed_beacon_block
|
|
||||||
yield 'post', state
|
|
||||||
|
|
||||||
for shard in range(spec.get_active_shard_count(state)):
|
|
||||||
post_shard_state = state.shard_states[shard]
|
|
||||||
if shard in shard_block_dict:
|
|
||||||
# Shard state has been changed to state_transition result
|
|
||||||
assert post_shard_state == shard_transitions[shard].shard_states[
|
|
||||||
len(shard_transitions[shard].shard_states) - 1
|
|
||||||
]
|
|
||||||
assert post_shard_state.slot == state.slot - 1
|
|
||||||
if len((shard_block_dict[shard])) == 0:
|
|
||||||
# `latest_block_root` is the same
|
|
||||||
assert post_shard_state.latest_block_root == pre_shard_states[shard].latest_block_root
|
|
||||||
if target_len_offset_slot == 1 and len(shard_block_dict[shard]) > 0:
|
|
||||||
assert post_shard_state.gasprice > pre_gasprice
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
|
||||||
@spec_state_test
|
|
||||||
@only_full_crosslink
|
|
||||||
def test_process_beacon_block_with_normal_shard_transition(spec, state):
|
|
||||||
transition_to_valid_shard_slot(spec, state)
|
|
||||||
|
|
||||||
target_len_offset_slot = 1
|
|
||||||
committee_index = spec.CommitteeIndex(0)
|
|
||||||
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
|
|
||||||
assert state.shard_states[shard].slot == state.slot - 1
|
|
||||||
|
|
||||||
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
|
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
|
||||||
@spec_state_test
|
|
||||||
@only_full_crosslink
|
|
||||||
def test_process_beacon_block_with_empty_proposal_transition(spec, state):
|
|
||||||
transition_to_valid_shard_slot(spec, state)
|
|
||||||
|
|
||||||
target_len_offset_slot = 1
|
|
||||||
committee_index = spec.CommitteeIndex(0)
|
|
||||||
shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot + target_len_offset_slot - 1)
|
|
||||||
assert state.shard_states[shard].slot == state.slot - 1
|
|
||||||
|
|
||||||
yield from run_beacon_block_with_shard_blocks(spec, state, target_len_offset_slot, committee_index, shard)
|
|
||||||
|
|
||||||
|
|
||||||
#
|
#
|
||||||
# Beacon block with custody operations
|
# Beacon block with custody operations
|
||||||
#
|
#
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@only_full_crosslink
|
|
||||||
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
|
def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
|
||||||
|
@ -165,7 +81,7 @@ def test_with_shard_transition_with_custody_challenge_and_response(spec, state):
|
||||||
yield from run_beacon_block(spec, state, block)
|
yield from run_beacon_block(spec, state, block)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@with_configs([MINIMAL])
|
@with_configs([MINIMAL])
|
||||||
def test_custody_key_reveal(spec, state):
|
def test_custody_key_reveal(spec, state):
|
||||||
|
@ -179,7 +95,7 @@ def test_custody_key_reveal(spec, state):
|
||||||
yield from run_beacon_block(spec, state, block)
|
yield from run_beacon_block(spec, state, block)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
def test_early_derived_secret_reveal(spec, state):
|
def test_early_derived_secret_reveal(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
@ -190,9 +106,8 @@ def test_early_derived_secret_reveal(spec, state):
|
||||||
yield from run_beacon_block(spec, state, block)
|
yield from run_beacon_block(spec, state, block)
|
||||||
|
|
||||||
|
|
||||||
@with_all_phases_except([PHASE0])
|
@with_phases([CUSTODY_GAME])
|
||||||
@spec_state_test
|
@spec_state_test
|
||||||
@only_full_crosslink
|
|
||||||
def test_custody_slashing(spec, state):
|
def test_custody_slashing(spec, state):
|
||||||
transition_to_valid_shard_slot(spec, state)
|
transition_to_valid_shard_slot(spec, state)
|
||||||
|
|
|
@ -2,10 +2,9 @@ from lru import LRU
|
||||||
|
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
from eth2spec.test.context import expect_assertion_error, PHASE1
|
from eth2spec.test.context import expect_assertion_error, is_post_altair
|
||||||
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
|
from eth2spec.test.helpers.state import state_transition_and_sign_block, next_epoch, next_slot
|
||||||
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
from eth2spec.test.helpers.block import build_empty_block_for_next_slot
|
||||||
from eth2spec.test.helpers.shard_transitions import get_shard_transition_of_committee
|
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
from eth2spec.utils.ssz.ssz_typing import Bitlist
|
||||||
|
@ -30,23 +29,28 @@ def run_attestation_processing(spec, state, attestation, valid=True):
|
||||||
yield 'post', None
|
yield 'post', None
|
||||||
return
|
return
|
||||||
|
|
||||||
current_epoch_count = len(state.current_epoch_attestations)
|
if not is_post_altair(spec):
|
||||||
previous_epoch_count = len(state.previous_epoch_attestations)
|
current_epoch_count = len(state.current_epoch_attestations)
|
||||||
|
previous_epoch_count = len(state.previous_epoch_attestations)
|
||||||
|
|
||||||
# process attestation
|
# process attestation
|
||||||
spec.process_attestation(state, attestation)
|
spec.process_attestation(state, attestation)
|
||||||
|
|
||||||
# Make sure the attestation has been processed
|
# Make sure the attestation has been processed
|
||||||
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
if not is_post_altair(spec):
|
||||||
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
if attestation.data.target.epoch == spec.get_current_epoch(state):
|
||||||
|
assert len(state.current_epoch_attestations) == current_epoch_count + 1
|
||||||
|
else:
|
||||||
|
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
||||||
else:
|
else:
|
||||||
assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
|
# After accounting reform, there are cases when processing an attestation does not result in any flag updates
|
||||||
|
pass
|
||||||
|
|
||||||
# yield post-state
|
# yield post-state
|
||||||
yield 'post', state
|
yield 'post', state
|
||||||
|
|
||||||
|
|
||||||
def build_attestation_data(spec, state, slot, index, shard=None, shard_transition=None, on_time=True):
|
def build_attestation_data(spec, state, slot, index, shard=None, on_time=True):
|
||||||
assert state.slot >= slot
|
assert state.slot >= slot
|
||||||
|
|
||||||
if slot == state.slot:
|
if slot == state.slot:
|
||||||
|
@ -77,32 +81,11 @@ def build_attestation_data(spec, state, slot, index, shard=None, shard_transitio
|
||||||
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
|
target=spec.Checkpoint(epoch=spec.compute_epoch_at_slot(slot), root=epoch_boundary_root),
|
||||||
)
|
)
|
||||||
|
|
||||||
if spec.fork == PHASE1:
|
# if spec.fork == SHARDING # TODO: add extra data for shard voting
|
||||||
if shard is None:
|
|
||||||
shard = spec.compute_shard_from_committee_index(state, data.index, data.slot)
|
|
||||||
data.shard = shard
|
|
||||||
|
|
||||||
if shard_transition is not None:
|
|
||||||
last_offset_index = len(shard_transition.shard_data_roots) - 1
|
|
||||||
data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root
|
|
||||||
data.shard_transition_root = shard_transition.hash_tree_root()
|
|
||||||
else:
|
|
||||||
if on_time:
|
|
||||||
if data.slot == spec.GENESIS_SLOT:
|
|
||||||
data.shard_head_root = spec.Root()
|
|
||||||
data.shard_transition_root = spec.ShardTransition().hash_tree_root()
|
|
||||||
else:
|
|
||||||
shard_transition = spec.get_shard_transition(state, shard, shard_blocks=[])
|
|
||||||
last_offset_index = len(shard_transition.shard_data_roots) - 1
|
|
||||||
data.shard_head_root = shard_transition.shard_states[last_offset_index].latest_block_root
|
|
||||||
data.shard_transition_root = shard_transition.hash_tree_root()
|
|
||||||
else:
|
|
||||||
data.shard_head_root = state.shard_states[shard].latest_block_root
|
|
||||||
data.shard_transition_root = spec.Root()
|
|
||||||
return data
|
return data
|
||||||
|
|
||||||
|
|
||||||
def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_transition=None, signed=False):
|
def get_valid_on_time_attestation(spec, state, slot=None, index=None, signed=False):
|
||||||
'''
|
'''
|
||||||
Construct on-time attestation for next slot
|
Construct on-time attestation for next slot
|
||||||
'''
|
'''
|
||||||
|
@ -116,13 +99,12 @@ def get_valid_on_time_attestation(spec, state, slot=None, index=None, shard_tran
|
||||||
state,
|
state,
|
||||||
slot=slot,
|
slot=slot,
|
||||||
index=index,
|
index=index,
|
||||||
shard_transition=shard_transition,
|
|
||||||
signed=signed,
|
signed=signed,
|
||||||
on_time=True,
|
on_time=True,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False, shard_transition=None):
|
def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False):
|
||||||
'''
|
'''
|
||||||
Construct on-time attestation for next slot
|
Construct on-time attestation for next slot
|
||||||
'''
|
'''
|
||||||
|
@ -132,7 +114,7 @@ def get_valid_late_attestation(spec, state, slot=None, index=None, signed=False,
|
||||||
index = 0
|
index = 0
|
||||||
|
|
||||||
return get_valid_attestation(spec, state, slot=slot, index=index,
|
return get_valid_attestation(spec, state, slot=slot, index=index,
|
||||||
signed=signed, on_time=False, shard_transition=shard_transition)
|
signed=signed, on_time=False)
|
||||||
|
|
||||||
|
|
||||||
def get_valid_attestation(spec,
|
def get_valid_attestation(spec,
|
||||||
|
@ -140,7 +122,6 @@ def get_valid_attestation(spec,
|
||||||
slot=None,
|
slot=None,
|
||||||
index=None,
|
index=None,
|
||||||
filter_participant_set=None,
|
filter_participant_set=None,
|
||||||
shard_transition=None,
|
|
||||||
signed=False,
|
signed=False,
|
||||||
on_time=True):
|
on_time=True):
|
||||||
# If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
|
# If filter_participant_set filters everything, the attestation has 0 participants, and cannot be signed.
|
||||||
|
@ -151,7 +132,7 @@ def get_valid_attestation(spec,
|
||||||
index = 0
|
index = 0
|
||||||
|
|
||||||
attestation_data = build_attestation_data(
|
attestation_data = build_attestation_data(
|
||||||
spec, state, slot=slot, index=index, shard_transition=shard_transition, on_time=on_time
|
spec, state, slot=slot, index=index, on_time=on_time
|
||||||
)
|
)
|
||||||
|
|
||||||
beacon_committee = spec.get_beacon_committee(
|
beacon_committee = spec.get_beacon_committee(
|
||||||
|
@ -253,16 +234,11 @@ def next_epoch_with_attestations(spec,
|
||||||
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
|
committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest))
|
||||||
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
|
if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(post_state)):
|
||||||
for index in range(committees_per_slot):
|
for index in range(committees_per_slot):
|
||||||
if spec.fork == PHASE1:
|
# if spec.fork == SHARDING: TODO: add shard data to attestation, include shard headers in block
|
||||||
shard = spec.compute_shard_from_committee_index(post_state, index, slot_to_attest)
|
|
||||||
shard_transition = get_shard_transition_of_committee(spec, post_state, index)
|
|
||||||
block.body.shard_transitions[shard] = shard_transition
|
|
||||||
else:
|
|
||||||
shard_transition = None
|
|
||||||
|
|
||||||
cur_attestation = get_valid_attestation(
|
cur_attestation = get_valid_attestation(
|
||||||
spec, post_state, slot_to_attest,
|
spec, post_state, slot_to_attest,
|
||||||
shard_transition=shard_transition, index=index, signed=True, on_time=True
|
index=index, signed=True, on_time=True
|
||||||
)
|
)
|
||||||
block.body.attestations.append(cur_attestation)
|
block.body.attestations.append(cur_attestation)
|
||||||
|
|
||||||
|
@ -315,7 +291,8 @@ def prepare_state_with_attestations(spec, state, participation_fn=None):
|
||||||
next_slot(spec, state)
|
next_slot(spec, state)
|
||||||
|
|
||||||
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
assert state.slot == next_epoch_start_slot + spec.MIN_ATTESTATION_INCLUSION_DELAY
|
||||||
assert len(state.previous_epoch_attestations) == len(attestations)
|
if not is_post_altair(spec):
|
||||||
|
assert len(state.previous_epoch_attestations) == len(attestations)
|
||||||
|
|
||||||
return attestations
|
return attestations
|
||||||
|
|
||||||
|
|
|
@ -1,3 +1,5 @@
|
||||||
|
from eth2spec.test.context import is_post_altair, is_post_merge
|
||||||
|
from eth2spec.test.helpers.execution_payload import build_empty_execution_payload
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
from eth2spec.utils.bls import only_with_bls
|
from eth2spec.utils.bls import only_with_bls
|
||||||
|
@ -89,6 +91,13 @@ def build_empty_block(spec, state, slot=None):
|
||||||
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
empty_block.proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
|
||||||
empty_block.parent_root = parent_block_root
|
empty_block.parent_root = parent_block_root
|
||||||
|
|
||||||
|
if is_post_altair(spec):
|
||||||
|
empty_block.body.sync_aggregate.sync_committee_signature = spec.G2_POINT_AT_INFINITY
|
||||||
|
|
||||||
|
if is_post_merge(spec):
|
||||||
|
empty_block.body.execution_payload = build_empty_execution_payload(spec, state)
|
||||||
|
|
||||||
apply_randao_reveal(spec, state, empty_block)
|
apply_randao_reveal(spec, state, empty_block)
|
||||||
return empty_block
|
return empty_block
|
||||||
|
|
||||||
|
|
|
@ -0,0 +1,62 @@
|
||||||
|
def for_ops(state, operations, fn) -> None:
|
||||||
|
for operation in operations:
|
||||||
|
fn(state, operation)
|
||||||
|
|
||||||
|
|
||||||
|
def get_process_calls(spec):
|
||||||
|
return {
|
||||||
|
# PHASE0
|
||||||
|
'process_block_header':
|
||||||
|
lambda state, block: spec.process_block_header(state, block),
|
||||||
|
'process_randao':
|
||||||
|
lambda state, block: spec.process_randao(state, block.body),
|
||||||
|
'process_eth1_data':
|
||||||
|
lambda state, block: spec.process_eth1_data(state, block.body),
|
||||||
|
'process_proposer_slashing':
|
||||||
|
lambda state, block: for_ops(state, block.body.proposer_slashings, spec.process_proposer_slashing),
|
||||||
|
'process_attester_slashing':
|
||||||
|
lambda state, block: for_ops(state, block.body.attester_slashings, spec.process_attester_slashing),
|
||||||
|
'process_shard_header':
|
||||||
|
lambda state, block: for_ops(state, block.body.shard_headers, spec.process_shard_header),
|
||||||
|
'process_attestation':
|
||||||
|
lambda state, block: for_ops(state, block.body.attestations, spec.process_attestation),
|
||||||
|
'process_deposit':
|
||||||
|
lambda state, block: for_ops(state, block.body.deposits, spec.process_deposit),
|
||||||
|
'process_voluntary_exit':
|
||||||
|
lambda state, block: for_ops(state, block.body.voluntary_exits, spec.process_voluntary_exit),
|
||||||
|
# Altair
|
||||||
|
'process_sync_committee':
|
||||||
|
lambda state, block: spec.process_sync_committee(state, block.body.sync_aggregate),
|
||||||
|
# Merge
|
||||||
|
'process_application_payload':
|
||||||
|
lambda state, block: spec.process_application_payload(state, block.body),
|
||||||
|
# Custody Game
|
||||||
|
'process_custody_game_operations':
|
||||||
|
lambda state, block: spec.process_custody_game_operations(state, block.body),
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def run_block_processing_to(spec, state, block, process_name: str):
|
||||||
|
"""
|
||||||
|
Processes to the block transition, up to, but not including, the sub-transition named ``process_name``.
|
||||||
|
Returns a Callable[[state, block], None] for the remaining ``process_name`` transition.
|
||||||
|
|
||||||
|
Tests should create full blocks to ensure a valid state transition, even if the operation itself is isolated.
|
||||||
|
(e.g. latest_header in the beacon state is up-to-date in a sync-committee test).
|
||||||
|
|
||||||
|
A test prepares a pre-state by calling this function, output the pre-state,
|
||||||
|
and it can then proceed to run the returned callable, and output a post-state.
|
||||||
|
"""
|
||||||
|
print(f"state.slot {state.slot} block.slot {block.slot}")
|
||||||
|
# transition state to slot before block state transition
|
||||||
|
if state.slot < block.slot:
|
||||||
|
spec.process_slots(state, block.slot)
|
||||||
|
print(f"state.slot {state.slot} block.slot {block.slot} A")
|
||||||
|
|
||||||
|
# process components of block transition
|
||||||
|
for name, call in get_process_calls(spec).items():
|
||||||
|
if name == process_name:
|
||||||
|
return call
|
||||||
|
# only run when present. Later phases introduce more to the block-processing.
|
||||||
|
if hasattr(spec, name):
|
||||||
|
call(state, block)
|
|
@ -0,0 +1,34 @@
|
||||||
|
from .typing import SpecForkName, ConfigName
|
||||||
|
|
||||||
|
|
||||||
|
#
|
||||||
|
# SpecForkName
|
||||||
|
#
|
||||||
|
# Some of the Spec module functionality is exposed here to deal with phase-specific changes.
|
||||||
|
PHASE0 = SpecForkName('phase0')
|
||||||
|
ALTAIR = SpecForkName('altair')
|
||||||
|
MERGE = SpecForkName('merge')
|
||||||
|
|
||||||
|
# Experimental phases (not included in default "ALL_PHASES"):
|
||||||
|
SHARDING = SpecForkName('sharding')
|
||||||
|
CUSTODY_GAME = SpecForkName('custody_game')
|
||||||
|
DAS = SpecForkName('das')
|
||||||
|
|
||||||
|
# The forks that pytest runs with.
|
||||||
|
ALL_PHASES = (PHASE0, ALTAIR, MERGE)
|
||||||
|
# The forks that output to the test vectors.
|
||||||
|
TESTGEN_FORKS = (PHASE0, ALTAIR, MERGE)
|
||||||
|
# TODO: everything runs in parallel to Altair.
|
||||||
|
# After features are rebased on the Altair fork, this can be reduced to just PHASE0.
|
||||||
|
FORKS_BEFORE_ALTAIR = (PHASE0, MERGE, SHARDING, CUSTODY_GAME, DAS)
|
||||||
|
|
||||||
|
# TODO: when rebasing Merge onto Altair, add ALTAIR to this tuple.
|
||||||
|
FORKS_BEFORE_MERGE = (PHASE0,)
|
||||||
|
|
||||||
|
#
|
||||||
|
# Config
|
||||||
|
#
|
||||||
|
MAINNET = ConfigName('mainnet')
|
||||||
|
MINIMAL = ConfigName('minimal')
|
||||||
|
|
||||||
|
ALL_CONFIGS = (MINIMAL, MAINNET)
|
|
@ -1,7 +1,7 @@
|
||||||
from eth2spec.test.helpers.keys import privkeys
|
from eth2spec.test.helpers.keys import privkeys
|
||||||
|
from eth2spec.test.helpers.merkle import build_proof
|
||||||
from eth2spec.utils import bls
|
from eth2spec.utils import bls
|
||||||
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, ByteList
|
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, ByteList
|
||||||
from remerkleable.tree import gindex_bit_iter
|
|
||||||
|
|
||||||
BYTES_PER_CHUNK = 32
|
BYTES_PER_CHUNK = 32
|
||||||
|
|
||||||
|
@ -116,26 +116,6 @@ def custody_chunkify(spec, x):
|
||||||
return [ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](c) for c in chunks]
|
return [ByteVector[spec.BYTES_PER_CUSTODY_CHUNK](c) for c in chunks]
|
||||||
|
|
||||||
|
|
||||||
def build_proof(anchor, leaf_index):
|
|
||||||
if leaf_index <= 1:
|
|
||||||
return [] # Nothing to prove / invalid index
|
|
||||||
node = anchor
|
|
||||||
proof = []
|
|
||||||
# Walk down, top to bottom to the leaf
|
|
||||||
bit_iter, _ = gindex_bit_iter(leaf_index)
|
|
||||||
for bit in bit_iter:
|
|
||||||
# Always take the opposite hand for the proof.
|
|
||||||
# 1 = right as leaf, thus get left
|
|
||||||
if bit:
|
|
||||||
proof.append(node.get_left().merkle_root())
|
|
||||||
node = node.get_right()
|
|
||||||
else:
|
|
||||||
proof.append(node.get_right().merkle_root())
|
|
||||||
node = node.get_left()
|
|
||||||
|
|
||||||
return list(reversed(proof))
|
|
||||||
|
|
||||||
|
|
||||||
def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
|
def get_valid_custody_chunk_response(spec, state, chunk_challenge, challenge_index,
|
||||||
block_length_or_custody_data,
|
block_length_or_custody_data,
|
||||||
invalid_chunk_data=False):
|
invalid_chunk_data=False):
|
||||||
|
|
|
@ -0,0 +1,65 @@
|
||||||
|
|
||||||
|
from eth2spec.test.context import is_post_altair
|
||||||
|
|
||||||
|
|
||||||
|
def get_process_calls(spec):
|
||||||
|
# unrecognized processing functions will be ignored.
|
||||||
|
# This sums up the aggregate of processing functions of all phases.
|
||||||
|
# Note: make sure to explicitly remove/override a processing function in later phases,
|
||||||
|
# or the old function will stick around.
|
||||||
|
return [
|
||||||
|
'process_justification_and_finalization',
|
||||||
|
'process_rewards_and_penalties',
|
||||||
|
'process_registry_updates',
|
||||||
|
'process_reveal_deadlines', # custody game
|
||||||
|
'process_challenge_deadlines', # custody game
|
||||||
|
'process_slashings',
|
||||||
|
'process_pending_header.', # sharding
|
||||||
|
'charge_confirmed_header_fees', # sharding
|
||||||
|
'reset_pending_headers', # sharding
|
||||||
|
'process_eth1_data_reset',
|
||||||
|
'process_effective_balance_updates',
|
||||||
|
'process_slashings_reset',
|
||||||
|
'process_randao_mixes_reset',
|
||||||
|
'process_historical_roots_update',
|
||||||
|
# Altair replaced `process_participation_record_updates` with `process_participation_flag_updates`
|
||||||
|
'process_participation_flag_updates' if is_post_altair(spec) else (
|
||||||
|
'process_participation_record_updates'
|
||||||
|
),
|
||||||
|
'process_sync_committee_updates',
|
||||||
|
'process_shard_epoch_increment' # sharding
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def run_epoch_processing_to(spec, state, process_name: str):
|
||||||
|
"""
|
||||||
|
Processes to the next epoch transition, up to, but not including, the sub-transition named ``process_name``
|
||||||
|
"""
|
||||||
|
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH)
|
||||||
|
|
||||||
|
# transition state to slot before epoch state transition
|
||||||
|
if state.slot < slot - 1:
|
||||||
|
spec.process_slots(state, slot - 1)
|
||||||
|
|
||||||
|
# start transitioning, do one slot update before the epoch itself.
|
||||||
|
spec.process_slot(state)
|
||||||
|
|
||||||
|
# process components of epoch transition before final-updates
|
||||||
|
for name in get_process_calls(spec):
|
||||||
|
if name == process_name:
|
||||||
|
break
|
||||||
|
# only run when present. Later phases introduce more to the epoch-processing.
|
||||||
|
if hasattr(spec, name):
|
||||||
|
getattr(spec, name)(state)
|
||||||
|
|
||||||
|
|
||||||
|
def run_epoch_processing_with(spec, state, process_name: str):
|
||||||
|
"""
|
||||||
|
Processes to the next epoch transition, up to and including the sub-transition named ``process_name``
|
||||||
|
- pre-state ('pre'), state before calling ``process_name``
|
||||||
|
- post-state ('post'), state after calling ``process_name``
|
||||||
|
"""
|
||||||
|
run_epoch_processing_to(spec, state, process_name)
|
||||||
|
yield 'pre', state
|
||||||
|
getattr(spec, process_name)(state)
|
||||||
|
yield 'post', state
|
|
@ -0,0 +1,26 @@
|
||||||
|
|
||||||
|
def build_empty_execution_payload(spec, state):
|
||||||
|
"""
|
||||||
|
Assuming a pre-state of the same slot, build a valid ExecutionPayload without any transactions.
|
||||||
|
"""
|
||||||
|
latest = state.latest_execution_payload_header
|
||||||
|
timestamp = spec.compute_time_at_slot(state, state.slot)
|
||||||
|
empty_txs = spec.List[spec.OpaqueTransaction, spec.MAX_EXECUTION_TRANSACTIONS]()
|
||||||
|
|
||||||
|
payload = spec.ExecutionPayload(
|
||||||
|
block_hash=spec.Hash32(),
|
||||||
|
parent_hash=latest.block_hash,
|
||||||
|
coinbase=spec.Bytes20(),
|
||||||
|
state_root=latest.state_root, # no changes to the state
|
||||||
|
number=latest.number + 1,
|
||||||
|
gas_limit=latest.gas_limit, # retain same limit
|
||||||
|
gas_used=0, # empty block, 0 gas
|
||||||
|
timestamp=timestamp,
|
||||||
|
receipt_root=b"no receipts here" + b"\x00" * 16, # TODO: root of empty MPT may be better.
|
||||||
|
logs_bloom=spec.ByteVector[spec.BYTES_PER_LOGS_BLOOM](), # TODO: zeroed logs bloom for empty logs ok?
|
||||||
|
transactions=empty_txs,
|
||||||
|
)
|
||||||
|
# TODO: real RLP + block hash logic would be nice, requires RLP and keccak256 dependency however.
|
||||||
|
payload.block_hash = spec.Hash32(spec.hash(payload.hash_tree_root() + b"FAKE RLP HASH"))
|
||||||
|
|
||||||
|
return payload
|
|
@ -1,4 +1,4 @@
|
||||||
from eth2spec.phase0 import spec as phase0_spec
|
from eth_utils import encode_hex
|
||||||
|
|
||||||
|
|
||||||
def get_anchor_root(spec, state):
|
def get_anchor_root(spec, state):
|
||||||
|
@ -18,7 +18,23 @@ def add_block_to_store(spec, store, signed_block):
|
||||||
spec.on_block(store, signed_block)
|
spec.on_block(store, signed_block)
|
||||||
|
|
||||||
|
|
||||||
def add_attestation_to_store(spec, store, attestation):
|
def tick_and_run_on_block(spec, store, signed_block, test_steps=None):
|
||||||
|
if test_steps is None:
|
||||||
|
test_steps = []
|
||||||
|
|
||||||
|
pre_state = store.block_states[signed_block.message.parent_root]
|
||||||
|
block_time = pre_state.genesis_time + signed_block.message.slot * spec.SECONDS_PER_SLOT
|
||||||
|
|
||||||
|
if store.time < block_time:
|
||||||
|
on_tick_and_append_step(spec, store, block_time, test_steps)
|
||||||
|
|
||||||
|
yield from run_on_block(spec, store, signed_block, test_steps)
|
||||||
|
|
||||||
|
|
||||||
|
def tick_and_run_on_attestation(spec, store, attestation, test_steps=None):
|
||||||
|
if test_steps is None:
|
||||||
|
test_steps = []
|
||||||
|
|
||||||
parent_block = store.blocks[attestation.data.beacon_block_root]
|
parent_block = store.blocks[attestation.data.beacon_block_root]
|
||||||
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
pre_state = store.block_states[spec.hash_tree_root(parent_block)]
|
||||||
block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
|
block_time = pre_state.genesis_time + parent_block.slot * spec.SECONDS_PER_SLOT
|
||||||
|
@ -26,12 +42,71 @@ def add_attestation_to_store(spec, store, attestation):
|
||||||
|
|
||||||
if store.time < next_epoch_time:
|
if store.time < next_epoch_time:
|
||||||
spec.on_tick(store, next_epoch_time)
|
spec.on_tick(store, next_epoch_time)
|
||||||
|
test_steps.append({'tick': int(next_epoch_time)})
|
||||||
|
|
||||||
spec.on_attestation(store, attestation)
|
spec.on_attestation(store, attestation)
|
||||||
|
yield get_attestation_file_name(attestation), attestation
|
||||||
|
test_steps.append({'attestation': get_attestation_file_name(attestation)})
|
||||||
|
|
||||||
|
|
||||||
def get_genesis_forkchoice_store(spec, genesis_state):
|
def get_genesis_forkchoice_store(spec, genesis_state):
|
||||||
|
store, _ = get_genesis_forkchoice_store_and_block(spec, genesis_state)
|
||||||
|
return store
|
||||||
|
|
||||||
|
|
||||||
|
def get_genesis_forkchoice_store_and_block(spec, genesis_state):
|
||||||
assert genesis_state.slot == spec.GENESIS_SLOT
|
assert genesis_state.slot == spec.GENESIS_SLOT
|
||||||
# The genesis block must be a Phase 0 `BeaconBlock`
|
genesis_block = spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
|
||||||
genesis_block = phase0_spec.BeaconBlock(state_root=genesis_state.hash_tree_root())
|
return spec.get_forkchoice_store(genesis_state, genesis_block), genesis_block
|
||||||
return spec.get_forkchoice_store(genesis_state, genesis_block)
|
|
||||||
|
|
||||||
|
def get_block_file_name(block):
|
||||||
|
return f"block_{encode_hex(block.hash_tree_root())}"
|
||||||
|
|
||||||
|
|
||||||
|
def get_attestation_file_name(attestation):
|
||||||
|
return f"attestation_{encode_hex(attestation.hash_tree_root())}"
|
||||||
|
|
||||||
|
|
||||||
|
def on_tick_and_append_step(spec, store, time, test_steps):
|
||||||
|
spec.on_tick(store, time)
|
||||||
|
test_steps.append({'tick': int(time)})
|
||||||
|
|
||||||
|
|
||||||
|
def run_on_block(spec, store, signed_block, test_steps, valid=True):
|
||||||
|
if not valid:
|
||||||
|
try:
|
||||||
|
spec.on_block(store, signed_block)
|
||||||
|
|
||||||
|
except AssertionError:
|
||||||
|
return
|
||||||
|
else:
|
||||||
|
assert False
|
||||||
|
|
||||||
|
spec.on_block(store, signed_block)
|
||||||
|
yield get_block_file_name(signed_block), signed_block
|
||||||
|
test_steps.append({'block': get_block_file_name(signed_block)})
|
||||||
|
|
||||||
|
# An on_block step implies receiving block's attestations
|
||||||
|
for attestation in signed_block.message.body.attestations:
|
||||||
|
spec.on_attestation(store, attestation)
|
||||||
|
|
||||||
|
assert store.blocks[signed_block.message.hash_tree_root()] == signed_block.message
|
||||||
|
test_steps.append({
|
||||||
|
'checks': {
|
||||||
|
'time': int(store.time),
|
||||||
|
'head': get_formatted_head_output(spec, store),
|
||||||
|
'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root),
|
||||||
|
'finalized_checkpoint_root': encode_hex(store.finalized_checkpoint.root),
|
||||||
|
'best_justified_checkpoint': encode_hex(store.best_justified_checkpoint.root),
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
|
||||||
|
def get_formatted_head_output(spec, store):
|
||||||
|
head = spec.get_head(store)
|
||||||
|
slot = store.blocks[head].slot
|
||||||
|
return {
|
||||||
|
'slot': int(slot),
|
||||||
|
'root': encode_hex(head),
|
||||||
|
}
|
||||||
|
|
|
@ -1,3 +1,8 @@
|
||||||
|
from eth2spec.test.helpers.constants import (
|
||||||
|
ALTAIR,
|
||||||
|
FORKS_BEFORE_ALTAIR,
|
||||||
|
MERGE,
|
||||||
|
)
|
||||||
from eth2spec.test.helpers.keys import pubkeys
|
from eth2spec.test.helpers.keys import pubkeys
|
||||||
|
|
||||||
|
|
||||||
|
@ -20,6 +25,13 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
deposit_root = b'\x42' * 32
|
deposit_root = b'\x42' * 32
|
||||||
|
|
||||||
eth1_block_hash = b'\xda' * 32
|
eth1_block_hash = b'\xda' * 32
|
||||||
|
current_version = spec.GENESIS_FORK_VERSION
|
||||||
|
|
||||||
|
if spec.fork == ALTAIR:
|
||||||
|
current_version = spec.ALTAIR_FORK_VERSION
|
||||||
|
elif spec.fork == MERGE:
|
||||||
|
current_version = spec.MERGE_FORK_VERSION
|
||||||
|
|
||||||
state = spec.BeaconState(
|
state = spec.BeaconState(
|
||||||
genesis_time=0,
|
genesis_time=0,
|
||||||
eth1_deposit_index=len(validator_balances),
|
eth1_deposit_index=len(validator_balances),
|
||||||
|
@ -28,6 +40,11 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
deposit_count=len(validator_balances),
|
deposit_count=len(validator_balances),
|
||||||
block_hash=eth1_block_hash,
|
block_hash=eth1_block_hash,
|
||||||
),
|
),
|
||||||
|
fork=spec.Fork(
|
||||||
|
previous_version=spec.GENESIS_FORK_VERSION,
|
||||||
|
current_version=current_version,
|
||||||
|
epoch=spec.GENESIS_EPOCH,
|
||||||
|
),
|
||||||
latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
|
latest_block_header=spec.BeaconBlockHeader(body_root=spec.hash_tree_root(spec.BeaconBlockBody())),
|
||||||
randao_mixes=[eth1_block_hash] * spec.EPOCHS_PER_HISTORICAL_VECTOR,
|
randao_mixes=[eth1_block_hash] * spec.EPOCHS_PER_HISTORICAL_VECTOR,
|
||||||
)
|
)
|
||||||
|
@ -42,8 +59,19 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
|
||||||
if validator.effective_balance >= activation_threshold:
|
if validator.effective_balance >= activation_threshold:
|
||||||
validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
|
validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
|
||||||
validator.activation_epoch = spec.GENESIS_EPOCH
|
validator.activation_epoch = spec.GENESIS_EPOCH
|
||||||
|
if spec.fork not in FORKS_BEFORE_ALTAIR:
|
||||||
|
state.previous_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
|
||||||
|
state.current_epoch_participation.append(spec.ParticipationFlags(0b0000_0000))
|
||||||
|
state.inactivity_scores.append(spec.uint64(0))
|
||||||
|
|
||||||
# Set genesis validators root for domain separation and chain versioning
|
# Set genesis validators root for domain separation and chain versioning
|
||||||
state.genesis_validators_root = spec.hash_tree_root(state.validators)
|
state.genesis_validators_root = spec.hash_tree_root(state.validators)
|
||||||
|
|
||||||
|
if spec.fork not in FORKS_BEFORE_ALTAIR:
|
||||||
|
# Fill in sync committees
|
||||||
|
state.current_sync_committee = spec.get_sync_committee(state, spec.get_current_epoch(state))
|
||||||
|
state.next_sync_committee = (
|
||||||
|
spec.get_sync_committee(state, spec.get_current_epoch(state) + spec.EPOCHS_PER_SYNC_COMMITTEE_PERIOD)
|
||||||
|
)
|
||||||
|
|
||||||
return state
|
return state
|
||||||
|
|
|
@ -0,0 +1,21 @@
|
||||||
|
from remerkleable.tree import gindex_bit_iter
|
||||||
|
|
||||||
|
|
||||||
|
def build_proof(anchor, leaf_index):
|
||||||
|
if leaf_index <= 1:
|
||||||
|
return [] # Nothing to prove / invalid index
|
||||||
|
node = anchor
|
||||||
|
proof = []
|
||||||
|
# Walk down, top to bottom to the leaf
|
||||||
|
bit_iter, _ = gindex_bit_iter(leaf_index)
|
||||||
|
for bit in bit_iter:
|
||||||
|
# Always take the opposite hand for the proof.
|
||||||
|
# 1 = right as leaf, thus get left
|
||||||
|
if bit:
|
||||||
|
proof.append(node.get_left().merkle_root())
|
||||||
|
node = node.get_right()
|
||||||
|
else:
|
||||||
|
proof.append(node.get_right().merkle_root())
|
||||||
|
node = node.get_left()
|
||||||
|
|
||||||
|
return list(reversed(proof))
|
|
@ -1,8 +1,16 @@
|
||||||
|
from eth2spec.test.context import is_post_altair
|
||||||
from eth2spec.test.helpers.block_header import sign_block_header
|
from eth2spec.test.helpers.block_header import sign_block_header
|
||||||
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
from eth2spec.test.helpers.keys import pubkey_to_privkey
|
||||||
from eth2spec.test.helpers.state import get_balance
|
from eth2spec.test.helpers.state import get_balance
|
||||||
|
|
||||||
|
|
||||||
|
def get_min_slashing_penalty_quotient(spec):
|
||||||
|
if is_post_altair(spec):
|
||||||
|
return spec.MIN_SLASHING_PENALTY_QUOTIENT_ALTAIR
|
||||||
|
else:
|
||||||
|
return spec.MIN_SLASHING_PENALTY_QUOTIENT
|
||||||
|
|
||||||
|
|
||||||
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||||
slashed_validator = state.validators[slashed_index]
|
slashed_validator = state.validators[slashed_index]
|
||||||
assert slashed_validator.slashed
|
assert slashed_validator.slashed
|
||||||
|
@ -10,7 +18,7 @@ def check_proposer_slashing_effect(spec, pre_state, state, slashed_index):
|
||||||
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
|
||||||
|
|
||||||
proposer_index = spec.get_beacon_proposer_index(state)
|
proposer_index = spec.get_beacon_proposer_index(state)
|
||||||
slash_penalty = state.validators[slashed_index].effective_balance // spec.MIN_SLASHING_PENALTY_QUOTIENT
|
slash_penalty = state.validators[slashed_index].effective_balance // get_min_slashing_penalty_quotient(spec)
|
||||||
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
whistleblower_reward = state.validators[slashed_index].effective_balance // spec.WHISTLEBLOWER_REWARD_QUOTIENT
|
||||||
if proposer_index != slashed_index:
|
if proposer_index != slashed_index:
|
||||||
# slashed validator lost initial slash penalty
|
# slashed validator lost initial slash penalty
|
||||||
|
|
|
@ -2,6 +2,7 @@ from random import Random
|
||||||
from lru import LRU
|
from lru import LRU
|
||||||
|
|
||||||
from eth2spec.phase0 import spec as spec_phase0
|
from eth2spec.phase0 import spec as spec_phase0
|
||||||
|
from eth2spec.test.context import is_post_altair
|
||||||
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
from eth2spec.test.helpers.attestations import cached_prepare_state_with_attestations
|
||||||
from eth2spec.test.helpers.deposits import mock_deposit
|
from eth2spec.test.helpers.deposits import mock_deposit
|
||||||
from eth2spec.test.helpers.state import next_epoch
|
from eth2spec.test.helpers.state import next_epoch
|
||||||
|
@ -26,6 +27,26 @@ def has_enough_for_reward(spec, state, index):
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def has_enough_for_leak_penalty(spec, state, index):
|
||||||
|
"""
|
||||||
|
Check if effective_balance and state of leak is high enough for a leak penalty.
|
||||||
|
|
||||||
|
At very low balances / leak values, it is possible for a validator have a positive effective_balance
|
||||||
|
and be in a leak, but have zero leak penalty.
|
||||||
|
"""
|
||||||
|
|
||||||
|
if is_post_altair(spec):
|
||||||
|
return (
|
||||||
|
state.validators[index].effective_balance * state.inactivity_scores[index]
|
||||||
|
> spec.INACTIVITY_SCORE_BIAS * spec.INACTIVITY_PENALTY_QUOTIENT_ALTAIR
|
||||||
|
)
|
||||||
|
else:
|
||||||
|
return (
|
||||||
|
state.validators[index].effective_balance * spec.get_finality_delay(state)
|
||||||
|
> spec.INACTIVITY_PENALTY_QUOTIENT
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def run_deltas(spec, state):
|
def run_deltas(spec, state):
|
||||||
"""
|
"""
|
||||||
Run all deltas functions yielding:
|
Run all deltas functions yielding:
|
||||||
|
@ -33,35 +54,58 @@ def run_deltas(spec, state):
|
||||||
- source deltas ('source_deltas')
|
- source deltas ('source_deltas')
|
||||||
- target deltas ('target_deltas')
|
- target deltas ('target_deltas')
|
||||||
- head deltas ('head_deltas')
|
- head deltas ('head_deltas')
|
||||||
- inclusion delay deltas ('inclusion_delay_deltas')
|
- not if is_post_altair(spec)
|
||||||
|
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||||
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
- inactivity penalty deltas ('inactivity_penalty_deltas')
|
||||||
"""
|
"""
|
||||||
yield 'pre', state
|
yield 'pre', state
|
||||||
|
|
||||||
|
if is_post_altair(spec):
|
||||||
|
def get_source_deltas(state):
|
||||||
|
return spec.get_flag_index_deltas(state, spec.TIMELY_SOURCE_FLAG_INDEX, spec.TIMELY_SOURCE_WEIGHT)
|
||||||
|
|
||||||
|
def get_head_deltas(state):
|
||||||
|
return spec.get_flag_index_deltas(state, spec.TIMELY_HEAD_FLAG_INDEX, spec.TIMELY_HEAD_WEIGHT)
|
||||||
|
|
||||||
|
def get_target_deltas(state):
|
||||||
|
return spec.get_flag_index_deltas(state, spec.TIMELY_TARGET_FLAG_INDEX, spec.TIMELY_TARGET_WEIGHT)
|
||||||
|
|
||||||
yield from run_attestation_component_deltas(
|
yield from run_attestation_component_deltas(
|
||||||
spec,
|
spec,
|
||||||
state,
|
state,
|
||||||
spec.get_source_deltas,
|
spec.get_source_deltas if not is_post_altair(spec) else get_source_deltas,
|
||||||
spec.get_matching_source_attestations,
|
spec.get_matching_source_attestations,
|
||||||
'source_deltas',
|
'source_deltas',
|
||||||
)
|
)
|
||||||
yield from run_attestation_component_deltas(
|
yield from run_attestation_component_deltas(
|
||||||
spec,
|
spec,
|
||||||
state,
|
state,
|
||||||
spec.get_target_deltas,
|
spec.get_target_deltas if not is_post_altair(spec) else get_target_deltas,
|
||||||
spec.get_matching_target_attestations,
|
spec.get_matching_target_attestations,
|
||||||
'target_deltas',
|
'target_deltas',
|
||||||
)
|
)
|
||||||
yield from run_attestation_component_deltas(
|
yield from run_attestation_component_deltas(
|
||||||
spec,
|
spec,
|
||||||
state,
|
state,
|
||||||
spec.get_head_deltas,
|
spec.get_head_deltas if not is_post_altair(spec) else get_head_deltas,
|
||||||
spec.get_matching_head_attestations,
|
spec.get_matching_head_attestations,
|
||||||
'head_deltas',
|
'head_deltas',
|
||||||
)
|
)
|
||||||
yield from run_get_inclusion_delay_deltas(spec, state)
|
if not is_post_altair(spec):
|
||||||
|
yield from run_get_inclusion_delay_deltas(spec, state)
|
||||||
yield from run_get_inactivity_penalty_deltas(spec, state)
|
yield from run_get_inactivity_penalty_deltas(spec, state)
|
||||||
|
|
||||||
|
|
||||||
|
def deltas_name_to_flag_index(spec, deltas_name):
|
||||||
|
if 'source' in deltas_name:
|
||||||
|
return spec.TIMELY_SOURCE_FLAG_INDEX
|
||||||
|
elif 'head' in deltas_name:
|
||||||
|
return spec.TIMELY_HEAD_FLAG_INDEX
|
||||||
|
elif 'target' in deltas_name:
|
||||||
|
return spec.TIMELY_TARGET_FLAG_INDEX
|
||||||
|
raise ValueError("Wrong deltas_name %s" % deltas_name)
|
||||||
|
|
||||||
|
|
||||||
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
|
def run_attestation_component_deltas(spec, state, component_delta_fn, matching_att_fn, deltas_name):
|
||||||
"""
|
"""
|
||||||
Run ``component_delta_fn``, yielding:
|
Run ``component_delta_fn``, yielding:
|
||||||
|
@ -71,8 +115,14 @@ def run_attestation_component_deltas(spec, state, component_delta_fn, matching_a
|
||||||
|
|
||||||
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
|
yield deltas_name, Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
|
||||||
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
if not is_post_altair(spec):
|
||||||
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
matching_attestations = matching_att_fn(state, spec.get_previous_epoch(state))
|
||||||
|
matching_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||||
|
else:
|
||||||
|
matching_indices = spec.get_unslashed_participating_indices(
|
||||||
|
state, deltas_name_to_flag_index(spec, deltas_name), spec.get_previous_epoch(state)
|
||||||
|
)
|
||||||
|
|
||||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||||
for index in range(len(state.validators)):
|
for index in range(len(state.validators)):
|
||||||
if index not in eligible_indices:
|
if index not in eligible_indices:
|
||||||
|
@ -101,6 +151,12 @@ def run_get_inclusion_delay_deltas(spec, state):
|
||||||
Run ``get_inclusion_delay_deltas``, yielding:
|
Run ``get_inclusion_delay_deltas``, yielding:
|
||||||
- inclusion delay deltas ('inclusion_delay_deltas')
|
- inclusion delay deltas ('inclusion_delay_deltas')
|
||||||
"""
|
"""
|
||||||
|
if is_post_altair(spec):
|
||||||
|
# No inclusion_delay_deltas
|
||||||
|
yield 'inclusion_delay_deltas', Deltas(rewards=[0] * len(state.validators),
|
||||||
|
penalties=[0] * len(state.validators))
|
||||||
|
return
|
||||||
|
|
||||||
rewards, penalties = spec.get_inclusion_delay_deltas(state)
|
rewards, penalties = spec.get_inclusion_delay_deltas(state)
|
||||||
|
|
||||||
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
|
yield 'inclusion_delay_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
@ -148,8 +204,13 @@ def run_get_inactivity_penalty_deltas(spec, state):
|
||||||
|
|
||||||
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
|
yield 'inactivity_penalty_deltas', Deltas(rewards=rewards, penalties=penalties)
|
||||||
|
|
||||||
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
if not is_post_altair(spec):
|
||||||
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
matching_attestations = spec.get_matching_target_attestations(state, spec.get_previous_epoch(state))
|
||||||
|
matching_attesting_indices = spec.get_unslashed_attesting_indices(state, matching_attestations)
|
||||||
|
else:
|
||||||
|
matching_attesting_indices = spec.get_unslashed_participating_indices(
|
||||||
|
state, spec.TIMELY_TARGET_FLAG_INDEX, spec.get_previous_epoch(state)
|
||||||
|
)
|
||||||
|
|
||||||
eligible_indices = spec.get_eligible_validator_indices(state)
|
eligible_indices = spec.get_eligible_validator_indices(state)
|
||||||
for index in range(len(state.validators)):
|
for index in range(len(state.validators)):
|
||||||
|
@@ -159,11 +220,20 @@ def run_get_inactivity_penalty_deltas(spec, state):
             continue
 
         if spec.is_in_inactivity_leak(state):
+            # Compute base_penalty
             base_reward = spec.get_base_reward(state, index)
-            base_penalty = spec.BASE_REWARDS_PER_EPOCH * base_reward - spec.get_proposer_reward(state, index)
+            if not is_post_altair(spec):
+                cancel_base_rewards_per_epoch = spec.BASE_REWARDS_PER_EPOCH
+                base_penalty = cancel_base_rewards_per_epoch * base_reward - spec.get_proposer_reward(state, index)
+            else:
+                base_penalty = sum(
+                    base_reward * numerator // spec.WEIGHT_DENOMINATOR
+                    for (_, numerator) in spec.get_flag_indices_and_weights()
+                )
 
         if not has_enough_for_reward(spec, state, index):
             assert penalties[index] == 0
-        elif index in matching_attesting_indices:
+        elif index in matching_attesting_indices or not has_enough_for_leak_penalty(spec, state, index):
             assert penalties[index] == base_penalty
         else:
             assert penalties[index] > base_penalty
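The Altair branch of the base_penalty check cancels every flag reward a leaking validator would have received. With the Altair weight constants (TIMELY_SOURCE_WEIGHT = 14, TIMELY_TARGET_WEIGHT = 26, TIMELY_HEAD_WEIGHT = 14, WEIGHT_DENOMINATOR = 64), that sums to roughly base_reward * 54 / 64, as a quick standalone check shows:

# Altair weight numerators for the three timeliness flags, per the spec.
WEIGHTS = {0: 14, 1: 26, 2: 14}   # flag index -> weight numerator
WEIGHT_DENOMINATOR = 64

def leak_base_penalty(base_reward: int) -> int:
    # Cancel every flag reward the validator would have earned.
    return sum(base_reward * w // WEIGHT_DENOMINATOR for w in WEIGHTS.values())

assert leak_base_penalty(64) == 54   # i.e. base_reward * 54 // 64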
@@ -173,7 +243,8 @@ def run_get_inactivity_penalty_deltas(spec, state):
 
 def transition_state_to_leak(spec, state, epochs=None):
     if epochs is None:
-        epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
+        # +1 to trigger inactivity_score transitions
+        epochs = spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY + 1
     assert epochs >= spec.MIN_EPOCHS_TO_INACTIVITY_PENALTY
 
     for _ in range(epochs):
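The + 1 matters because a validator's inactivity_score (new in Altair) only grows during epochs in which the leak is already active; running exactly MIN_EPOCHS_TO_INACTIVITY_PENALTY epochs would leave every score at zero. A toy model of that assumed behavior (constants per the Altair spec, logic heavily simplified):

# Toy model, not spec code: scores only move once the leak is active.
MIN_EPOCHS_TO_INACTIVITY_PENALTY = 4   # Altair value
INACTIVITY_SCORE_BIAS = 4              # Altair value (assumed here)

finality_delay = 0
score = 0
for _ in range(MIN_EPOCHS_TO_INACTIVITY_PENALTY + 1):
    finality_delay += 1
    if finality_delay > MIN_EPOCHS_TO_INACTIVITY_PENALTY:
        score += INACTIVITY_SCORE_BIAS
assert score > 0   # with the +1; exactly MIN epochs would leave it at 0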
@@ -262,8 +333,13 @@ def run_test_full_all_correct(spec, state):
 def run_test_full_but_partial_participation(spec, state, rng=Random(5522)):
     cached_prepare_state_with_attestations(spec, state)
 
-    for a in state.previous_epoch_attestations:
-        a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
+    if not is_post_altair(spec):
+        for a in state.previous_epoch_attestations:
+            a.aggregation_bits = [rng.choice([True, False]) for _ in a.aggregation_bits]
+    else:
+        for index in range(len(state.validators)):
+            if rng.choice([True, False]):
+                state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
 
     yield from run_deltas(spec, state)
 
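The fixed seeds (Random(5522) here, Random(8020) further down) keep the randomized participation reproducible, so the same test vectors come out on every run:

from random import Random

a, b = Random(5522), Random(5522)
assert [a.randint(0, 9) for _ in range(5)] == [b.randint(0, 9) for _ in range(5)]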
@@ -272,8 +348,12 @@ def run_test_partial(spec, state, fraction_filled):
     cached_prepare_state_with_attestations(spec, state)
 
     # Remove portion of attestations
-    num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
-    state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
+    if not is_post_altair(spec):
+        num_attestations = int(len(state.previous_epoch_attestations) * fraction_filled)
+        state.previous_epoch_attestations = state.previous_epoch_attestations[:num_attestations]
+    else:
+        for index in range(int(len(state.validators) * fraction_filled)):
+            state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
 
     yield from run_deltas(spec, state)
 
@@ -328,13 +408,18 @@ def run_test_some_very_low_effective_balances_that_attested(spec, state):
 def run_test_some_very_low_effective_balances_that_did_not_attest(spec, state):
     cached_prepare_state_with_attestations(spec, state)
 
-    # Remove attestation
-    attestation = state.previous_epoch_attestations[0]
-    state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
-    # Set removed indices effective balance to very low amount
-    indices = spec.get_unslashed_attesting_indices(state, [attestation])
-    for i, index in enumerate(indices):
-        state.validators[index].effective_balance = i
+    if not is_post_altair(spec):
+        # Remove attestation
+        attestation = state.previous_epoch_attestations[0]
+        state.previous_epoch_attestations = state.previous_epoch_attestations[1:]
+        # Set removed indices effective balance to very low amount
+        indices = spec.get_unslashed_attesting_indices(state, [attestation])
+        for i, index in enumerate(indices):
+            state.validators[index].effective_balance = i
+    else:
+        index = 0
+        state.validators[index].effective_balance = 1
+        state.previous_epoch_participation[index] = spec.ParticipationFlags(0b0000_0000)
 
     yield from run_deltas(spec, state)
 
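These low-effective-balance cases probe integer rounding: in the phase0-style formula, base_reward = effective_balance * BASE_REWARD_FACTOR // integer_squareroot(total_balance) // BASE_REWARDS_PER_EPOCH, which rounds to zero for dust balances; that is what has_enough_for_reward guards against. A quick sketch with assumed mainnet-like constants:

import math

BASE_REWARD_FACTOR = 64        # phase0 mainnet value
BASE_REWARDS_PER_EPOCH = 4     # phase0 mainnet value

def base_reward(effective_balance: int, total_balance: int) -> int:
    # Phase0-style base reward; math.isqrt stands in for the spec's
    # integer_squareroot.
    return (effective_balance * BASE_REWARD_FACTOR
            // math.isqrt(total_balance) // BASE_REWARDS_PER_EPOCH)

total = 32_000_000_000 * 100           # 100 validators at 32 ETH, in Gwei
assert base_reward(1, total) == 0      # dust balance rounds to zero
assert base_reward(32_000_000_000, total) > 0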
@@ -442,16 +527,43 @@ def run_test_full_random(spec, state, rng=Random(8020)):
 
     cached_prepare_state_with_attestations(spec, state)
 
-    for pending_attestation in state.previous_epoch_attestations:
-        # ~1/3 have bad target
-        if rng.randint(0, 2) == 0:
-            pending_attestation.data.target.root = b'\x55' * 32
-        # ~1/3 have bad head
-        if rng.randint(0, 2) == 0:
-            pending_attestation.data.beacon_block_root = b'\x66' * 32
-        # ~50% participation
-        pending_attestation.aggregation_bits = [rng.choice([True, False]) for _ in pending_attestation.aggregation_bits]
-        # Random inclusion delay
-        pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
+    if not is_post_altair(spec):
+        for pending_attestation in state.previous_epoch_attestations:
+            # ~1/3 have bad target
+            if rng.randint(0, 2) == 0:
+                pending_attestation.data.target.root = b'\x55' * 32
+            # ~1/3 have bad head
+            if rng.randint(0, 2) == 0:
+                pending_attestation.data.beacon_block_root = b'\x66' * 32
+            # ~50% participation
+            pending_attestation.aggregation_bits = [rng.choice([True, False])
+                                                    for _ in pending_attestation.aggregation_bits]
+            # Random inclusion delay
+            pending_attestation.inclusion_delay = rng.randint(1, spec.SLOTS_PER_EPOCH)
+    else:
+        for index in range(len(state.validators)):
+            # ~1/3 have bad head or bad target or not timely enough
+            is_timely_correct_head = rng.randint(0, 2) != 0
+            flags = state.previous_epoch_participation[index]
+
+            def set_flag(index, value):
+                nonlocal flags
+                flag = spec.ParticipationFlags(2**index)
+                if value:
+                    flags |= flag
+                else:
+                    flags &= 0xff ^ flag
+
+            set_flag(spec.TIMELY_HEAD_FLAG_INDEX, is_timely_correct_head)
+            if is_timely_correct_head:
+                # If timely head, then must be timely target
+                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, True)
+                # If timely head, then must be timely source
+                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, True)
+            else:
+                # ~50% of remaining have bad target or not timely enough
+                set_flag(spec.TIMELY_TARGET_FLAG_INDEX, rng.choice([True, False]))
+                # ~50% of remaining have bad source or not timely enough
+                set_flag(spec.TIMELY_SOURCE_FLAG_INDEX, rng.choice([True, False]))
+            state.previous_epoch_participation[index] = flags
 
     yield from run_deltas(spec, state)
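The set_flag closure above is plain bitfield manipulation on a one-byte participation value. The same logic restated as a standalone function over plain ints (the int stand-in for spec.ParticipationFlags is an assumption):

TIMELY_SOURCE_FLAG_INDEX, TIMELY_TARGET_FLAG_INDEX, TIMELY_HEAD_FLAG_INDEX = 0, 1, 2

def with_flag(flags: int, flag_index: int, value: bool) -> int:
    # Set or clear one bit of the single-byte participation value.
    flag = 2 ** flag_index
    return flags | flag if value else flags & (0xff ^ flag)

flags = 0b0000_0000
flags = with_flag(flags, TIMELY_HEAD_FLAG_INDEX, True)
flags = with_flag(flags, TIMELY_TARGET_FLAG_INDEX, True)   # head implies target
flags = with_flag(flags, TIMELY_SOURCE_FLAG_INDEX, True)   # head implies source
assert flags == 0b0000_0111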
@@ -1,37 +0,0 @@
-from eth2spec.test.context import expect_assertion_error
-
-
-def run_shard_transitions_processing(spec, state, shard_transitions, attestations, valid=True):
-    """
-    Run ``process_shard_transitions``, yielding:
-      - pre-state ('pre')
-      - shard_transitions ('shard_transitions')
-      - attestations ('attestations')
-      - post-state ('post').
-    If ``valid == False``, run expecting ``AssertionError``
-    """
-    # yield pre-state
-    yield 'pre', state
-    yield 'shard_transitions', shard_transitions
-    yield 'attestations', attestations
-
-    # If the attestation is invalid, processing is aborted, and there is no post-state.
-    if not valid:
-        expect_assertion_error(lambda: spec.process_shard_transitions(state, shard_transitions, attestations))
-        yield 'post', None
-        return
-
-    # process crosslinks
-    spec.process_shard_transitions(state, shard_transitions, attestations)
-
-    # yield post-state
-    yield 'post', state
-
-
-def get_shard_transition_of_committee(spec, state, committee_index, shard_blocks=None):
-    if shard_blocks is None:
-        shard_blocks = []
-
-    shard = spec.compute_shard_from_committee_index(state, committee_index, state.slot)
-    shard_transition = spec.get_shard_transition(state, shard, shard_blocks=shard_blocks)
-    return shard_transition
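The deleted helper follows the repository's generator-based runner pattern: yield named parts of the test vector, and yield a None post-state when processing is expected to fail. A minimal illustration of how such a runner is consumed (the runner below is a stand-in, not repository code):

def example_runner(state, valid=True):
    # Stand-in runner, not repository code.
    yield 'pre', state
    if not valid:
        yield 'post', None
        return
    yield 'post', state + 1   # pretend processing advanced the state

vector = dict(example_runner(state=0))
assert vector == {'pre': 0, 'post': 1}
assert dict(example_runner(state=0, valid=False))['post'] is None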
Some files were not shown because too many files have changed in this diff.