commit e2bd8c7942

@@ -35,32 +35,45 @@ commands:
     description: "Restore the cache with pyspec keys"
     steps:
       - restore_cached_venv:
-          venv_name: v7-pyspec
-          reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "tests/core/pyspec/requirements-testing.txt" }}
+          venv_name: v19-pyspec
+          reqs_checksum: cache-{{ checksum "setup.py" }}
   save_pyspec_cached_venv:
     description: "Save a venv into a cache with pyspec keys"
     steps:
       - save_cached_venv:
-          venv_name: v7-pyspec
-          reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "tests/core/pyspec/requirements-testing.txt" }}
-          venv_path: ./tests/core/pyspec/venv
-  restore_deposit_contract_cached_venv:
-    description: "Restore the cache with deposit_contract keys"
+          venv_name: v19-pyspec
+          reqs_checksum: cache-{{ checksum "setup.py" }}
+          venv_path: ./venv
+  restore_deposit_contract_compiler_cached_venv:
+    description: "Restore the venv from cache for the deposit contract compiler"
     steps:
       - restore_cached_venv:
-          venv_name: v9-deposit-contract
-          reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
-  save_deposit_contract_cached_venv:
-    description: "Save a venv into a cache with deposit_contract keys"
+          venv_name: v18-deposit-contract-compiler
+          reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
+  save_deposit_contract_compiler_cached_venv:
+    description: "Save the venv to cache for later use of the deposit contract compiler"
     steps:
       - save_cached_venv:
-          venv_name: v9-deposit-contract
-          reqs_checksum: cache-{{ checksum "tests/core/pyspec/requirements.txt" }}-{{ checksum "deposit_contract/requirements-testing.txt" }}
-          venv_path: ./deposit_contract/venv
+          venv_name: v18-deposit-contract-compiler
+          reqs_checksum: cache-{{ checksum "deposit_contract/compiler/requirements.txt" }}
+          venv_path: ./deposit_contract/compiler/venv
+  restore_deposit_contract_tester_cached_venv:
+    description: "Restore the venv from cache for the deposit contract tester"
+    steps:
+      - restore_cached_venv:
+          venv_name: v19-deposit-contract-tester
+          reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
+  save_deposit_contract_tester_cached_venv:
+    description: "Save the venv to cache for later use of the deposit contract tester"
+    steps:
+      - save_cached_venv:
+          venv_name: v19-deposit-contract-tester
+          reqs_checksum: cache-{{ checksum "setup.py" }}-{{ checksum "deposit_contract/tester/requirements.txt" }}
+          venv_path: ./deposit_contract/tester/venv
 jobs:
   checkout_specs:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       # Restore git repo at point close to target branch/revision, to speed up checkout
@@ -80,7 +93,7 @@ jobs:
         - ~/specs-repo
   install_pyspec_test:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
@@ -92,7 +105,7 @@ jobs:
       - save_pyspec_cached_venv
   test:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
@@ -114,7 +127,7 @@ jobs:
           command: sudo npm install -g doctoc && make check_toc
   codespell:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       - checkout
@@ -123,7 +136,7 @@ jobs:
           command: pip install codespell --user && make codespell
   lint:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
      - restore_cache:
@@ -132,29 +145,54 @@ jobs:
       - run:
           name: Run linter
           command: make lint
-  install_deposit_contract_test:
+  install_deposit_contract_compiler:
     docker:
-      - image: circleci/python:3.6
+      # The deposit contract compiler is pinned to python 3.7 because of the vyper version pin.
+      - image: circleci/python:3.7
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
           key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
-      - restore_deposit_contract_cached_venv
+      - restore_deposit_contract_compiler_cached_venv
       - run:
-          name: Install deposit contract requirements
-          command: make install_deposit_contract_test
-      - save_deposit_contract_cached_venv
-  deposit_contract:
+          name: Install deposit contract compiler requirements
+          command: make install_deposit_contract_compiler
+      - save_deposit_contract_compiler_cached_venv
+  install_deposit_contract_tester:
     docker:
-      - image: circleci/python:3.6
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
           key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
-      - restore_deposit_contract_cached_venv
+      - restore_deposit_contract_tester_cached_venv
       - run:
+          name: Install deposit contract tester requirements
+          command: make install_deposit_contract_tester
+      - save_deposit_contract_tester_cached_venv
+  test_compile_deposit_contract:
+    docker:
+      - image: circleci/python:3.7
+    working_directory: ~/specs-repo
+    steps:
+      - restore_cache:
+          key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
+      - restore_deposit_contract_compiler_cached_venv
+      - run:
+          name: Run deposit contract compile test
+          command: make test_compile_deposit_contract
+  test_deposit_contract:
+    docker:
+      - image: circleci/python:3.8
     working_directory: ~/specs-repo
     steps:
       - restore_cache:
           key: v2-specs-repo-{{ .Branch }}-{{ .Revision }}
+      - restore_deposit_contract_tester_cached_venv
       - run:
           name: Run deposit contract test
           command: make test_deposit_contract

 workflows:
   version: 2.1
   test_spec:
@@ -171,9 +209,15 @@ workflows:
       - lint:
           requires:
             - test
-      - install_deposit_contract_test:
+      - install_deposit_contract_compiler:
           requires:
             - checkout_specs
-      - deposit_contract:
+      - test_compile_deposit_contract:
           requires:
-            - install_deposit_contract_test
+            - install_deposit_contract_compiler
+      - install_deposit_contract_tester:
+          requires:
+            - checkout_specs
+      - test_deposit_contract:
+          requires:
+            - install_deposit_contract_tester
@@ -7,6 +7,7 @@ venv
 
 build/
 output/
+dist/
 
 eth2.0-spec-tests/

@@ -14,12 +15,16 @@ eth2.0-spec-tests/
 .mypy_cache
 
 # Dynamically built from Markdown spec
-tests/core/pyspec/eth2spec/phase0/spec.py
-tests/core/pyspec/eth2spec/phase1/spec.py
+tests/core/pyspec/eth2spec/phase0/
+tests/core/pyspec/eth2spec/phase1/
 
 # coverage reports
 .htmlcov
 .coverage
+.coverage.*
 
 # local CI testing output
 tests/core/pyspec/test-reports
+tests/core/pyspec/eth2spec/test_results.xml
 
+*.egg-info
Makefile
@@ -1,11 +1,11 @@
 SPEC_DIR = ./specs
 SSZ_DIR = ./ssz
-SCRIPT_DIR = ./scripts
 TEST_LIBS_DIR = ./tests/core
 PY_SPEC_DIR = $(TEST_LIBS_DIR)/pyspec
-TEST_VECTOR_DIR = ./eth2.0-spec-tests/tests
+TEST_VECTOR_DIR = ../eth2.0-spec-tests/tests
 GENERATOR_DIR = ./tests/generators
-DEPOSIT_CONTRACT_DIR = ./deposit_contract
+DEPOSIT_CONTRACT_COMPILER_DIR = ./deposit_contract/compiler
+DEPOSIT_CONTRACT_TESTER_DIR = ./deposit_contract/tester
 CONFIGS_DIR = ./configs
 
 # Collect a list of generator names
@@ -17,25 +17,14 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER
 # To check generator matching:
 #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}])
 
-PHASE0_SPEC_DIR = $(SPEC_DIR)/phase0
-PY_SPEC_PHASE_0_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase0/spec.py
-PY_SPEC_PHASE_0_DEPS = $(wildcard $(SPEC_DIR)/phase0/*.md)
-
-PHASE1_SPEC_DIR = $(SPEC_DIR)/phase1
-PY_SPEC_PHASE_1_TARGETS = $(PY_SPEC_DIR)/eth2spec/phase1/spec.py
-PY_SPEC_PHASE_1_DEPS = $(wildcard $(SPEC_DIR)/phase1/*.md)
-
-PY_SPEC_ALL_DEPS = $(PY_SPEC_PHASE_0_DEPS) $(PY_SPEC_PHASE_1_DEPS)
-
-PY_SPEC_ALL_TARGETS = $(PY_SPEC_PHASE_0_TARGETS) $(PY_SPEC_PHASE_1_TARGETS)
-
-MARKDOWN_FILES = $(PY_SPEC_ALL_DEPS) $(wildcard $(SPEC_DIR)/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
+MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) $(wildcard $(SPEC_DIR)/phase1/*.md) $(wildcard $(SSZ_DIR)/*.md) $(wildcard $(SPEC_DIR)/networking/*.md) $(wildcard $(SPEC_DIR)/validator/*.md)
 
 COV_HTML_OUT=.htmlcov
 COV_INDEX_FILE=$(PY_SPEC_DIR)/$(COV_HTML_OUT)/index.html
 
-.PHONY: clean partial_clean all test citest lint generate_tests pyspec phase0 phase1 install_test open_cov \
-	install_deposit_contract_test test_deposit_contract compile_deposit_contract check_toc
+.PHONY: clean partial_clean all test citest lint generate_tests pyspec install_test open_cov \
+	install_deposit_contract_tester test_deposit_contract install_deposit_contract_compiler \
+	compile_deposit_contract test_compile_deposit_contract check_toc
 
 all: $(PY_SPEC_ALL_TARGETS)
@@ -43,31 +32,54 @@ all: $(PY_SPEC_ALL_TARGETS)
 partial_clean:
 	rm -rf $(TEST_VECTOR_DIR)
 	rm -rf $(GENERATOR_VENVS)
+	rm -rf .pytest_cache
+	rm -f .coverage
 	rm -rf $(PY_SPEC_DIR)/.pytest_cache
-	rm -rf $(PY_SPEC_ALL_TARGETS)
-	rm -rf $(DEPOSIT_CONTRACT_DIR)/.pytest_cache
+	rm -rf $(DEPOSIT_CONTRACT_COMPILER_DIR)/.pytest_cache
+	rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache
+	rm -rf $(PY_SPEC_DIR)/phase0
+	rm -rf $(PY_SPEC_DIR)/phase1
 	rm -rf $(PY_SPEC_DIR)/$(COV_HTML_OUT)
 	rm -rf $(PY_SPEC_DIR)/.coverage
 	rm -rf $(PY_SPEC_DIR)/test-reports
+	rm -rf eth2spec.egg-info dist build
 
 
 clean: partial_clean
+	rm -rf venv
 	rm -rf $(PY_SPEC_DIR)/venv
-	rm -rf $(DEPOSIT_CONTRACT_DIR)/venv
+	rm -rf $(DEPOSIT_CONTRACT_COMPILER_DIR)/venv
+	rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/venv
 
+# The pyspec is rebuilt to enforce the /specs being part of eth2specs source distribution. It could be forgotten otherwise.
+dist_build: pyspec
+	python3 setup.py sdist bdist_wheel
+
+dist_check:
+	python3 -m twine check dist/*
+
+dist_upload:
+	python3 -m twine upload dist/*
+
 
 # "make generate_tests" to run all generators
-generate_tests: $(PY_SPEC_ALL_TARGETS) $(GENERATOR_TARGETS)
+generate_tests: $(GENERATOR_TARGETS)
 
+# "make pyspec" to create the pyspec for all phases.
+pyspec:
+	. venv/bin/activate; python3 setup.py pyspecdev
+
 # installs the packages to run pyspec tests
 install_test:
-	cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt;
+	python3 -m venv venv; . venv/bin/activate; pip3 install .[test] .[lint]
 
-test: $(PY_SPEC_ALL_TARGETS)
-	cd $(PY_SPEC_DIR); . venv/bin/activate; export PYTHONPATH="./"; \
+test: pyspec
+	. venv/bin/activate; cd $(PY_SPEC_DIR); \
 	python -m pytest -n 4 --cov=eth2spec.phase0.spec --cov=eth2spec.phase1.spec --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec
 
-citest: $(PY_SPEC_ALL_TARGETS)
-	cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; \
-	python -m pytest -n 4 --junitxml=test-reports/eth2spec/test_results.xml eth2spec
+citest: pyspec
+	mkdir -p tests/core/pyspec/test-reports/eth2spec; . venv/bin/activate; cd $(PY_SPEC_DIR); \
+	python -m pytest -n 4 --junitxml=eth2spec/test_results.xml eth2spec
 
 open_cov:
 	((open "$(COV_INDEX_FILE)" || xdg-open "$(COV_INDEX_FILE)") &> /dev/null) &
@@ -83,31 +95,29 @@ check_toc: $(MARKDOWN_FILES:=.toc)
 codespell:
 	codespell . --skip ./.git -I .codespell-whitelist
 
-lint: $(PY_SPEC_ALL_TARGETS)
-	cd $(PY_SPEC_DIR); . venv/bin/activate; \
+lint: pyspec
+	. venv/bin/activate; cd $(PY_SPEC_DIR); \
 	flake8 --ignore=E252,W504,W503 --max-line-length=120 ./eth2spec \
 	&& cd ./eth2spec && mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase0 \
 	&& mypy --follow-imports=silent --warn-unused-ignores --ignore-missing-imports --check-untyped-defs --disallow-incomplete-defs --disallow-untyped-defs -p phase1;
 
-install_deposit_contract_test: $(PY_SPEC_ALL_TARGETS)
-	cd $(DEPOSIT_CONTRACT_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt
-
-compile_deposit_contract:
-	cd $(DEPOSIT_CONTRACT_DIR); . venv/bin/activate; \
-	python tool/compile_deposit_contract.py contracts/validator_registration.vy;
+install_deposit_contract_tester:
+	cd $(DEPOSIT_CONTRACT_TESTER_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements.txt
 
 test_deposit_contract:
-	cd $(DEPOSIT_CONTRACT_DIR); . venv/bin/activate; \
+	cd $(DEPOSIT_CONTRACT_TESTER_DIR); . venv/bin/activate; \
 	python -m pytest .
 
-# "make pyspec" to create the pyspec for all phases.
-pyspec: $(PY_SPEC_ALL_TARGETS)
+install_deposit_contract_compiler:
+	cd $(DEPOSIT_CONTRACT_COMPILER_DIR); python3.7 -m venv venv; . venv/bin/activate; pip3.7 install -r requirements.txt
 
-$(PY_SPEC_PHASE_0_TARGETS): $(PY_SPEC_PHASE_0_DEPS)
-	python3 $(SCRIPT_DIR)/build_spec.py -p0 $(PHASE0_SPEC_DIR)/beacon-chain.md $(PHASE0_SPEC_DIR)/fork-choice.md $(PHASE0_SPEC_DIR)/validator.md $@
+compile_deposit_contract:
+	cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
+	python3.7 deposit_contract/compile.py contracts/validator_registration.vy
 
-$(PY_SPEC_DIR)/eth2spec/phase1/spec.py: $(PY_SPEC_PHASE_1_DEPS)
-	python3 $(SCRIPT_DIR)/build_spec.py -p1 $(PHASE0_SPEC_DIR)/beacon-chain.md $(PHASE0_SPEC_DIR)/fork-choice.md $(SSZ_DIR)/merkle-proofs.md $(PHASE1_SPEC_DIR)/custody-game.md $(PHASE1_SPEC_DIR)/shard-data-chains.md $(PHASE1_SPEC_DIR)/beacon-chain-misc.md $@
+test_compile_deposit_contract:
+	cd $(DEPOSIT_CONTRACT_COMPILER_DIR); . venv/bin/activate; \
+	python3.7 -m pytest .
 
 CURRENT_DIR = ${CURDIR}
@@ -141,5 +151,5 @@ $(TEST_VECTOR_DIR)/:
 
 # For any generator, build it using the run_generator function.
 # (creation of output dir is a dependency)
-gen_%: $(PY_SPEC_ALL_TARGETS) $(TEST_VECTOR_DIR)
+gen_%: $(TEST_VECTOR_DIR)
 	$(call run_generator,$*)
@@ -16,11 +16,13 @@ Core specifications for Eth2 clients be found in [specs/](specs/). These are div
 * [Fork Choice](specs/phase0/fork-choice.md)
 * [Deposit Contract](specs/phase0/deposit-contract.md)
 * [Honest Validator](specs/phase0/validator.md)
 * [P2P Networking](specs/phase0/p2p-interface.md)
 
 ### Phase 1
+* [From Phase 0 to Phase 1](specs/phase1/phase1-fork.md)
 * [The Beacon Chain for Shards](specs/phase1/beacon-chain.md)
 * [Custody Game](specs/phase1/custody-game.md)
 * [Shard Data Chains](specs/phase1/shard-data-chains.md)
-* [Misc beacon chain updates](specs/phase1/beacon-chain-misc.md)
+* [Shard Transition and Fraud Proofs](specs/phase1/fraud-proofs.md)
+* [Light client syncing protocol](specs/phase1/light-client-sync.md)
 
 ### Phase 2

@@ -57,6 +59,7 @@ The following are the broad design goals for Ethereum 2.0:
 
 * [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#)
 * [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB)
+* [Combining GHOST and Casper paper](https://arxiv.org/abs/2003.03052)
 
 
 ## For spec contributors

@@ -64,4 +67,3 @@ The following are the broad design goals for Ethereum 2.0:
 Documentation on the different components used during spec writing can be found here:
 * [YAML Test Generators](tests/generators/README.md)
 * [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md)
@@ -21,6 +21,12 @@ SHUFFLE_ROUND_COUNT: 90
 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 16384
 # Jan 3, 2020
 MIN_GENESIS_TIME: 1578009600
+# 4
+HYSTERESIS_QUOTIENT: 4
+# 1 (minus 0.25)
+HYSTERESIS_DOWNWARD_MULTIPLIER: 1
+# 5 (plus 1.25)
+HYSTERESIS_UPWARD_MULTIPLIER: 5
 
 
 # Fork Choice
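The three hysteresis constants added above combine with `EFFECTIVE_BALANCE_INCREMENT` to set the thresholds at which a validator's effective balance is updated. A minimal sketch of that arithmetic, assuming the phase 0 value `EFFECTIVE_BALANCE_INCREMENT = 10**9` Gwei (1 ETH), which is not shown in this hunk:

```python
# Sketch: how the HYSTERESIS_* constants translate into balance thresholds (Gwei).
# EFFECTIVE_BALANCE_INCREMENT = 1 ETH is an assumption taken from the phase 0 config.
EFFECTIVE_BALANCE_INCREMENT = 10**9
HYSTERESIS_QUOTIENT = 4
HYSTERESIS_DOWNWARD_MULTIPLIER = 1
HYSTERESIS_UPWARD_MULTIPLIER = 5

hysteresis_increment = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT
downward_threshold = hysteresis_increment * HYSTERESIS_DOWNWARD_MULTIPLIER  # 0.25 ETH
upward_threshold = hysteresis_increment * HYSTERESIS_UPWARD_MULTIPLIER      # 1.25 ETH

# The effective balance only moves once the actual balance drifts more than
# 0.25 ETH below, or 1.25 ETH above, the current effective balance, matching
# the "(minus 0.25)" and "(plus 1.25)" comments in the config.
assert (downward_threshold, upward_threshold) == (250_000_000, 1_250_000_000)
```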
@@ -82,8 +88,8 @@ SLOTS_PER_EPOCH: 32
 MIN_SEED_LOOKAHEAD: 1
 # 2**2 (= 4) epochs 25.6 minutes
 MAX_SEED_LOOKAHEAD: 4
-# 2**10 (= 1,024) slots ~1.7 hours
-SLOTS_PER_ETH1_VOTING_PERIOD: 1024
+# 2**5 (= 32) epochs ~3.4 hours
+EPOCHS_PER_ETH1_VOTING_PERIOD: 32
 # 2**13 (= 8,192) slots ~13 hours
 SLOTS_PER_HISTORICAL_ROOT: 8192
 # 2**8 (= 256) epochs ~27 hours
@@ -94,9 +100,6 @@ PERSISTENT_COMMITTEE_PERIOD: 2048
-MAX_EPOCHS_PER_CROSSLINK: 64
 # 2**2 (= 4) epochs 25.6 minutes
 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
-# 2**14 (= 16,384) epochs ~73 days
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 16384
 
 
 # State vector lengths
@@ -146,6 +149,75 @@ DOMAIN_BEACON_ATTESTER: 0x01000000
 DOMAIN_RANDAO: 0x02000000
 DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
-DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000
-DOMAIN_SHARD_PROPOSER: 0x80000000
-DOMAIN_SHARD_ATTESTER: 0x81000000
+DOMAIN_SELECTION_PROOF: 0x05000000
+DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
+# Phase 1
+DOMAIN_SHARD_PROPOSAL: 0x80000000
+DOMAIN_SHARD_COMMITTEE: 0x81000000
+DOMAIN_LIGHT_CLIENT: 0x82000000
+DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
+
+
+# Phase 1: Upgrade from Phase 0
+# ---------------------------------------------------------------
+PHASE_1_FORK_VERSION: 0x01000000
+INITIAL_ACTIVE_SHARDS: 64
+# Placeholder
+INITIAL_GASPRICE: 10
+
+
+# Phase 1: General
+# ---------------------------------------------------------------
+# 2**10 (= 1024)
+MAX_SHARDS: 1024
+# 2**3 (= 8) | online epochs | ~51 min
+ONLINE_PERIOD: 8
+# 2**7 (= 128)
+LIGHT_CLIENT_COMMITTEE_SIZE: 128
+# 2**8 (= 256) | epochs | ~27 hours
+LIGHT_CLIENT_COMMITTEE_PERIOD: 256
+# 2**8 (= 256) | epochs | ~27 hours
+SHARD_COMMITTEE_PERIOD: 256
+# 2**18 (= 262,144)
+SHARD_BLOCK_CHUNK_SIZE: 262144
+# 2**2 (= 4)
+MAX_SHARD_BLOCK_CHUNKS: 4
+# 3 * 2**16 (= 196,608)
+TARGET_SHARD_BLOCK_SIZE: 196608
+# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
+SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
+# len(SHARD_BLOCK_OFFSETS)
+MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
+# 2**14 (= 16,384) Gwei
+MAX_GASPRICE: 16384
+# 2**5 (= 32) Gwei
+MIN_GASPRICE: 32
+# 2**3 (= 8)
+GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+
+
+# Phase 1: Custody Game
+# ---------------------------------------------------------------
+
+# Time parameters
+# 2**1 (= 2) epochs, 12.8 minutes
+RANDAO_PENALTY_EPOCHS: 2
+# 2**14 (= 16,384) epochs ~73 days
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 16384
+# 2**11 (= 2,048) epochs, ~9 days
+EPOCHS_PER_CUSTODY_PERIOD: 2048
+# 2**11 (= 2,048) epochs, ~9 days
+CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
+# 2**7 (= 128) epochs, ~14 hours
+MAX_REVEAL_LATENESS_DECREMENT: 128
+
+# Max operations
+# 2**8 (= 256)
+MAX_CUSTODY_KEY_REVEALS: 256
+MAX_EARLY_DERIVED_SECRET_REVEALS: 1
+MAX_CUSTODY_SLASHINGS: 1
+
+# Reward and penalty quotients
+EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
+# 2**8 (= 256)
+MINOR_REWARD_QUOTIENT: 256
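Two of the phase 1 values above are derived rather than independent: `MAX_SHARD_BLOCKS_PER_ATTESTATION` is the length of `SHARD_BLOCK_OFFSETS`, and the offsets themselves are Fibonacci-spaced. A quick consistency check, with the constants copied from the config above:

```python
# Consistency check for the derived phase 1 constants above.
SHARD_BLOCK_OFFSETS = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
MAX_SHARD_BLOCKS_PER_ATTESTATION = 12

assert MAX_SHARD_BLOCKS_PER_ATTESTATION == len(SHARD_BLOCK_OFFSETS)
# Each offset is the sum of the two before it (Fibonacci-style spacing).
assert all(SHARD_BLOCK_OFFSETS[i] == SHARD_BLOCK_OFFSETS[i - 1] + SHARD_BLOCK_OFFSETS[i - 2]
           for i in range(2, len(SHARD_BLOCK_OFFSETS)))
```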
@@ -20,15 +20,21 @@ SHUFFLE_ROUND_COUNT: 10
 MIN_GENESIS_ACTIVE_VALIDATOR_COUNT: 64
 # Jan 3, 2020
 MIN_GENESIS_TIME: 1578009600
+# 4
+HYSTERESIS_QUOTIENT: 4
+# 1 (minus 0.25)
+HYSTERESIS_DOWNWARD_MULTIPLIER: 1
+# 5 (plus 1.25)
+HYSTERESIS_UPWARD_MULTIPLIER: 5
 
 
 # Fork Choice
 # ---------------------------------------------------------------
 # 2**1 (= 2)
 SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2
 
 # Validator
 # ---------------------------------------------------------------
 # [customized] process deposits more quickly, but insecure
@@ -83,23 +89,17 @@ MIN_SEED_LOOKAHEAD: 1
 # 2**2 (= 4) epochs
 MAX_SEED_LOOKAHEAD: 4
 # [customized] higher frequency new deposits from eth1 for testing
-SLOTS_PER_ETH1_VOTING_PERIOD: 16
+EPOCHS_PER_ETH1_VOTING_PERIOD: 2
 # [customized] smaller state
 SLOTS_PER_HISTORICAL_ROOT: 64
 # 2**8 (= 256) epochs
 MIN_VALIDATOR_WITHDRAWABILITY_DELAY: 256
-# 2**11 (= 2,048) epochs
-PERSISTENT_COMMITTEE_PERIOD: 2048
+# [customized] higher frequency of committee turnover and faster time to acceptable voluntary exit
+PERSISTENT_COMMITTEE_PERIOD: 128
-# [customized] fast catchup crosslinks
-MAX_EPOCHS_PER_CROSSLINK: 4
 # 2**2 (= 4) epochs
 MIN_EPOCHS_TO_INACTIVITY_PENALTY: 4
-# [customized] 2**12 (= 4,096) epochs
-EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096
-# 2**2 (= 4) epochs
-EPOCHS_PER_CUSTODY_PERIOD: 4
-# 2**2 (= 4) epochs
-CUSTODY_PERIOD_TO_RANDAO_PADDING: 4
 
 
 # State vector lengths
@@ -149,16 +149,77 @@ DOMAIN_BEACON_ATTESTER: 0x01000000
 DOMAIN_RANDAO: 0x02000000
 DOMAIN_DEPOSIT: 0x03000000
 DOMAIN_VOLUNTARY_EXIT: 0x04000000
-DOMAIN_CUSTODY_BIT_CHALLENGE: 0x06000000
-DOMAIN_SHARD_PROPOSER: 0x80000000
-DOMAIN_SHARD_ATTESTER: 0x81000000
+DOMAIN_SELECTION_PROOF: 0x05000000
+DOMAIN_AGGREGATE_AND_PROOF: 0x06000000
+# Phase 1
+DOMAIN_SHARD_PROPOSAL: 0x80000000
+DOMAIN_SHARD_COMMITTEE: 0x81000000
+DOMAIN_LIGHT_CLIENT: 0x82000000
+DOMAIN_CUSTODY_BIT_SLASHING: 0x83000000
+
+
+# Phase 1: Upgrade from Phase 0
+# ---------------------------------------------------------------
+SHARD_SLOTS_PER_BEACON_SLOT: 2
+EPOCHS_PER_SHARD_PERIOD: 4
+# PHASE_1_FORK_EPOCH >= EPOCHS_PER_SHARD_PERIOD * 2
+PHASE_1_FORK_EPOCH: 8
+# PHASE_1_FORK_SLOT = PHASE_1_FORK_EPOCH * SLOTS_PER_EPOCH
+PHASE_1_FORK_SLOT: 64
+# [customized] for testnet distinction
+PHASE_1_FORK_VERSION: 0x01000001
+# [customized] reduced for testing
+INITIAL_ACTIVE_SHARDS: 4
+# Placeholder
+INITIAL_GASPRICE: 10
+
+
+# Phase 1: General
+# ---------------------------------------------------------------
+# [customized] reduced for testing
+MAX_SHARDS: 8
+# 2**3 (= 8) | online epochs
+ONLINE_PERIOD: 8
+# 2**7 (= 128)
+LIGHT_CLIENT_COMMITTEE_SIZE: 128
+# 2**8 (= 256) | epochs
+LIGHT_CLIENT_COMMITTEE_PERIOD: 256
+# 2**8 (= 256) | epochs
+SHARD_COMMITTEE_PERIOD: 256
+# 2**18 (= 262,144)
+SHARD_BLOCK_CHUNK_SIZE: 262144
+# 2**2 (= 4)
+MAX_SHARD_BLOCK_CHUNKS: 4
+# 3 * 2**16 (= 196,608)
+TARGET_SHARD_BLOCK_SIZE: 196608
+# Note: MAX_SHARD_BLOCKS_PER_ATTESTATION is derived from the list length.
+SHARD_BLOCK_OFFSETS: [1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]
+# len(SHARD_BLOCK_OFFSETS)
+MAX_SHARD_BLOCKS_PER_ATTESTATION: 12
+# 2**14 (= 16,384) Gwei
+MAX_GASPRICE: 16384
+# 2**5 (= 32) Gwei
+MIN_GASPRICE: 32
+# 2**3 (= 8)
+GASPRICE_ADJUSTMENT_COEFFICIENT: 8
+
+
+# Phase 1: Custody Game
+# ---------------------------------------------------------------
+
+# Time parameters
+# 2**1 (= 2) epochs
+RANDAO_PENALTY_EPOCHS: 2
+# [customized] quicker for testing
+EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS: 4096
+# 2**11 (= 2,048) epochs
+EPOCHS_PER_CUSTODY_PERIOD: 2048
+# 2**11 (= 2,048) epochs
+CUSTODY_PERIOD_TO_RANDAO_PADDING: 2048
+# 2**7 (= 128) epochs
+MAX_REVEAL_LATENESS_DECREMENT: 128
+
+# Max operations
+# 2**8 (= 256)
+MAX_CUSTODY_KEY_REVEALS: 256
+MAX_EARLY_DERIVED_SECRET_REVEALS: 1
+MAX_CUSTODY_SLASHINGS: 1
+
+# Reward and penalty quotients
+EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE: 2
+# 2**8 (= 256)
+MINOR_REWARD_QUOTIENT: 256
@@ -5,7 +5,7 @@
 Under the `eth2.0-specs` directory, execute:
 
 ```sh
-make install_deposit_contract_test
+make install_deposit_contract_tester
 ```
 
 ## How to compile the contract?

@@ -14,11 +14,25 @@ make install_deposit_contract_test
 make compile_deposit_contract
 ```
 
+The compiler dependencies can be installed with:
+
+```sh
+make install_deposit_contract_compiler
+```
+
+Note that this requires python 3.7 to be installed. The pinned vyper version will not work on 3.8.
+
 The ABI and bytecode will be updated at [`contracts/validator_registration.json`](./contracts/validator_registration.json).
 
 ## How to run tests?
 
+For running the contract tests:
 ```sh
 make test_deposit_contract
 ```
+
+For testing the compiler output against the expected formally-verified bytecode:
+```sh
+make test_compile_deposit_contract
+```
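As context for the README above: the `validator_registration.json` artifact holds the two values the testers consume. A minimal sketch of reading it; the relative path is an assumption based on the layout introduced in this PR:

```python
import json

# Hypothetical path; adjust to where the repo is checked out.
with open('deposit_contract/contracts/validator_registration.json') as f:
    artifact = json.load(f)

abi = artifact['abi']            # contract interface, used by the web3-based tester
bytecode = artifact['bytecode']  # the formally-verified deployment bytecode
```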
@@ -2,9 +2,7 @@ import argparse
 import json
 import os
 
-from vyper import (
-    compiler,
-)
+from vyper import compiler
 
 DIR = os.path.dirname(__file__)
@@ -0,0 +1,29 @@
from vyper import compiler

import json
import os

DIR = os.path.dirname(__file__)


def get_deposit_contract_code():
    file_path = os.path.join(DIR, '../../contracts/validator_registration.vy')
    deposit_contract_code = open(file_path).read()
    return deposit_contract_code


def get_deposit_contract_json():
    file_path = os.path.join(DIR, '../../contracts/validator_registration.json')
    deposit_contract_json = open(file_path).read()
    return json.loads(deposit_contract_json)


def test_compile_deposit_contract():
    compiled_deposit_contract_json = get_deposit_contract_json()

    deposit_contract_code = get_deposit_contract_code()
    abi = compiler.mk_full_signature(deposit_contract_code)
    bytecode = compiler.compile_code(deposit_contract_code)['bytecode']

    assert abi == compiled_deposit_contract_json["abi"]
    assert bytecode == compiled_deposit_contract_json["bytecode"]
@@ -0,0 +1,7 @@
# Vyper beta version used to generate the bytecode that was then formally verified.
# On top of this beta version, a later change was backported, and included in the formal verification:
# https://github.com/vyperlang/vyper/issues/1761
# The resulting vyper version is pinned and maintained as a protected branch.
git+https://github.com/vyperlang/vyper@1761-HOTFIX-v0.1.0-beta.13

pytest==3.6.1
@@ -0,0 +1,10 @@
from distutils.core import setup

setup(
    name='deposit_contract_compiler',
    packages=['deposit_contract'],
    package_dir={"": "."},
    python_requires="3.7",  # pinned vyper compiler stops working after 3.7. See vyper issue 1835.
    tests_require=["pytest==3.6.1"],
    install_requires=[],  # see requirements.txt file
)
@@ -1,5 +0,0 @@
eth-tester[py-evm]==0.1.0b39
git+https://github.com/vyperlang/vyper@1761-HOTFIX-v0.1.0-beta.13
web3==5.0.0b2
pytest==3.6.1
../tests/core/pyspec
@@ -1,8 +1,3 @@
-from random import (
-    randint,
-)
-import re
-
 import pytest
 
 import eth_tester

@@ -10,17 +5,19 @@ from eth_tester import (
     EthereumTester,
     PyEVMBackend,
 )
-from vyper import (
-    compiler,
-)
 from web3 import Web3
-from web3.providers.eth_tester import (
-    EthereumTesterProvider,
-)
-from .utils import (
-    get_deposit_contract_code,
-    get_deposit_contract_json,
-)
+from web3.providers.eth_tester import EthereumTesterProvider
+
+import json
+import os
+
+DIR = os.path.dirname(__file__)
+
+
+def get_deposit_contract_json():
+    file_path = os.path.join(DIR, '../../contracts/validator_registration.json')
+    deposit_contract_json = open(file_path).read()
+    return json.loads(deposit_contract_json)
 
 
 # Constants
@@ -1,23 +1,16 @@
-from random import (
-    randint,
-)
-
+from random import randint
 import pytest
 
 import eth_utils
-from tests.contracts.conftest import (
+
+from eth2spec.phase0.spec import DepositData
+from eth2spec.utils.ssz.ssz_typing import List
+from eth2spec.utils.ssz.ssz_impl import hash_tree_root
+
+from deposit_contract.conftest import (
     FULL_DEPOSIT_AMOUNT,
     MIN_DEPOSIT_AMOUNT,
 )
 
-from eth2spec.phase0.spec import (
-    DepositData,
-)
-from eth2spec.utils.ssz.ssz_typing import List
-from eth2spec.utils.ssz.ssz_impl import (
-    hash_tree_root,
-)
-
 
 SAMPLE_PUBKEY = b'\x11' * 48
 SAMPLE_WITHDRAWAL_CREDENTIALS = b'\x22' * 32
@@ -0,0 +1,5 @@
eth-tester[py-evm]>=0.3.0b1,<0.4
web3==5.4.0
pytest==3.6.1
# The eth2spec
../../
@@ -0,0 +1,9 @@
from distutils.core import setup

setup(
    name='deposit_contract_tester',
    packages=['deposit_contract'],
    package_dir={"": "."},
    tests_require=[],
    install_requires=[],  # see requirements.txt file
)
@@ -1,19 +0,0 @@
from vyper import (
    compiler,
)

from .utils import (
    get_deposit_contract_code,
    get_deposit_contract_json,
)


def test_compile_deposit_contract():
    compiled_deposit_contract_json = get_deposit_contract_json()

    deposit_contract_code = get_deposit_contract_code()
    abi = compiler.mk_full_signature(deposit_contract_code)
    bytecode = compiler.compile_code(deposit_contract_code)['bytecode']

    assert abi == compiled_deposit_contract_json["abi"]
    assert bytecode == compiled_deposit_contract_json["bytecode"]
@@ -1,16 +0,0 @@
import json
import os

DIR = os.path.dirname(__file__)


def get_deposit_contract_code():
    file_path = os.path.join(DIR, './../../contracts/validator_registration.vy')
    deposit_contract_code = open(file_path).read()
    return deposit_contract_code


def get_deposit_contract_json():
    file_path = os.path.join(DIR, './../../contracts/validator_registration.json')
    deposit_contract_json = open(file_path).read()
    return json.loads(deposit_contract_json)
@@ -1,32 +0,0 @@
# Building pyspecs from specs.md

The benefit of the particular spec design is that the given Markdown files can be converted to a `spec.py` file for the purposes of testing and linting. As a result, bugs are discovered and patched more quickly.

Specs can be built from either a single Markdown document or multiple files that must be combined in a given order. Given 2 spec objects, `build_spec.combine_spec_objects` will combine them into a single spec object which, subsequently, can be converted into a `specs.py`.

## Usage

For usage of the spec builder, run `python3 -m build_spec --help`.

## `@Labels` and inserts

The functioning of the spec combiner is largely automatic in that given `spec0.md` and `spec1.md`, SSZ Objects will be extended and old functions will be overwritten. Extra functionality is provided for more granular control over how files are combined. In the event that only a small portion of code is to be added to an existing function, insert functionality is provided. This saves having to completely redefine the old function from `spec0.md` in `spec1.md`. This is done by marking where the change is to occur in the old file and marking which code is to be inserted in the new file. This is done as follows:

* In the old file, a label is added as a Python comment marking where the code is to be inserted. This would appear as follows in `spec0.md`:

```python
def foo(x):
    x << 1
    # @YourLabelHere
    return x
```

* In `spec1.md`, the new code can then be inserted by having a code-block that looks as follows:

```python
# begin insert @YourLabelHere
    x += x
# end insert @YourLabelHere
```

*Note*: The code to be inserted has the **same level of indentation** as the surrounding code of its destination insert point.
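A small worked example of the label mechanism described in this removed README, using the same `re.sub`-based substitution that `build_spec.py` (below) applies; the function and label names are illustrative only:

```python
import re

# Old function from spec0.md, with a labeled insertion point.
old_function = (
    "def foo(x):\n"
    "    x << 1\n"
    "    # @YourLabelHere\n"
    "    return x\n"
)
insert_body = "    x += x\n"

# Mirrors build_spec.py: the labeled comment line is replaced by the insert body.
combined = re.sub(r'[ ]*# @YourLabelHere\n', insert_body, old_function)
assert combined == "def foo(x):\n    x << 1\n    x += x\n    return x\n"
```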
@@ -1,347 +0,0 @@
import re
from function_puller import (
    get_spec,
    SpecObject,
)
from argparse import ArgumentParser
from typing import (
    Dict,
    Optional,
)


PHASE0_IMPORTS = '''from typing import (
    Any, Dict, Set, Sequence, Tuple, Optional, TypeVar
)

from dataclasses import (
    dataclass,
    field,
)

from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.ssz.ssz_typing import (
    boolean, Container, List, Vector, uint64, SSZType,
    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
)
from eth2spec.utils import bls

from eth2spec.utils.hash_function import hash

SSZObject = TypeVar('SSZObject', bound=SSZType)
'''
PHASE1_IMPORTS = '''from typing import (
    Any, Dict, Set, Sequence, MutableSequence, NewType, Optional, Tuple, Union, TypeVar
)
from math import (
    log2,
)

from dataclasses import (
    dataclass,
    field,
)

from eth2spec.utils.ssz.ssz_impl import (
    hash_tree_root,
    is_zero,
)
from eth2spec.utils.ssz.ssz_typing import (
    BasicValue, Elements, BaseBytes, BaseList, SSZType,
    Container, List, Vector, ByteList, ByteVector, Bitlist, Bitvector, Bits,
    Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96,
    uint64, bit, boolean, byte,
)
from eth2spec.utils import bls

from eth2spec.utils.hash_function import hash


SSZVariableName = str
GeneralizedIndex = NewType('GeneralizedIndex', int)
SSZObject = TypeVar('SSZObject', bound=SSZType)
'''
SUNDRY_CONSTANTS_FUNCTIONS = '''
def ceillog2(x: uint64) -> int:
    return (x - 1).bit_length()
'''
SUNDRY_FUNCTIONS = '''
# Monkey patch hash cache
_hash = hash
hash_cache: Dict[bytes, Bytes32] = {}


def get_eth1_data(distance: uint64) -> Bytes32:
    return hash(distance)


def hash(x: bytes) -> Bytes32:  # type: ignore
    if x not in hash_cache:
        hash_cache[x] = Bytes32(_hash(x))
    return hash_cache[x]


# Monkey patch validator compute committee code
_compute_committee = compute_committee
committee_cache: Dict[Tuple[Bytes32, Bytes32, int, int], Sequence[ValidatorIndex]] = {}


def compute_committee(indices: Sequence[ValidatorIndex],  # type: ignore
                      seed: Bytes32,
                      index: int,
                      count: int) -> Sequence[ValidatorIndex]:
    param_hash = (hash(b''.join(index.to_bytes(length=4, byteorder='little') for index in indices)), seed, index, count)

    if param_hash not in committee_cache:
        committee_cache[param_hash] = _compute_committee(indices, seed, index, count)
    return committee_cache[param_hash]


# Access to overwrite spec constants based on configuration
def apply_constants_preset(preset: Dict[str, Any]) -> None:
    global_vars = globals()
    for k, v in preset.items():
        if k.startswith('DOMAIN_'):
            global_vars[k] = DomainType(v)  # domain types are defined as bytes in the configs
        else:
            global_vars[k] = v

    # Deal with derived constants
    global_vars['GENESIS_EPOCH'] = compute_epoch_at_slot(GENESIS_SLOT)

    # Initialize SSZ types again, to account for changed lengths
    init_SSZ_types()
'''


def remove_for_phase1(functions: Dict[str, str]):
    for key, value in functions.items():
        lines = value.split("\n")
        lines = filter(lambda s: "[to be removed in phase 1]" not in s, lines)
        functions[key] = "\n".join(lines)


def strip_comments(raw: str) -> str:
    comment_line_regex = re.compile(r'^\s+# ')
    lines = raw.split('\n')
    out = []
    for line in lines:
        if not comment_line_regex.match(line):
            if ' #' in line:
                line = line[:line.index(' #')]
            out.append(line)
    return '\n'.join(out)


def objects_to_spec(functions: Dict[str, str],
                    custom_types: Dict[str, str],
                    constants: Dict[str, str],
                    ssz_objects: Dict[str, str],
                    inserts: Dict[str, str],
                    imports: Dict[str, str],
                    ) -> str:
    """
    Given all the objects that constitute a spec, combine them into a single pyfile.
    """
    new_type_definitions = (
        '\n\n'.join(
            [
                f"class {key}({value}):\n    pass\n"
                for key, value in custom_types.items()
            ]
        )
    )
    for k in list(functions):
        if "ceillog2" in k:
            del functions[k]
    functions_spec = '\n\n'.join(functions.values())
    for k in list(constants.keys()):
        if k == "BLS12_381_Q":
            constants[k] += "  # noqa: E501"
    constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, constants[x]), constants))
    ssz_objects_instantiation_spec = '\n\n'.join(ssz_objects.values())
    ssz_objects_reinitialization_spec = (
        'def init_SSZ_types() -> None:\n    global_vars = globals()\n\n    '
        + '\n\n    '.join([strip_comments(re.sub(r'(?!\n\n)\n', r'\n    ', value[:-1]))
                           for value in ssz_objects.values()])
        + '\n\n'
        + '\n'.join(map(lambda x: '    global_vars[\'%s\'] = %s' % (x, x), ssz_objects.keys()))
    )
    spec = (
        imports
        + '\n\n' + new_type_definitions
        + '\n' + SUNDRY_CONSTANTS_FUNCTIONS
        + '\n\n' + constants_spec
        + '\n\n\n' + ssz_objects_instantiation_spec
        + '\n\n' + functions_spec
        + '\n' + SUNDRY_FUNCTIONS
        + '\n\n' + ssz_objects_reinitialization_spec
        + '\n'
    )
    # Handle @inserts
    for key, value in inserts.items():
        spec = re.sub('[ ]*# %s\\n' % key, value, spec)
    return spec


def combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:
    for key, value in new_functions.items():
        old_functions[key] = value
    return old_functions


def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:
    for key, value in new_constants.items():
        old_constants[key] = value
    return old_constants


ignored_dependencies = [
    'bit', 'boolean', 'Vector', 'List', 'Container', 'Hash', 'BLSPubkey', 'BLSSignature', 'ByteList', 'ByteVector'
    'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
    'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
    'bytes', 'byte', 'ByteVector'  # to be removed after updating spec doc
]


def dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
    """
    Determines which SSZ Object is dependent on which other and orders them appropriately
    """
    items = list(objects.items())
    for key, value in items:
        dependencies = []
        for line in value.split('\n'):
            if not re.match(r'\s+\w+: .+', line):
                continue  # skip whitespace etc.
            line = line[line.index(':') + 1:]  # strip of field name
            if '#' in line:
                line = line[:line.index('#')]  # strip of comment
            dependencies.extend(re.findall(r'(\w+)', line))  # catch all legible words, potential dependencies
        dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies)  # filter out constants
        dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)
        dependencies = filter(lambda x: x not in custom_types, dependencies)
        for dep in dependencies:
            key_list = list(objects.keys())
            for item in [dep, key] + key_list[key_list.index(dep)+1:]:
                objects[item] = objects.pop(item)


def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:
    """
    Takes in old spec and new spec ssz objects, combines them,
    and returns the newer versions of the objects in dependency order.
    """
    for key, value in new_objects.items():
        if key in old_objects:
            # remove trailing newline
            old_objects[key] = old_objects[key]
            # remove leading variable name
            value = re.sub(r'^class [\w]*\(Container\):\n', '', value)
        old_objects[key] = old_objects.get(key, '') + value
    dependency_order_ssz_objects(old_objects, custom_types)
    return old_objects


# inserts are handled the same way as functions
combine_inserts = combine_functions


def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
    """
    Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
    """
    functions0, custom_types0, constants0, ssz_objects0, inserts0 = spec0
    functions1, custom_types1, constants1, ssz_objects1, inserts1 = spec1
    functions = combine_functions(functions0, functions1)
    custom_types = combine_constants(custom_types0, custom_types1)
    constants = combine_constants(constants0, constants1)
    ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
    inserts = combine_inserts(inserts0, inserts1)
    return functions, custom_types, constants, ssz_objects, inserts


def build_phase0_spec(phase0_sourcefile: str, fork_choice_sourcefile: str,
                      v_guide_sourcefile: str, outfile: str=None) -> Optional[str]:
    phase0_spec = get_spec(phase0_sourcefile)
    fork_choice_spec = get_spec(fork_choice_sourcefile)
    v_guide = get_spec(v_guide_sourcefile)
    spec_objects = phase0_spec
    for value in [fork_choice_spec, v_guide]:
        spec_objects = combine_spec_objects(spec_objects, value)
    spec = objects_to_spec(*spec_objects, PHASE0_IMPORTS)
    if outfile is not None:
        with open(outfile, 'w') as out:
            out.write(spec)
    return spec


def build_phase1_spec(phase0_beacon_sourcefile: str,
                      phase0_fork_choice_sourcefile: str,
                      merkle_proofs_sourcefile: str,
                      phase1_custody_sourcefile: str,
                      phase1_shard_sourcefile: str,
                      phase1_beacon_misc_sourcefile: str,
                      outfile: str=None) -> Optional[str]:
    all_sourcefiles = (
        phase0_beacon_sourcefile,
        phase0_fork_choice_sourcefile,
        merkle_proofs_sourcefile,
        phase1_custody_sourcefile,
        phase1_shard_sourcefile,
        phase1_beacon_misc_sourcefile,
    )
    all_spescs = [get_spec(spec) for spec in all_sourcefiles]
    for spec in all_spescs:
        remove_for_phase1(spec[0])
    spec_objects = all_spescs[0]
    for value in all_spescs[1:]:
        spec_objects = combine_spec_objects(spec_objects, value)
    spec = objects_to_spec(*spec_objects, PHASE1_IMPORTS)
    if outfile is not None:
        with open(outfile, 'w') as out:
            out.write(spec)
    return spec


if __name__ == '__main__':
    description = '''
Build the specs from the md docs.
If building phase 0:
    1st argument is input phase0/beacon-chain.md
    2nd argument is input phase0/fork-choice.md
    3rd argument is input phase0/validator.md
    4th argument is output spec.py

If building phase 1:
    1st argument is input phase0/beacon-chain.md
    2nd argument is input phase0/fork-choice.md
    3rd argument is input ssz/merkle-proofs.md
    4th argument is input phase1/custody-game.md
    5th argument is input phase1/shard-data-chains.md
    6th argument is input phase1/beacon-chain-misc.md
    7th argument is output spec.py
'''
    parser = ArgumentParser(description=description)
    parser.add_argument("-p", "--phase", dest="phase", type=int, default=0, help="Build for phase #")
    parser.add_argument(dest="files", help="Input and output files", nargs="+")

    args = parser.parse_args()
    if args.phase == 0:
        if len(args.files) == 4:
            build_phase0_spec(*args.files)
        else:
            print(" Phase 0 requires spec, forkchoice, and v-guide inputs as well as an output file.")
    elif args.phase == 1:
        if len(args.files) == 7:
            build_phase1_spec(*args.files)
        else:
            print(
                " Phase 1 requires input files as well as an output file:\n"
                "\t phase0: (beacon-chain.md, fork-choice.md)\n"
                "\t ssz: (merkle-proofs.md)\n"
                "\t phase1: (custody-game.md, shard-data-chains.md, beacon-chain-misc.md)\n"
                "\t and output.py"
            )
    else:
        print("Invalid phase: {0}".format(args.phase))
@@ -1,87 +0,0 @@
import re
from typing import Dict, Tuple, NewType


FUNCTION_REGEX = r'^def [\w_]*'
BEGIN_INSERT_REGEX = r'# begin insert '
END_INSERT_REGEX = r'# end insert'

SpecObject = NewType('SpecObjects', Tuple[Dict[str, str], Dict[str, str], Dict[str, str], Dict[str, str]])


def get_spec(file_name: str) -> SpecObject:
    """
    Takes in the file name of a spec.md file, opens it and returns the following objects:
    functions = {function_name: function_code}
    constants = {constant_name: constant_code}
    ssz_objects = {object_name: object}
    inserts = {insert_tag: code to be inserted}

    Note: This function makes heavy use of the inherent ordering of dicts,
    if this is not supported by your python version, it will not work.
    """
    pulling_from = None  # line number of start of latest object
    current_name = None  # most recent section title
    insert_name = None  # stores the label of the current insert object
    functions = {}
    constants = {}
    ssz_objects = {}
    inserts = {}
    function_matcher = re.compile(FUNCTION_REGEX)
    inserts_matcher = re.compile(BEGIN_INSERT_REGEX)
    is_ssz = False
    custom_types = {}
    for linenum, line in enumerate(open(file_name).readlines()):
        line = line.rstrip()
        if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
            current_name = line[line[:-1].rfind('`') + 1: -1]
        if line[:9] == '```python':
            assert pulling_from is None
            pulling_from = linenum + 1
        elif line[:3] == '```':
            pulling_from = None
        elif inserts_matcher.match(line) is not None:
            # Find @insert names
            insert_name = re.search(r'@[\w]*', line).group(0)
        elif insert_name is not None:
            # In insert mode, either the next line is more code, or the end of the insert
            if re.match(END_INSERT_REGEX, line) is not None:
                insert_name = None
            else:
                inserts[insert_name] = inserts.get(insert_name, '') + line + '\n'
        else:
            # Handle function definitions & ssz_objects
            if pulling_from is not None:
                # SSZ Object
                if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
                    name = line[6:-12]
                    # Check consistency with markdown header
                    assert name == current_name
                    is_ssz = True
                # function definition
                elif function_matcher.match(line) is not None:
                    current_name = function_matcher.match(line).group(0)
                    is_ssz = False
                if is_ssz:
                    ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
                else:
                    functions[current_name] = functions.get(current_name, '') + line + '\n'
            # Handle constant and custom types table entries
            elif pulling_from is None and len(line) > 0 and line[0] == '|':
                row = line[1:].split('|')
                if len(row) >= 2:
                    for i in range(2):
                        row[i] = row[i].strip().strip('`')
                        if '`' in row[i]:
                            row[i] = row[i][:row[i].find('`')]
                    is_constant_def = True
                    if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
                        is_constant_def = False
                    for c in row[0]:
                        if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
                            is_constant_def = False
                    if is_constant_def:
                        constants[row[0]] = row[1].replace('**TBD**', '2**32')
                    elif row[1].startswith('uint') or row[1].startswith('Bytes'):
                        custom_types[row[0]] = row[1]
    return functions, custom_types, constants, ssz_objects, inserts
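To make the constants-table parsing above concrete, here is a hedged illustration of what the row-handling branch does with a typical Markdown table line; the constant name and value are made up for the example:

```python
# A markdown table row as it appears in a spec document.
line = "| `GENESIS_SLOT` | `Slot(0)` |"

row = line[1:].split('|')
for i in range(2):
    row[i] = row[i].strip().strip('`')

# The first cell is all-caps with underscores, so it parses as a constant.
assert (row[0], row[1]) == ('GENESIS_SLOT', 'Slot(0)')
```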
@ -0,0 +1,486 @@
|
|||
from setuptools import setup, find_packages, Command
|
||||
from setuptools.command.build_py import build_py
|
||||
from distutils import dir_util
|
||||
from distutils.util import convert_path
|
||||
import os
|
||||
import re
|
||||
from typing import Dict, NamedTuple, List
|
||||
|
||||
FUNCTION_REGEX = r'^def [\w_]*'
|
||||
|
||||
|
||||
class SpecObject(NamedTuple):
|
||||
functions: Dict[str, str]
|
||||
custom_types: Dict[str, str]
|
||||
constants: Dict[str, str]
|
||||
ssz_objects: Dict[str, str]
|
||||
|
||||
|
||||
def get_spec(file_name: str) -> SpecObject:
|
||||
"""
|
||||
Takes in the file name of a spec.md file, opens it and returns a parsed spec object.
|
||||
|
||||
Note: This function makes heavy use of the inherent ordering of dicts,
|
||||
if this is not supported by your python version, it will not work.
|
||||
"""
|
||||
pulling_from = None # line number of start of latest object
|
||||
current_name = None # most recent section title
|
||||
functions: Dict[str, str] = {}
|
||||
constants: Dict[str, str] = {}
|
||||
ssz_objects: Dict[str, str] = {}
|
||||
function_matcher = re.compile(FUNCTION_REGEX)
|
||||
is_ssz = False
|
||||
custom_types: Dict[str, str] = {}
|
||||
for linenum, line in enumerate(open(file_name).readlines()):
|
||||
line = line.rstrip()
|
||||
if pulling_from is None and len(line) > 0 and line[0] == '#' and line[-1] == '`':
|
||||
current_name = line[line[:-1].rfind('`') + 1: -1]
|
||||
if line[:9] == '```python':
|
||||
assert pulling_from is None
|
||||
pulling_from = linenum + 1
|
||||
elif line[:3] == '```':
|
||||
pulling_from = None
|
||||
else:
|
||||
# Handle function definitions & ssz_objects
|
||||
if pulling_from is not None:
|
||||
# SSZ Object
|
||||
if len(line) > 18 and line[:6] == 'class ' and line[-12:] == '(Container):':
|
||||
name = line[6:-12]
|
||||
# Check consistency with markdown header
|
||||
assert name == current_name
|
||||
is_ssz = True
|
||||
# function definition
|
||||
elif function_matcher.match(line) is not None:
|
||||
current_name = function_matcher.match(line).group(0)
|
||||
is_ssz = False
|
||||
if is_ssz:
|
||||
ssz_objects[current_name] = ssz_objects.get(current_name, '') + line + '\n'
|
||||
else:
|
||||
functions[current_name] = functions.get(current_name, '') + line + '\n'
|
||||
# Handle constant and custom types table entries
|
||||
elif pulling_from is None and len(line) > 0 and line[0] == '|':
|
||||
row = line[1:].split('|')
|
||||
if len(row) >= 2:
|
||||
for i in range(2):
|
||||
row[i] = row[i].strip().strip('`')
|
||||
if '`' in row[i]:
|
||||
row[i] = row[i][:row[i].find('`')]
|
||||
is_constant_def = True
|
||||
if row[0][0] not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_':
|
||||
is_constant_def = False
|
||||
for c in row[0]:
|
||||
if c not in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ_0123456789':
|
||||
is_constant_def = False
|
||||
if is_constant_def:
|
||||
constants[row[0]] = row[1].replace('**TBD**', '2**32')
|
||||
elif row[1].startswith('uint') or row[1].startswith('Bytes'):
|
||||
custom_types[row[0]] = row[1]
|
||||
return SpecObject(functions, custom_types, constants, ssz_objects)
|
||||
|
||||
|
||||
CONFIG_LOADER = '''
|
||||
apply_constants_config(globals())
|
||||
'''
|
||||
|
||||
PHASE0_IMPORTS = '''from eth2spec.config.config_util import apply_constants_config
|
||||
from typing import (
|
||||
Any, Callable, Dict, Set, Sequence, Tuple, Optional, TypeVar
|
||||
)
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
View, boolean, Container, List, Vector, uint64,
|
||||
Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
SSZObject = TypeVar('SSZObject', bound=View)
|
||||
'''
|
||||
PHASE1_IMPORTS = '''from eth2spec.phase0 import spec as phase0
|
||||
from eth2spec.config.config_util import apply_constants_config
|
||||
from typing import (
|
||||
Any, Dict, Set, Sequence, NewType, Tuple, TypeVar, Callable
|
||||
)
|
||||
|
||||
from dataclasses import (
|
||||
dataclass,
|
||||
field,
|
||||
)
|
||||
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
View, boolean, Container, List, Vector, uint64, uint8, bit,
|
||||
ByteList, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist, Bitvector,
|
||||
)
|
||||
from eth2spec.utils import bls
|
||||
|
||||
from eth2spec.utils.hash_function import hash
|
||||
|
||||
# Whenever phase 1 is loaded, make sure we have the latest phase0
|
||||
from importlib import reload
|
||||
reload(phase0)
|
||||
|
||||
|
||||
SSZVariableName = str
|
||||
GeneralizedIndex = NewType('GeneralizedIndex', int)
|
||||
SSZObject = TypeVar('SSZObject', bound=View)
|
||||
'''
|
||||
SUNDRY_CONSTANTS_FUNCTIONS = '''
|
||||
def ceillog2(x: uint64) -> int:
|
||||
return (x - 1).bit_length()
|
||||
'''
|
||||
SUNDRY_FUNCTIONS = '''
# Monkey patch hash cache
_hash = hash
hash_cache: Dict[bytes, Bytes32] = {}


def get_eth1_data(distance: uint64) -> Bytes32:
    return hash(distance)


def hash(x: bytes) -> Bytes32:  # type: ignore
    if x not in hash_cache:
        hash_cache[x] = Bytes32(_hash(x))
    return hash_cache[x]


def cache_this(key_fn, value_fn):  # type: ignore
    cache_dict = {}  # type: ignore

    def wrapper(*args, **kw):  # type: ignore
        key = key_fn(*args, **kw)
        nonlocal cache_dict
        if key not in cache_dict:
            cache_dict[key] = value_fn(*args, **kw)
        return cache_dict[key]
    return wrapper


_get_base_reward = get_base_reward
get_base_reward = cache_this(
    lambda state, index: (state.validators.hash_tree_root(), state.slot),
    _get_base_reward)

_get_committee_count_at_slot = get_committee_count_at_slot
get_committee_count_at_slot = cache_this(
    lambda state, epoch: (state.validators.hash_tree_root(), epoch),
    _get_committee_count_at_slot)

_get_active_validator_indices = get_active_validator_indices
get_active_validator_indices = cache_this(
    lambda state, epoch: (state.validators.hash_tree_root(), epoch),
    _get_active_validator_indices)

_get_beacon_committee = get_beacon_committee
get_beacon_committee = cache_this(
    lambda state, slot, index: (state.validators.hash_tree_root(), state.randao_mixes.hash_tree_root(), slot, index),
    _get_beacon_committee)

_get_matching_target_attestations = get_matching_target_attestations
get_matching_target_attestations = cache_this(
    lambda state, epoch: (state.hash_tree_root(), epoch),
    _get_matching_target_attestations)

_get_matching_head_attestations = get_matching_head_attestations
get_matching_head_attestations = cache_this(
    lambda state, epoch: (state.hash_tree_root(), epoch),
    _get_matching_head_attestations)'''


def objects_to_spec(spec_object: SpecObject, imports: str, fork: str) -> str:
    """
    Given all the objects that constitute a spec, combine them into a single pyfile.
    """
    new_type_definitions = (
        '\n\n'.join(
            [
                f"class {key}({value}):\n    pass\n"
                for key, value in spec_object.custom_types.items()
            ]
        )
    )
    for k in list(spec_object.functions):
        if "ceillog2" in k:
            del spec_object.functions[k]
    functions_spec = '\n\n'.join(spec_object.functions.values())
    for k in list(spec_object.constants.keys()):
        if k == "BLS12_381_Q":
            spec_object.constants[k] += "  # noqa: E501"
    constants_spec = '\n'.join(map(lambda x: '%s = %s' % (x, spec_object.constants[x]), spec_object.constants))
    ssz_objects_instantiation_spec = '\n\n'.join(spec_object.ssz_objects.values())
    spec = (
        imports
        + '\n\n' + f"fork = \'{fork}\'\n"
        + '\n\n' + new_type_definitions
        + '\n' + SUNDRY_CONSTANTS_FUNCTIONS
        + '\n\n' + constants_spec
        + '\n\n' + CONFIG_LOADER
        + '\n\n' + ssz_objects_instantiation_spec
        + '\n\n' + functions_spec
        + '\n' + SUNDRY_FUNCTIONS
        + '\n'
    )
    return spec


def combine_functions(old_functions: Dict[str, str], new_functions: Dict[str, str]) -> Dict[str, str]:
    for key, value in new_functions.items():
        old_functions[key] = value
    return old_functions


def combine_constants(old_constants: Dict[str, str], new_constants: Dict[str, str]) -> Dict[str, str]:
    for key, value in new_constants.items():
        old_constants[key] = value
    return old_constants


ignored_dependencies = [
    'bit', 'boolean', 'Vector', 'List', 'Container', 'BLSPubkey', 'BLSSignature',
    'Bytes1', 'Bytes4', 'Bytes32', 'Bytes48', 'Bytes96', 'Bitlist', 'Bitvector',
    'uint8', 'uint16', 'uint32', 'uint64', 'uint128', 'uint256',
    'bytes', 'byte', 'ByteList', 'ByteVector'
]


def dependency_order_ssz_objects(objects: Dict[str, str], custom_types: Dict[str, str]) -> None:
    """
    Determines which SSZ Object is dependent on which other and orders them appropriately
    """
    items = list(objects.items())
    for key, value in items:
        dependencies = []
        for line in value.split('\n'):
            if not re.match(r'\s+\w+: .+', line):
                continue  # skip whitespace etc.
            line = line[line.index(':') + 1:]  # strip off the field name
            if '#' in line:
                line = line[:line.index('#')]  # strip off comments
            dependencies.extend(re.findall(r'(\w+)', line))  # catch all legible words, potential dependencies
        dependencies = filter(lambda x: '_' not in x and x.upper() != x, dependencies)  # filter out constants
        dependencies = filter(lambda x: x not in ignored_dependencies, dependencies)
        dependencies = filter(lambda x: x not in custom_types, dependencies)
        for dep in dependencies:
            key_list = list(objects.keys())
            for item in [dep, key] + key_list[key_list.index(dep)+1:]:
                objects[item] = objects.pop(item)


def combine_ssz_objects(old_objects: Dict[str, str], new_objects: Dict[str, str], custom_types) -> Dict[str, str]:
    """
    Takes in old spec and new spec ssz objects, combines them,
    and returns the newer versions of the objects in dependency order.
    """
    for key, value in new_objects.items():
        old_objects[key] = value
    return old_objects


def combine_spec_objects(spec0: SpecObject, spec1: SpecObject) -> SpecObject:
    """
    Takes in two spec variants (as tuples of their objects) and combines them using the appropriate combiner function.
    """
    functions0, custom_types0, constants0, ssz_objects0 = spec0
    functions1, custom_types1, constants1, ssz_objects1 = spec1
    functions = combine_functions(functions0, functions1)
    custom_types = combine_constants(custom_types0, custom_types1)
    constants = combine_constants(constants0, constants1)
    ssz_objects = combine_ssz_objects(ssz_objects0, ssz_objects1, custom_types)
    return SpecObject(functions, custom_types, constants, ssz_objects)


fork_imports = {
    'phase0': PHASE0_IMPORTS,
    'phase1': PHASE1_IMPORTS,
}


def build_spec(fork: str, source_files: List[str]) -> str:
    all_specs = [get_spec(spec) for spec in source_files]

    spec_object = all_specs[0]
    for value in all_specs[1:]:
        spec_object = combine_spec_objects(spec_object, value)

    dependency_order_ssz_objects(spec_object.ssz_objects, spec_object.custom_types)

    return objects_to_spec(spec_object, fork_imports[fork], fork)


class PySpecCommand(Command):
    """Convert spec markdown files to a spec python file"""

    description = "Convert spec markdown files to a spec python file"

    spec_fork: str
    md_doc_paths: str
    parsed_md_doc_paths: List[str]
    out_dir: str

    # The format is (long option, short option, description).
    user_options = [
        ('spec-fork=', None, "Spec fork to tag build with. Used to select md-docs defaults."),
        ('md-doc-paths=', None, "List of paths of markdown files to build spec with"),
        ('out-dir=', None, "Output directory to write spec package to")
    ]

    def initialize_options(self):
        """Set default values for options."""
        # Each user option must be listed here with their default value.
        self.spec_fork = 'phase0'
        self.md_doc_paths = ''
        self.out_dir = 'pyspec_output'

    def finalize_options(self):
        """Post-process options."""
        if len(self.md_doc_paths) == 0:
            print("no paths were specified, using default markdown file paths for pyspec"
                  " build (spec fork: %s)" % self.spec_fork)
            if self.spec_fork == "phase0":
                self.md_doc_paths = """
                    specs/phase0/beacon-chain.md
                    specs/phase0/fork-choice.md
                    specs/phase0/validator.md
                """
            elif self.spec_fork == "phase1":
                self.md_doc_paths = """
                    specs/phase0/beacon-chain.md
                    specs/phase0/fork-choice.md
                    specs/phase1/custody-game.md
                    specs/phase1/beacon-chain.md
                    specs/phase1/fraud-proofs.md
                    specs/phase1/fork-choice.md
                    specs/phase1/phase1-fork.md
                """
            else:
                raise Exception('no markdown files specified, and spec fork "%s" is unknown' % self.spec_fork)

        self.parsed_md_doc_paths = self.md_doc_paths.split()

        for filename in self.parsed_md_doc_paths:
            if not os.path.exists(filename):
                raise Exception('Pyspec markdown input file "%s" does not exist.' % filename)

    def run(self):
        spec_str = build_spec(self.spec_fork, self.parsed_md_doc_paths)
        if self.dry_run:
            self.announce('dry run successfully prepared contents for spec.'
                          f' out dir: "{self.out_dir}", spec fork: "{self.spec_fork}"')
            self.debug_print(spec_str)
        else:
            dir_util.mkpath(self.out_dir)
            with open(os.path.join(self.out_dir, 'spec.py'), 'w') as out:
                out.write(spec_str)
            with open(os.path.join(self.out_dir, '__init__.py'), 'w') as out:
                out.write("")


class BuildPyCommand(build_py):
    """Customize the build command to run the spec-builder on setup.py build"""

    def initialize_options(self):
        super(BuildPyCommand, self).initialize_options()

    def run_pyspec_cmd(self, spec_fork: str, **opts):
        cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
        cmd_obj.spec_fork = spec_fork
        cmd_obj.out_dir = os.path.join(self.build_lib, 'eth2spec', spec_fork)
        for k, v in opts.items():
            setattr(cmd_obj, k, v)
        self.run_command('pyspec')

    def run(self):
        for spec_fork in fork_imports:
            self.run_pyspec_cmd(spec_fork=spec_fork)

        super(BuildPyCommand, self).run()


class PyspecDevCommand(Command):
    """Build the markdown files in-place to their source location for testing."""
    description = "Build the markdown files in-place to their source location for testing."
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run_pyspec_cmd(self, spec_fork: str, **opts):
        cmd_obj: PySpecCommand = self.distribution.reinitialize_command("pyspec")
        cmd_obj.spec_fork = spec_fork
        eth2spec_dir = convert_path(self.distribution.package_dir['eth2spec'])
        cmd_obj.out_dir = os.path.join(eth2spec_dir, spec_fork)
        for k, v in opts.items():
            setattr(cmd_obj, k, v)
        self.run_command('pyspec')

    def run(self):
        print("running pyspecdev command (in-place spec build)")
        for spec_fork in fork_imports:
            self.run_pyspec_cmd(spec_fork=spec_fork)


commands = {
    'pyspec': PySpecCommand,
    'build_py': BuildPyCommand,
    'pyspecdev': PyspecDevCommand,
}

with open("README.md", "rt", encoding="utf8") as f:
|
||||
readme = f.read()
|
||||
|
||||
# How to use "VERSION.txt" file:
|
||||
# - dev branch contains "X.Y.Z.dev", where "X.Y.Z" is the target version to release dev into.
|
||||
# -> Changed as part of 'master' backport to 'dev'
|
||||
# - master branch contains "X.Y.Z", where "X.Y.Z" is the current version.
|
||||
# -> Changed as part of 'dev' release (or other branch) into 'master'
|
||||
# -> In case of a commit on master without git tag, target the next version
|
||||
# with ".postN" (release candidate, numbered) suffixed.
|
||||
# See https://www.python.org/dev/peps/pep-0440/#public-version-identifiers
|
||||
with open(os.path.join('tests', 'core', 'pyspec', 'eth2spec', 'VERSION.txt')) as f:
|
||||
spec_version = f.read().strip()
|
||||
|
||||
setup(
|
||||
name='eth2spec',
|
||||
version=spec_version,
|
||||
description="Eth2 spec, provided as Python package for tooling and testing",
|
||||
long_description=readme,
|
||||
long_description_content_type="text/markdown",
|
||||
author="ethereum",
|
||||
url="https://github.com/ethereum/eth2.0-specs",
|
||||
include_package_data=False,
|
||||
package_data={'configs': ['*.yaml'],
|
||||
'specs': ['**/*.md'],
|
||||
'eth2spec': ['VERSION.txt']},
|
||||
package_dir={
|
||||
"eth2spec": "tests/core/pyspec/eth2spec",
|
||||
"configs": "configs",
|
||||
"specs": "specs"
|
||||
},
|
||||
packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'],
|
||||
py_modules=["eth2spec"],
|
||||
cmdclass=commands,
|
||||
python_requires=">=3.8, <4",
|
||||
extras_require={
|
||||
"test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"],
|
||||
"lint": ["flake8==3.7.7", "mypy==0.750"],
|
||||
},
|
||||
install_requires=[
|
||||
"eth-utils>=1.3.0,<2",
|
||||
"eth-typing>=2.1.0,<3.0.0",
|
||||
"pycryptodome==3.9.4",
|
||||
"py_ecc==2.0.0",
|
||||
"dataclasses==0.6",
|
||||
"remerkleable==0.1.12",
|
||||
"ruamel.yaml==0.16.5"
|
||||
]
|
||||
)
|
|
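
# Illustrative usage only (editor's note; the commands are those registered in
# the `commands` dict above):
#
#   python setup.py pyspecdev     # build the pyspec in-place for testing
#   python setup.py pyspec --spec-fork=phase1 --out-dir=./build_out
#
# When --md-doc-paths is omitted, finalize_options falls back to the default
# markdown files for the selected fork.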
@ -24,6 +24,7 @@
- [Containers](#containers)
  - [Misc dependencies](#misc-dependencies)
    - [`Fork`](#fork)
    - [`ForkData`](#forkdata)
    - [`Checkpoint`](#checkpoint)
    - [`Validator`](#validator)
    - [`AttestationData`](#attestationdata)

@ -75,6 +76,8 @@
    - [`compute_epoch_at_slot`](#compute_epoch_at_slot)
    - [`compute_start_slot_at_epoch`](#compute_start_slot_at_epoch)
    - [`compute_activation_exit_epoch`](#compute_activation_exit_epoch)
    - [`compute_fork_data_root`](#compute_fork_data_root)
    - [`compute_fork_digest`](#compute_fork_digest)
    - [`compute_domain`](#compute_domain)
    - [`compute_signing_root`](#compute_signing_root)
  - [Beacon state accessors](#beacon-state-accessors)

@ -149,7 +152,8 @@ We define the following Python custom types for type hinting and readability:
| `Root` | `Bytes32` | a Merkle root |
| `Version` | `Bytes4` | a fork version number |
| `DomainType` | `Bytes4` | a domain type |
| `Domain` | `Bytes8` | a signature domain |
| `ForkDigest` | `Bytes4` | a digest of the current fork data |
| `Domain` | `Bytes32` | a signature domain |
| `BLSPubkey` | `Bytes48` | a BLS12-381 public key |
| `BLSSignature` | `Bytes96` | a BLS12-381 signature |

@ -183,6 +187,10 @@ The following values are (non-configurable) constants used throughout the specification.
| `SHUFFLE_ROUND_COUNT` | `90` |
| `MIN_GENESIS_ACTIVE_VALIDATOR_COUNT` | `2**14` (= 16,384) |
| `MIN_GENESIS_TIME` | `1578009600` (Jan 3, 2020) |
| `HYSTERESIS_QUOTIENT` | `4` |
| `HYSTERESIS_DOWNWARD_MULTIPLIER` | `1` |
| `HYSTERESIS_UPWARD_MULTIPLIER` | `5` |

- For the safety of committees, `TARGET_COMMITTEE_SIZE` exceeds [the recommended minimum committee size of 111](http://web.archive.org/web/20190504131341/https://vitalik.ca/files/Ithaca201807_Sharding.pdf); with sufficient active validators (at least `SLOTS_PER_EPOCH * TARGET_COMMITTEE_SIZE`), the shuffling algorithm ensures committee sizes of at least `TARGET_COMMITTEE_SIZE`. (Unbiasable randomness with a Verifiable Delay Function (VDF) will improve committee robustness and lower the safe minimum committee size.)

@ -213,7 +221,7 @@ The following values are (non-configurable) constants used throughout the specification.
| `MIN_SEED_LOOKAHEAD` | `2**0` (= 1) | epochs | 6.4 minutes |
| `MAX_SEED_LOOKAHEAD` | `2**2` (= 4) | epochs | 25.6 minutes |
| `MIN_EPOCHS_TO_INACTIVITY_PENALTY` | `2**2` (= 4) | epochs | 25.6 minutes |
| `SLOTS_PER_ETH1_VOTING_PERIOD` | `2**10` (= 1,024) | slots | ~3.4 hours |
| `EPOCHS_PER_ETH1_VOTING_PERIOD` | `2**5` (= 32) | epochs | ~3.4 hours |
| `SLOTS_PER_HISTORICAL_ROOT` | `2**13` (= 8,192) | slots | ~27 hours |
| `MIN_VALIDATOR_WITHDRAWABILITY_DELAY` | `2**8` (= 256) | epochs | ~27 hours |
| `PERSISTENT_COMMITTEE_PERIOD` | `2**11` (= 2,048) | epochs | 9 days |

@ -224,7 +232,7 @@ The following values are (non-configurable) constants used throughout the specification.
| - | - | :-: | :-: |
| `EPOCHS_PER_HISTORICAL_VECTOR` | `2**16` (= 65,536) | epochs | ~0.8 years |
| `EPOCHS_PER_SLASHINGS_VECTOR` | `2**13` (= 8,192) | epochs | ~36 days |
| `HISTORICAL_ROOTS_LIMIT` | `2**24` (= 16,777,216) | historical roots | ~26,131 years |
| `HISTORICAL_ROOTS_LIMIT` | `2**24` (= 16,777,216) | historical roots | ~52,262 years |
| `VALIDATOR_REGISTRY_LIMIT` | `2**40` (= 1,099,511,627,776) | validators | |

### Rewards and penalties

@ -253,11 +261,14 @@ The following values are (non-configurable) constants used throughout the specification.

| Name | Value |
| - | - |
| `DOMAIN_BEACON_PROPOSER` | `DomainType('0x00000000')` |
| `DOMAIN_BEACON_ATTESTER` | `DomainType('0x01000000')` |
| `DOMAIN_RANDAO` | `DomainType('0x02000000')` |
| `DOMAIN_DEPOSIT` | `DomainType('0x03000000')` |
| `DOMAIN_VOLUNTARY_EXIT` | `DomainType('0x04000000')` |
| `DOMAIN_BEACON_PROPOSER` | `DomainType('0x00000000')` |
| `DOMAIN_BEACON_ATTESTER` | `DomainType('0x01000000')` |
| `DOMAIN_RANDAO` | `DomainType('0x02000000')` |
| `DOMAIN_DEPOSIT` | `DomainType('0x03000000')` |
| `DOMAIN_VOLUNTARY_EXIT` | `DomainType('0x04000000')` |
| `DOMAIN_SELECTION_PROOF` | `DomainType('0x05000000')` |
| `DOMAIN_AGGREGATE_AND_PROOF` | `DomainType('0x06000000')` |

## Containers

@ -278,6 +289,14 @@ class Fork(Container):
    epoch: Epoch  # Epoch of latest fork
```

#### `ForkData`

```python
class ForkData(Container):
    current_version: Version
    genesis_validators_root: Root
```

#### `Checkpoint`

```python
@ -374,6 +393,7 @@ class DepositData(Container):
```python
class BeaconBlockHeader(Container):
    slot: Slot
    proposer_index: ValidatorIndex
    parent_root: Root
    state_root: Root
    body_root: Root
@ -393,7 +413,6 @@ class SigningRoot(Container):

```python
class ProposerSlashing(Container):
    proposer_index: ValidatorIndex
    signed_header_1: SignedBeaconBlockHeader
    signed_header_2: SignedBeaconBlockHeader
```

@ -453,6 +472,7 @@ class BeaconBlockBody(Container):
```python
class BeaconBlock(Container):
    slot: Slot
    proposer_index: ValidatorIndex
    parent_root: Root
    state_root: Root
    body: BeaconBlockBody
@ -466,6 +486,7 @@ class BeaconBlock(Container):
class BeaconState(Container):
    # Versioning
    genesis_time: uint64
    genesis_validators_root: Root
    slot: Slot
    fork: Fork
    # History
@ -475,7 +496,7 @@ class BeaconState(Container):
    historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
    # Eth1
    eth1_data: Eth1Data
    eth1_data_votes: List[Eth1Data, SLOTS_PER_ETH1_VOTING_PERIOD]
    eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
    eth1_deposit_index: uint64
    # Registry
    validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]

@ -785,16 +806,45 @@ def compute_activation_exit_epoch(epoch: Epoch) -> Epoch:
    return Epoch(epoch + 1 + MAX_SEED_LOOKAHEAD)
```

#### `compute_fork_data_root`

```python
def compute_fork_data_root(current_version: Version, genesis_validators_root: Root) -> Root:
    """
    Return the 32-byte fork data root for the ``current_version`` and ``genesis_validators_root``.
    This is used primarily in signature domains to avoid collisions across forks/chains.
    """
    return hash_tree_root(ForkData(
        current_version=current_version,
        genesis_validators_root=genesis_validators_root,
    ))
```

#### `compute_fork_digest`

```python
def compute_fork_digest(current_version: Version, genesis_validators_root: Root) -> ForkDigest:
    """
    Return the 4-byte fork digest for the ``current_version`` and ``genesis_validators_root``.
    This is a digest primarily used for domain separation on the p2p layer.
    4 bytes suffice for practical separation of forks/chains.
    """
    return ForkDigest(compute_fork_data_root(current_version, genesis_validators_root)[:4])
```

#### `compute_domain`

```python
def compute_domain(domain_type: DomainType, fork_version: Optional[Version]=None) -> Domain:
def compute_domain(domain_type: DomainType, fork_version: Version=None, genesis_validators_root: Root=None) -> Domain:
    """
    Return the domain for the ``domain_type`` and ``fork_version``.
    """
    if fork_version is None:
        fork_version = GENESIS_FORK_VERSION
    return Domain(domain_type + fork_version)
    if genesis_validators_root is None:
        genesis_validators_root = Root()  # all bytes zero by default
    fork_data_root = compute_fork_data_root(fork_version, genesis_validators_root)
    return Domain(domain_type + fork_data_root[:28])
```

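In the new form, the first 4 bytes of a `Domain` still carry the domain type, while the remaining 28 bytes now commit to both the fork version and the genesis validators root. A quick illustrative check (editor's sketch, not part of the diff):

```python
# Sketch: ties compute_domain to compute_fork_data_root, per the definitions above.
domain = compute_domain(DOMAIN_DEPOSIT, GENESIS_FORK_VERSION, Root())
fork_data_root = compute_fork_data_root(GENESIS_FORK_VERSION, Root())
assert bytes(domain) == bytes(DOMAIN_DEPOSIT) + bytes(fork_data_root)[:28]
```
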
#### `compute_signing_root`

@ -947,6 +997,7 @@ def get_beacon_proposer_index(state: BeaconState) -> ValidatorIndex:
def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei:
    """
    Return the combined effective balance of the ``indices``. (1 Gwei minimum to avoid divisions by zero.)
    Math safe up to ~10B ETH, after which this overflows uint64.
    """
    return Gwei(max(1, sum([state.validators[index].effective_balance for index in indices])))
```

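The "~10B ETH" bound is easy to sanity-check (editor's arithmetic, not spec code):

```python
# 10B ETH expressed in Gwei still fits in a uint64 (max 2**64 - 1 ~= 1.84e19):
assert 10**10 * 10**9 < 2**64  # 1e19 < 18446744073709551616
```
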
@ -970,7 +1021,7 @@ def get_domain(state: BeaconState, domain_type: DomainType, epoch: Epoch=None) -> Domain:
    """
    epoch = get_current_epoch(state) if epoch is None else epoch
    fork_version = state.fork.previous_version if epoch < state.fork.epoch else state.fork.current_version
    return compute_domain(domain_type, fork_version)
    return compute_domain(domain_type, fork_version, state.genesis_validators_root)
```

#### `get_indexed_attestation`

@ -1115,6 +1166,9 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Bytes32,
            validator.activation_eligibility_epoch = GENESIS_EPOCH
            validator.activation_epoch = GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = hash_tree_root(state.validators)

    return state
```

@ -1160,7 +1214,7 @@ def state_transition(state: BeaconState, signed_block: SignedBeaconBlock, validate_result: bool=True) -> BeaconState:

```python
def verify_block_signature(state: BeaconState, signed_block: SignedBeaconBlock) -> bool:
    proposer = state.validators[get_beacon_proposer_index(state)]
    proposer = state.validators[signed_block.message.proposer_index]
    signing_root = compute_signing_root(signed_block.message, get_domain(state, DOMAIN_BEACON_PROPOSER))
    return bls.Verify(proposer.pubkey, signing_root, signed_block.signature)
```

@ -1191,19 +1245,13 @@ def process_slot(state: BeaconState) -> None:

### Epoch processing

*Note*: The `# @LabelHere` lines below are placeholders to show that code will be inserted here in a future phase.

```python
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)
    process_rewards_and_penalties(state)
    process_registry_updates(state)
    # @process_reveal_deadlines
    # @process_challenge_deadlines
    process_slashings(state)
    # @update_period_committee
    process_final_updates(state)
    # @after_process_final_updates
```

#### Helper functions

@ -1225,7 +1273,7 @@ def get_matching_target_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
```python
def get_matching_head_attestations(state: BeaconState, epoch: Epoch) -> Sequence[PendingAttestation]:
    return [
        a for a in get_matching_source_attestations(state, epoch)
        a for a in get_matching_target_attestations(state, epoch)
        if a.data.beacon_block_root == get_block_root_at_slot(state, a.data.slot)
    ]
```

@ -1316,7 +1364,9 @@ def get_attestation_deltas(state: BeaconState) -> Tuple[Sequence[Gwei], Sequence[Gwei]]:
    attesting_balance = get_total_balance(state, unslashed_attesting_indices)
    for index in eligible_validator_indices:
        if index in unslashed_attesting_indices:
            rewards[index] += get_base_reward(state, index) * attesting_balance // total_balance
            increment = EFFECTIVE_BALANCE_INCREMENT  # Factored out from balance totals to avoid uint64 overflow
            reward_numerator = get_base_reward(state, index) * (attesting_balance // increment)
            rewards[index] = reward_numerator // (total_balance // increment)
        else:
            penalties[index] += get_base_reward(state, index)

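To see why the factoring matters, here is a rough magnitude check (editor's sketch; the base reward and validator count are illustrative, not spec values):

```python
base_reward = 20_000                     # Gwei, illustrative
attesting_balance = 10**7 * 32 * 10**9   # 10M validators at 32 ETH, in Gwei
increment = 10**9                        # mainnet EFFECTIVE_BALANCE_INCREMENT is 1 ETH = 10**9 Gwei

assert base_reward * attesting_balance >= 2**64                 # naive product overflows uint64
assert base_reward * (attesting_balance // increment) < 2**64   # factored form stays in range
```
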
@ -1400,13 +1450,18 @@ def process_final_updates(state: BeaconState) -> None:
    current_epoch = get_current_epoch(state)
    next_epoch = Epoch(current_epoch + 1)
    # Reset eth1 data votes
    if (state.slot + 1) % SLOTS_PER_ETH1_VOTING_PERIOD == 0:
    if next_epoch % EPOCHS_PER_ETH1_VOTING_PERIOD == 0:
        state.eth1_data_votes = []
    # Update effective balances with hysteresis
    for index, validator in enumerate(state.validators):
        balance = state.balances[index]
        HALF_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // 2
        if balance < validator.effective_balance or validator.effective_balance + 3 * HALF_INCREMENT < balance:
        HYSTERESIS_INCREMENT = EFFECTIVE_BALANCE_INCREMENT // HYSTERESIS_QUOTIENT
        DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_DOWNWARD_MULTIPLIER
        UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * HYSTERESIS_UPWARD_MULTIPLIER
        if (
            balance + DOWNWARD_THRESHOLD < validator.effective_balance
            or validator.effective_balance + UPWARD_THRESHOLD < balance
        ):
            validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE)
    # Reset slashings
    state.slashings[next_epoch % EPOCHS_PER_SLASHINGS_VECTOR] = Gwei(0)

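With the constants introduced above (`HYSTERESIS_QUOTIENT = 4`, `HYSTERESIS_DOWNWARD_MULTIPLIER = 1`, `HYSTERESIS_UPWARD_MULTIPLIER = 5`) and an `EFFECTIVE_BALANCE_INCREMENT` of 1 ETH (assumed here), the thresholds work out to 0.25 ETH downward and 1.25 ETH upward. A worked example (editor's sketch):

```python
ETH = 10**9  # Gwei; EFFECTIVE_BALANCE_INCREMENT assumed to be 1 ETH
HYSTERESIS_INCREMENT = ETH // 4                # 0.25 ETH
DOWNWARD_THRESHOLD = HYSTERESIS_INCREMENT * 1  # 0.25 ETH
UPWARD_THRESHOLD = HYSTERESIS_INCREMENT * 5    # 1.25 ETH

effective_balance = 31 * ETH
# A balance of 30.7 ETH triggers a downward update (30.7 + 0.25 < 31)...
assert 30_700_000_000 + DOWNWARD_THRESHOLD < effective_balance
# ...while 30.8 ETH does not (30.8 + 0.25 >= 31), damping effective-balance churn.
assert not (30_800_000_000 + DOWNWARD_THRESHOLD < effective_balance)
```
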
@ -1437,18 +1492,21 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None:
def process_block_header(state: BeaconState, block: BeaconBlock) -> None:
    # Verify that the slots match
    assert block.slot == state.slot
    # Verify that proposer index is the correct index
    assert block.proposer_index == get_beacon_proposer_index(state)
    # Verify that the parent matches
    assert block.parent_root == hash_tree_root(state.latest_block_header)
    # Cache current block as the new latest block
    state.latest_block_header = BeaconBlockHeader(
        slot=block.slot,
        proposer_index=block.proposer_index,
        parent_root=block.parent_root,
        state_root=Bytes32(),  # Overwritten in the next process_slot call
        body_root=hash_tree_root(block.body),
    )

    # Verify proposer is not slashed
    proposer = state.validators[get_beacon_proposer_index(state)]
    proposer = state.validators[block.proposer_index]
    assert not proposer.slashed
```

@ -1471,7 +1529,7 @@ def process_randao(state: BeaconState, body: BeaconBlockBody) -> None:
```python
def process_eth1_data(state: BeaconState, body: BeaconBlockBody) -> None:
    state.eth1_data_votes.append(body.eth1_data)
    if state.eth1_data_votes.count(body.eth1_data) * 2 > SLOTS_PER_ETH1_VOTING_PERIOD:
    if state.eth1_data_votes.count(body.eth1_data) * 2 > EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH:
        state.eth1_data = body.eth1_data
```

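With `EPOCHS_PER_ETH1_VOTING_PERIOD = 32` (from the configuration table above) and a mainnet-style `SLOTS_PER_EPOCH = 32` (assumed here), the condition amounts to a strict majority of the voting-period slots (editor's arithmetic):

```python
EPOCHS_PER_ETH1_VOTING_PERIOD = 32
SLOTS_PER_EPOCH = 32  # mainnet value, assumed for this example
period_slots = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH  # 1024 voting slots
# count * 2 > period_slots requires a strict majority of matching votes:
assert 513 * 2 > period_slots and not (512 * 2 > period_slots)
```
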
@ -1482,28 +1540,32 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # Verify that outstanding deposits are processed up to the maximum number of deposits
    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)

    for operations, function in (
        (body.proposer_slashings, process_proposer_slashing),
        (body.attester_slashings, process_attester_slashing),
        (body.attestations, process_attestation),
        (body.deposits, process_deposit),
        (body.voluntary_exits, process_voluntary_exit),
        # @process_shard_receipt_proofs
    ):
    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
            function(state, operation)
            fn(state, operation)

    for_ops(body.proposer_slashings, process_proposer_slashing)
    for_ops(body.attester_slashings, process_attester_slashing)
    for_ops(body.attestations, process_attestation)
    for_ops(body.deposits, process_deposit)
    for_ops(body.voluntary_exits, process_voluntary_exit)
```

##### Proposer slashings

```python
def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
    header_1 = proposer_slashing.signed_header_1.message
    header_2 = proposer_slashing.signed_header_2.message

    # Verify header slots match
    assert proposer_slashing.signed_header_1.message.slot == proposer_slashing.signed_header_2.message.slot
    assert header_1.slot == header_2.slot
    # Verify header proposer indices match
    assert header_1.proposer_index == header_2.proposer_index
    # Verify the headers are different
    assert proposer_slashing.signed_header_1 != proposer_slashing.signed_header_2
    assert header_1 != header_2
    # Verify the proposer is slashable
    proposer = state.validators[proposer_slashing.proposer_index]
    proposer = state.validators[header_1.proposer_index]
    assert is_slashable_validator(proposer, get_current_epoch(state))
    # Verify signatures
    for signed_header in (proposer_slashing.signed_header_1, proposer_slashing.signed_header_2):

@ -1511,7 +1573,7 @@ def process_proposer_slashing(state: BeaconState, proposer_slashing: ProposerSlashing) -> None:
    signing_root = compute_signing_root(signed_header.message, domain)
    assert bls.Verify(proposer.pubkey, signing_root, signed_header.signature)

    slash_validator(state, proposer_slashing.proposer_index)
    slash_validator(state, header_1.proposer_index)
```

##### Attester slashings

@ -14,7 +14,7 @@
- [Helpers](#helpers)
  - [`LatestMessage`](#latestmessage)
  - [`Store`](#store)
  - [`get_genesis_store`](#get_genesis_store)
  - [`get_forkchoice_store`](#get_forkchoice_store)
  - [`get_slots_since_genesis`](#get_slots_since_genesis)
  - [`get_current_slot`](#get_current_slot)
  - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start)

@ -24,6 +24,10 @@
  - [`get_filtered_block_tree`](#get_filtered_block_tree)
  - [`get_head`](#get_head)
  - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint)
  - [`on_attestation` helpers](#on_attestation-helpers)
    - [`validate_on_attestation`](#validate_on_attestation)
    - [`store_target_checkpoint_state`](#store_target_checkpoint_state)
    - [`update_latest_messages`](#update_latest_messages)
- [Handlers](#handlers)
  - [`on_tick`](#on_tick)
  - [`on_block`](#on_block)

@ -38,7 +42,7 @@ This document is the beacon chain fork choice spec, part of Ethereum 2.0 Phase 0.

## Fork choice

The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_genesis_store(genesis_state)` and update `store` by running:
The head block root associated with a `store` is defined as `get_head(store)`. At genesis, let `store = get_forkchoice_store(genesis_state)` and update `store` by running:

- `on_tick(time)` whenever `time > store.time` where `time` is the current Unix time
- `on_block(block)` whenever a block `block: SignedBeaconBlock` is received

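As a reading aid, a client's top-level loop wiring these handlers together might look like the following (editor's sketch, not spec code; `current_unix_time`, `new_blocks`, and `new_attestations` are hypothetical client-side sources):

```python
store = get_forkchoice_store(genesis_state)

while True:
    on_tick(store, current_unix_time())           # advance the store clock
    for signed_block in new_blocks():             # hypothetical network feed
        on_block(store, signed_block)
    for attestation in new_attestations():        # hypothetical network feed
        on_attestation(store, attestation)
    head_root = get_head(store)                   # current fork choice head
```
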
@ -85,23 +89,33 @@ class Store(object):
    latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict)
```

#### `get_genesis_store`
#### `get_forkchoice_store`

The provided anchor-state will be regarded as a trusted state, to not roll back beyond.
This should be the genesis state for a full client.

*Note*: With regards to fork choice, block headers are interchangeable with blocks. The spec is likely to move to headers for reduced overhead in test vectors and better encapsulation. Full implementations store blocks as part of their database and will often use full blocks when dealing with production fork choice.

_The block for `anchor_root` is incorrectly initialized to the block header, rather than the full block. This does not affect functionality but will be cleaned up in subsequent releases._

```python
def get_genesis_store(genesis_state: BeaconState) -> Store:
    genesis_block = BeaconBlock(state_root=hash_tree_root(genesis_state))
    root = hash_tree_root(genesis_block)
    justified_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
    finalized_checkpoint = Checkpoint(epoch=GENESIS_EPOCH, root=root)
def get_forkchoice_store(anchor_state: BeaconState) -> Store:
    anchor_block_header = anchor_state.latest_block_header.copy()
    if anchor_block_header.state_root == Bytes32():
        anchor_block_header.state_root = hash_tree_root(anchor_state)
    anchor_root = hash_tree_root(anchor_block_header)
    anchor_epoch = get_current_epoch(anchor_state)
    justified_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
    finalized_checkpoint = Checkpoint(epoch=anchor_epoch, root=anchor_root)
    return Store(
        time=genesis_state.genesis_time,
        genesis_time=genesis_state.genesis_time,
        time=anchor_state.genesis_time,
        genesis_time=anchor_state.genesis_time,
        justified_checkpoint=justified_checkpoint,
        finalized_checkpoint=finalized_checkpoint,
        best_justified_checkpoint=justified_checkpoint,
        blocks={root: genesis_block},
        block_states={root: genesis_state.copy()},
        checkpoint_states={justified_checkpoint: genesis_state.copy()},
        blocks={anchor_root: anchor_block_header},
        block_states={anchor_root: anchor_state.copy()},
        checkpoint_states={justified_checkpoint: anchor_state.copy()},
    )
```

@ -247,6 +261,59 @@ def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool:
    return True
```

#### `on_attestation` helpers

##### `validate_on_attestation`

```python
def validate_on_attestation(store: Store, attestation: Attestation) -> None:
    target = attestation.data.target

    # Attestations must be from the current or previous epoch
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    # Use GENESIS_EPOCH for previous when genesis to avoid underflow
    previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
    assert target.epoch in [current_epoch, previous_epoch]
    assert target.epoch == compute_epoch_at_slot(attestation.data.slot)

    # Attestation target must be for a known block. If target block is unknown, delay consideration until the block is found
    assert target.root in store.blocks
    # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
    assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)

    # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
    assert attestation.data.beacon_block_root in store.blocks
    # Attestations must not be for blocks in the future. If not, the attestation should not be considered
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot

    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
    assert get_current_slot(store) >= attestation.data.slot + 1
```

##### `store_target_checkpoint_state`

```python
def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None:
    # Store target checkpoint state if not yet seen
    if target not in store.checkpoint_states:
        base_state = store.block_states[target.root].copy()
        process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
        store.checkpoint_states[target] = base_state
```

##### `update_latest_messages`

```python
def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None:
    target = attestation.data.target
    beacon_block_root = attestation.data.beacon_block_root
    for i in attesting_indices:
        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=beacon_block_root)
```


### Handlers

#### `on_tick`

@ -322,42 +389,14 @@ def on_attestation(store: Store, attestation: Attestation) -> None:
    An ``attestation`` that is asserted as invalid may be valid at a later time,
    consider scheduling it for later processing in such case.
    """
    target = attestation.data.target
    validate_on_attestation(store, attestation)
    store_target_checkpoint_state(store, attestation.data.target)

    # Attestations must be from the current or previous epoch
    current_epoch = compute_epoch_at_slot(get_current_slot(store))
    # Use GENESIS_EPOCH for previous when genesis to avoid underflow
    previous_epoch = current_epoch - 1 if current_epoch > GENESIS_EPOCH else GENESIS_EPOCH
    assert target.epoch in [current_epoch, previous_epoch]
    assert target.epoch == compute_epoch_at_slot(attestation.data.slot)

    # Attestation target must be for a known block. If target block is unknown, delay consideration until the block is found
    assert target.root in store.blocks
    # Attestations cannot be from future epochs. If they are, delay consideration until the epoch arrives
    base_state = store.block_states[target.root].copy()
    assert get_current_slot(store) >= compute_start_slot_at_epoch(target.epoch)

    # Attestations must be for a known block. If block is unknown, delay consideration until the block is found
    assert attestation.data.beacon_block_root in store.blocks
    # Attestations must not be for blocks in the future. If not, the attestation should not be considered
    assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot

    # Store target checkpoint state if not yet seen
    if target not in store.checkpoint_states:
        process_slots(base_state, compute_start_slot_at_epoch(target.epoch))
        store.checkpoint_states[target] = base_state
    target_state = store.checkpoint_states[target]

    # Attestations can only affect the fork choice of subsequent slots.
    # Delay consideration in the fork choice until their slot is in the past.
    assert get_current_slot(store) >= attestation.data.slot + 1

    # Get state at the `target` to validate attestation and calculate the committees
    # Get state at the `target` to fully validate attestation
    target_state = store.checkpoint_states[attestation.data.target]
    indexed_attestation = get_indexed_attestation(target_state, attestation)
    assert is_valid_indexed_attestation(target_state, indexed_attestation)

    # Update latest messages
    for i in indexed_attestation.attesting_indices:
        if i not in store.latest_messages or target.epoch > store.latest_messages[i].epoch:
            store.latest_messages[i] = LatestMessage(epoch=target.epoch, root=attestation.data.beacon_block_root)
    # Update latest messages for attesting indices
    update_latest_messages(store, indexed_attestation.attesting_indices, attestation)
```

@ -55,6 +55,8 @@ It consists of four main sections:
  - [Attestation subnet bitfield](#attestation-subnet-bitfield)
    - [Interop](#interop-5)
    - [Mainnet](#mainnet-5)
  - [`eth2` field](#eth2-field)
- [General capabilities](#general-capabilities)
  - [Topic advertisement](#topic-advertisement)
    - [Mainnet](#mainnet-6)
- [Design decision rationale](#design-decision-rationale)

@ -88,12 +90,15 @@ It consists of four main sections:
  - [Why are we sending entire objects in the pubsub and not just hashes?](#why-are-we-sending-entire-objects-in-the-pubsub-and-not-just-hashes)
  - [Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc?](#should-clients-gossip-blocks-if-they-cannot-validate-the-proposer-signature-due-to-not-yet-being-synced-not-knowing-the-head-block-etc)
  - [How are we going to discover peers in a gossipsub topic?](#how-are-we-going-to-discover-peers-in-a-gossipsub-topic)
  - [How should fork version be used in practice?](#how-should-fork-version-be-used-in-practice)
- [Req/Resp](#reqresp)
  - [Why segregate requests into dedicated protocol IDs?](#why-segregate-requests-into-dedicated-protocol-ids)
  - [Why are messages length-prefixed with a protobuf varint in the SSZ-encoding?](#why-are-messages-length-prefixed-with-a-protobuf-varint-in-the-ssz-encoding)
  - [Why do we version protocol strings with ordinals instead of semver?](#why-do-we-version-protocol-strings-with-ordinals-instead-of-semver)
  - [Why is it called Req/Resp and not RPC?](#why-is-it-called-reqresp-and-not-rpc)
  - [Why do we allow empty responses in block requests?](#why-do-we-allow-empty-responses-in-block-requests)
  - [Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?](#why-does-beaconblocksbyrange-let-the-server-choose-which-branch-to-send-blocks-from)
  - [What's the effect of empty slots on the sync algorithm?](#whats-the-effect-of-empty-slots-on-the-sync-algorithm)
- [Discovery](#discovery)
  - [Why are we using discv5 and not libp2p Kademlia DHT?](#why-are-we-using-discv5-and-not-libp2p-kademlia-dht)
  - [What is the difference between an ENR and a multiaddr, and why are we using ENRs?](#what-is-the-difference-between-an-enr-and-a-multiaddr-and-why-are-we-using-enrs)

@ -149,9 +154,11 @@ The following SecIO parameters MUST be supported by all stacks:

#### Mainnet

[Noise Framework](http://www.noiseprotocol.org/) handshakes will be used for mainnet. libp2p Noise support [is in the process of being standardized](https://github.com/libp2p/specs/issues/195) in the libp2p project.
The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure
channel handshake with `secp256k1` identities will be used for mainnet.

Noise support will presumably include IX, IK, and XX handshake patterns, and may rely on Curve25519 keys, ChaCha20 and Poly1305 ciphers, and SHA-256 as a hash function. These aspects are being actively debated in the referenced issue (Eth2 implementers are welcome to comment and contribute to the discussion).
As specified in the libp2p specification, clients MUST support the `XX` handshake pattern and
can optionally implement the `IK` and `XXfallback` patterns for optimistic 0-RTT.

## Protocol Negotiation

@ -212,7 +219,13 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/tree/master

### Topics and messages

Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings have form: `/eth2/TopicName/TopicEncoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded.
Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`. This defines both the type of data being sent on the topic and how the data field of the message is encoded.

- `ForkDigestValue` - the lowercase hex-encoded (no "0x" prefix) bytes of `compute_fork_digest(current_fork_version, genesis_validators_root)` where
  - `current_fork_version` is the fork version of the epoch of the message to be sent on the topic
  - `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
- `Name` - see table below
- `Encoding` - the encoding strategy describes a specific representation of bytes that will be transmitted over the wire. See the [Encodings](#Encoding-strategies) section for further details.

Each gossipsub [message](https://github.com/libp2p/go-libp2p-pubsub/blob/master/pb/rpc.proto#L17-L24) has a maximum size of `GOSSIP_MAX_SIZE`. Clients MUST reject (fail validation) messages that are over this size limit. Likewise, clients MUST NOT emit or propagate messages larger than this limit.

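For example (editor's sketch; the digest value is invented for illustration, and the encoding name is taken from the Encodings section), the block topic for a client's current fork is assembled as:

```python
fork_digest = compute_fork_digest(current_fork_version, genesis_validators_root)
# Suppose fork_digest is 0x0a1b2c3d; hex is lowercased, with no "0x" prefix:
topic = "/eth2/" + bytes(fork_digest).hex() + "/beacon_block/" + "ssz_snappy"
# -> "/eth2/0a1b2c3d/beacon_block/ssz_snappy"
```
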
@ -225,15 +238,15 @@ where `base64` is the [URL-safe base64 alphabet](https://tools.ietf.org/html/rfc

The payload is carried in the `data` field of a gossipsub message, and varies depending on the topic:

| Topic                                            | Message Type         |
|--------------------------------------------------|----------------------|
| beacon_block                                     | SignedBeaconBlock    |
| beacon_aggregate_and_proof                       | AggregateAndProof    |
| beacon_attestation\*                             | Attestation          |
| committee_index{subnet_id}\_beacon_attestation   | Attestation          |
| voluntary_exit                                   | SignedVoluntaryExit  |
| proposer_slashing                                | ProposerSlashing     |
| attester_slashing                                | AttesterSlashing     |
| Name                                             | Message Type            |
|--------------------------------------------------|-------------------------|
| beacon_block                                     | SignedBeaconBlock       |
| beacon_aggregate_and_proof                       | SignedAggregateAndProof |
| beacon_attestation\*                             | Attestation             |
| committee_index{subnet_id}\_beacon_attestation   | Attestation             |
| voluntary_exit                                   | SignedVoluntaryExit     |
| proposer_slashing                                | ProposerSlashing        |
| attester_slashing                                | AttesterSlashing        |

Clients MUST reject (fail validation) messages containing an incorrect type, or invalid payload.

@ -243,35 +256,47 @@ When processing incoming gossip, clients MAY descore or disconnect peers who fail validation.

#### Global topics

There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `TopicName`s are:
There are two primary global topics used to propagate beacon blocks and aggregate attestations to all nodes on the network. Their `Name`s are:

- `beacon_block` - This topic is used solely for propagating new signed beacon blocks to all nodes on the network. Signed blocks are sent in their entirety. The following validations MUST pass before forwarding the `signed_beacon_block` on the network (a minimal client-side sketch follows this list).
  - The proposer signature, `signed_beacon_block.signature` is valid.
  - The block is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `signed_beacon_block.message.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot).
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `AggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `aggregate_and_proof` on the network.
  - The aggregate attestation defined by `hash_tree_root(aggregate_and_proof.aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
  - The block being voted for (`aggregate_and_proof.aggregate.data.beacon_block_root`) passes validation.
  - `aggregate_and_proof.aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate_and_proof.aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate_and_proof.aggregate.data.slot`.
  - The validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate_and_proof.aggregate.data, aggregate_and_proof.aggregate.aggregation_bits)`.
  - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate_and_proof.aggregate.data.slot, aggregate_and_proof.aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
  - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate_and_proof.aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
  - The signature of `aggregate_and_proof.aggregate` is valid.
  - The block is from a slot greater than the latest finalized slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `signed_beacon_block.message.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` (a client MAY choose to validate and store such blocks for additional purposes -- e.g. slashing detection, archive nodes, etc).
  - The block is the first block with valid signature received for the proposer for the slot, `signed_beacon_block.message.slot`.
  - The proposer signature, `signed_beacon_block.signature`, is valid.
- `beacon_aggregate_and_proof` - This topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. The following validations MUST pass before forwarding the `signed_aggregate_and_proof` on the network. (We define the following for convenience -- `aggregate_and_proof = signed_aggregate_and_proof.message` and `aggregate = aggregate_and_proof.aggregate`)
  - `aggregate.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `aggregate.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= aggregate.data.slot` (a client MAY queue future aggregates for processing at the appropriate slot).
  - The aggregate attestation defined by `hash_tree_root(aggregate)` has _not_ already been seen (via aggregate gossip, within a block, or through the creation of an equivalent aggregate locally).
  - The `aggregate` is the first valid aggregate received for the aggregator with index `aggregate_and_proof.aggregator_index` for the slot `aggregate.data.slot`.
  - The block being voted for (`aggregate.data.beacon_block_root`) passes validation.
  - `aggregate_and_proof.selection_proof` selects the validator as an aggregator for the slot -- i.e. `is_aggregator(state, aggregate.data.slot, aggregate.data.index, aggregate_and_proof.selection_proof)` returns `True`.
  - The aggregator's validator index is within the aggregate's committee -- i.e. `aggregate_and_proof.aggregator_index in get_attesting_indices(state, aggregate.data, aggregate.aggregation_bits)`.
  - The `aggregate_and_proof.selection_proof` is a valid signature of the `aggregate.data.slot` by the validator with index `aggregate_and_proof.aggregator_index`.
  - The aggregator signature, `signed_aggregate_and_proof.signature`, is valid.
  - The signature of `aggregate` is valid.

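A minimal sketch of how a client might chain the new `beacon_block` conditions above (editorial illustration, not part of the spec; `seen_proposals` is a hypothetical client-side set of `(proposer_index, slot)` pairs, and `MAXIMUM_GOSSIP_CLOCK_DISPARITY` handling is omitted):

```python
def validate_beacon_block_gossip(state: BeaconState,
                                 signed_beacon_block: SignedBeaconBlock,
                                 current_slot: Slot,
                                 seen_proposals: set) -> bool:
    block = signed_beacon_block.message
    if block.slot > current_slot:  # not from a future slot
        return False
    if block.slot <= compute_start_slot_at_epoch(state.finalized_checkpoint.epoch):
        return False  # must be newer than the latest finalized slot
    if (block.proposer_index, block.slot) in seen_proposals:
        return False  # only the first block per proposer and slot is forwarded
    return verify_block_signature(state, signed_beacon_block)  # proposer signature is valid
```
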
Additional global topics are used to propagate lower frequency validator messages. Their `TopicName`s are:
Additional global topics are used to propagate lower frequency validator messages. Their `Name`s are:

- `voluntary_exit` - This topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. Clients who receive a signed voluntary exit on this topic MUST validate the conditions within `process_voluntary_exit` before forwarding it across the network.
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. Clients who receive a proposer slashing on this topic MUST validate the conditions within `process_proposer_slashing` before forwarding it across the network.
- `voluntary_exit` - This topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. The following validations MUST pass before forwarding the `signed_voluntary_exit` on to the network.
  - The voluntary exit is the first valid voluntary exit received for the validator with index `signed_voluntary_exit.message.validator_index`.
  - All of the conditions within `process_voluntary_exit` pass validation.
- `proposer_slashing` - This topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. The following validations MUST pass before forwarding the `proposer_slashing` on to the network.
  - The proposer slashing is the first valid proposer slashing received for the proposer with index `proposer_slashing.index`.
  - All of the conditions within `process_proposer_slashing` pass validation.
- `attester_slashing` - This topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. Clients who receive an attester slashing on this topic MUST validate the conditions within `process_attester_slashing` before forwarding it across the network.
  - At least one index in the intersection of the attesting indices of each attestation has not yet been seen in any prior `attester_slashing` (i.e. `attester_slashed_indices = set(attestation_1.attesting_indices).intersection(attestation_2.attesting_indices)`, verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`).
  - All of the conditions within `process_attester_slashing` pass validation.

#### Attestation subnets
|
||||
|
||||
Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `TopicName`s are:
|
||||
Attestation subnets are used to propagate unaggregated attestations to subsections of the network. Their `Name`s are:
|
||||
|
||||
- `committee_index{subnet_id}_beacon_attestation` - These topics are used to propagate unaggregated attestations to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. The following validations MUST pass before forwarding the `attestation` on the subnet.
|
||||
- The attestation's committee index (`attestation.data.index`) is for the correct subnet.
|
||||
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot` (a client MAY queue future attestations for processing at the appropriate slot).
|
||||
- The attestation is unaggregated -- that is, it has exactly one participating validator (`len([bit for bit in attestation.aggregation_bits if bit == 0b1]) == 1`).
|
||||
- The attestation is the first valid attestation received for the participating validator for the slot, `attestation.data.slot`.
|
||||
- The block being voted for (`attestation.data.beacon_block_root`) passes validation.
|
||||
- `attestation.data.slot` is within the last `ATTESTATION_PROPAGATION_SLOT_RANGE` slots (within a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. `attestation.data.slot + ATTESTATION_PROPAGATION_SLOT_RANGE >= current_slot >= attestation.data.slot`.
|
||||
- The signature of `attestation` is valid.
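
A rough sketch of the checks above, with illustrative helper names and assumed constants (`ATTESTATION_SUBNET_COUNT = 64`, `ATTESTATION_PROPAGATION_SLOT_RANGE = 32`); the clock-disparity allowance, block-root validation, and signature verification are elided:

```python
def validate_subnet_attestation(attestation, subnet_id: int, current_slot: int,
                                seen: set,
                                attestation_subnet_count: int = 64,
                                propagation_slot_range: int = 32) -> bool:
    data = attestation.data
    # Committee index maps to this subnet.
    if data.index % attestation_subnet_count != subnet_id:
        return False
    # Slot is within the propagation range (clock-disparity allowance elided).
    if not data.slot <= current_slot <= data.slot + propagation_slot_range:
        return False
    # Unaggregated: exactly one participating validator.
    participants = [i for i, bit in enumerate(attestation.aggregation_bits) if bit]
    if len(participants) != 1:
        return False
    # First valid attestation from this participant for this slot.
    if (participants[0], data.slot) in seen:
        return False
    seen.add((participants[0], data.slot))
    return True  # block-root and signature checks assumed to follow
```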
|
||||
|
||||
#### Interop
|
||||
|
@ -398,14 +423,42 @@ Here, `result` represents the 1-byte response code.
|
|||
|
||||
The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Two values are possible at this time:
|
||||
|
||||
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Bytes32`'s.
|
||||
- `ssz`: the contents are [SSZ-encoded](../../ssz/simple-serialize.md). This encoding type MUST be supported by all clients. For objects containing a single field, only the field is SSZ-encoded, not a container with a single field. For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`s.
|
||||
- `ssz_snappy`: The contents are SSZ-encoded and then compressed with [Snappy](https://github.com/google/snappy). MAY be supported in the interoperability testnet; MUST be supported in mainnet.
|
||||
|
||||
#### SSZ-encoding strategy (with or without Snappy)
|
||||
|
||||
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded. If the Snappy variant is selected, we feed the serialized form to the Snappy compressor on encoding. The inverse happens on decoding.
|
||||
The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded.
|
||||
|
||||
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix all encoded and compressed (if applicable) payloads with an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
|
||||
If the Snappy variant is selected, we feed the serialized form of the object to the Snappy compressor on encoding. The inverse happens on decoding.
|
||||
|
||||
Snappy has two formats: "block" and "frames" (streaming). To support large requests and response chunks, snappy-framing is used.
|
||||
|
||||
Since snappy frame contents [have a maximum size of `65536` bytes](https://github.com/google/snappy/blob/master/framing_format.txt#L104)
|
||||
and frame headers are just `identifier (1) + checksum (4)` bytes, the expected buffering of a single frame is acceptable.
|
||||
|
||||
**Encoding-dependent header:** Req/Resp protocols using the `ssz` or `ssz_snappy` encoding strategies MUST prefix the payload with the length of the raw SSZ bytes, encoded as an unsigned [protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints).
|
||||
|
||||
*Writing*: By first computing and writing the SSZ byte length, the SSZ encoder can then directly write the chunk contents to the stream.
|
||||
If Snappy is applied, it can be passed through a buffered Snappy writer to compress frame by frame.
|
||||
|
||||
*Reading*: After reading the expected SSZ byte length, the SSZ decoder can directly read the contents from the stream.
|
||||
If snappy is applied, it can be passed through a buffered Snappy reader to decompress frame by frame.
|
||||
|
||||
A reader SHOULD NOT read more than `max_encoded_len(n)` bytes after reading the SSZ length prefix `n` from the header.
|
||||
- For `ssz` this is: `n`
|
||||
- For `ssz_snappy` this is: `32 + n + n // 6`. This is considered the [worst-case compression result](https://github.com/google/snappy/blob/537f4ad6240e586970fe554614542e9717df7902/snappy.cc#L98) by Snappy.
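
As a concrete sketch of the length-prefix handling, here is an unsigned protobuf-varint encoder together with the bound above (`max_encoded_len` is a name used in this spec text, not a library call):

```python
def encode_varint(n: int) -> bytes:
    # Unsigned protobuf varint: 7 payload bits per byte, MSB set on continuation.
    out = bytearray()
    while True:
        byte = n & 0x7F
        n >>= 7
        out.append(byte | (0x80 if n else 0x00))
        if n == 0:
            return bytes(out)

def max_encoded_len(n: int, encoding: str) -> int:
    # Upper bound on bytes a reader should consume after the length prefix `n`.
    if encoding == "ssz":
        return n
    if encoding == "ssz_snappy":
        return 32 + n + n // 6  # Snappy's documented worst-case output size
    raise ValueError(f"unknown encoding: {encoding}")

assert encode_varint(300) == bytes([0xAC, 0x02])
```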
|
||||
|
||||
A reader SHOULD consider the following cases as invalid input:
|
||||
- An SSZ length prefix that, compared against the SSZ type information (vector lengths, list limits, integer sizes, etc.), is:
|
||||
- Smaller than the expected minimum serialized length.
|
||||
- Bigger than the expected maximum serialized length.
|
||||
- Any remaining bytes, after having read the `n` SSZ bytes. An EOF is expected.
|
||||
- An early EOF, before fully reading the declared length prefix worth of SSZ bytes.
|
||||
|
||||
In case of an invalid input, a reader MUST:
|
||||
- From requests: send back an error message, response code `InvalidRequest`. The request itself is ignored.
|
||||
- From responses: ignore the response, the response MUST be considered bad server behavior.
|
||||
|
||||
All messages that contain only a single field MUST be encoded directly as the type of that field and MUST NOT be encoded as an SSZ container.
|
||||
|
||||
|
@ -422,16 +475,18 @@ constituents individually as `response_chunk`s. For example, the
|
|||
Request, Response Content:
|
||||
```
|
||||
(
|
||||
head_fork_version: Bytes4
|
||||
finalized_root: Bytes32
|
||||
finalized_epoch: uint64
|
||||
head_root: Bytes32
|
||||
head_slot: uint64
|
||||
fork_digest: ForkDigest
|
||||
finalized_root: Root
|
||||
finalized_epoch: Epoch
|
||||
head_root: Root
|
||||
head_slot: Slot
|
||||
)
|
||||
```
|
||||
The fields are, as seen by the client at the time of sending the message:
|
||||
|
||||
- `head_fork_version`: The beacon_state `Fork` version.
|
||||
- `fork_digest`: The node's `ForkDigest` (`compute_fork_digest(current_fork_version, genesis_validators_root)`) where
|
||||
- `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time (not necessarily the epoch to which the node is synced)
|
||||
- `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
||||
- `finalized_root`: `state.finalized_checkpoint.root` for the state corresponding to the head block.
|
||||
- `finalized_epoch`: `state.finalized_checkpoint.epoch` for the state corresponding to the head block.
|
||||
- `head_root`: The `hash_tree_root` of the current head block.
|
||||
|
@ -445,7 +500,7 @@ The response MUST consist of a single `response_chunk`.
|
|||
|
||||
Clients SHOULD immediately disconnect from one another following the handshake above under the following conditions:
|
||||
|
||||
1. If `head_fork_version` does not match the expected fork version at the epoch of the `head_slot`, since the client’s chain is on another fork. `head_fork_version` can also be used to segregate testnets.
|
||||
1. If `fork_digest` does not match the node's local `fork_digest`, since the client’s chain is on another fork.
|
||||
2. If the (`finalized_root`, `finalized_epoch`) shared by the peer is not in the client's chain at the expected epoch. For example, if Peer 1 sends (root, epoch) of (A, 5) and Peer 2 sends (B, 3) but Peer 1 has root C at epoch 3, then Peer 1 would disconnect because it knows that their chains are irreparably disjoint.
|
||||
|
||||
Once the handshake completes, the client with the lower `finalized_epoch` or `head_slot` (if the clients have equal `finalized_epoch`s) SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange` request.
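
A sketch of the post-handshake decision logic under these rules; the `Status`-like objects and the `block_root_at` lookup are illustrative:

```python
from typing import Callable, Optional

def should_disconnect(local, remote,
                      block_root_at: Callable[[int], Optional[bytes]]) -> bool:
    # 1. Different fork digest: the peer is on another fork (or another network).
    if remote.fork_digest != local.fork_digest:
        return True
    # 2. The peer's finalized checkpoint must lie on our chain at that epoch.
    our_root = block_root_at(remote.finalized_epoch)  # None if unknown to us
    return our_root is not None and our_root != remote.finalized_root

def should_request_blocks(local, remote) -> bool:
    # The less advanced side requests BeaconBlocksByRange from its counterparty.
    if local.finalized_epoch != remote.finalized_epoch:
        return local.finalized_epoch < remote.finalized_epoch
    return local.head_slot < remote.head_slot
```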
|
||||
|
@ -483,8 +538,7 @@ The response MUST consist of a single `response_chunk`.
|
|||
Request Content:
|
||||
```
|
||||
(
|
||||
head_block_root: Bytes32
|
||||
start_slot: uint64
|
||||
start_slot: Slot
|
||||
count: uint64
|
||||
step: uint64
|
||||
)
|
||||
|
@ -497,22 +551,24 @@ Response Content:
|
|||
)
|
||||
```
|
||||
|
||||
Requests count beacon blocks from the peer starting from `start_slot` on the chain defined by `head_block_root` (= `hash_tree_root(SignedBeaconBlock.message)`). The response MUST contain no more than count blocks. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`.
|
||||
Requests `count` beacon blocks from the peer starting from `start_slot`, leading up to the current head block as selected by fork choice. `step` defines the slot increment between blocks. For example, requesting blocks starting at `start_slot` 2 with a step value of 2 would return the blocks at slots [2, 4, 6, …]. In cases where a slot is empty for a given slot number, no block is returned. For example, if slot 4 were empty in the previous example, the returned array would contain [2, 6, …]. A step value of 1 returns all blocks on the range `[start_slot, start_slot + count)`.
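
For illustration, the slots covered by a request form a simple arithmetic progression; empty slots in that set contribute no block to the response:

```python
def requested_slots(start_slot: int, count: int, step: int) -> list:
    return [start_slot + i * step for i in range(count)]

assert requested_slots(2, 3, 2) == [2, 4, 6]
assert requested_slots(2, 3, 1) == [2, 3, 4]  # step 1: [start_slot, start_slot + count)
```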
|
||||
|
||||
`BeaconBlocksByRange` is primarily used to sync historical blocks.
|
||||
|
||||
The request MUST be encoded as an SSZ-container.
|
||||
|
||||
The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlock` payload.
|
||||
|
||||
`BeaconBlocksByRange` is primarily used to sync historical blocks.
|
||||
|
||||
Clients MUST support requesting blocks since the start of the weak subjectivity period and up to the given `head_block_root`.
|
||||
|
||||
Clients MUST support `head_block_root` values since the latest finalized epoch.
|
||||
Clients MUST keep a record of signed blocks seen since the start of the weak subjectivity period and MUST support serving requests of blocks up to their own `head_block_root`.
|
||||
|
||||
Clients MUST respond with at least one block, if they have it and it exists in the range. Clients MAY limit the number of blocks in the response.
|
||||
|
||||
The response MUST contain no more than `count` blocks.
|
||||
|
||||
Clients MUST order blocks by increasing slot number.
|
||||
|
||||
Clients MUST respond with blocks from their view of the current fork choice. In particular, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake.
|
||||
|
||||
#### BeaconBlocksByRoot
|
||||
|
||||
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/`
|
||||
|
@ -521,7 +577,7 @@ Request Content:
|
|||
|
||||
```
|
||||
(
|
||||
[]Bytes32
|
||||
[]Root
|
||||
)
|
||||
```
|
||||
|
||||
|
@ -588,6 +644,38 @@ Nonetheless, ENRs MUST carry a generic `eth2` key with nil value, denoting that
|
|||
|
||||
#### Mainnet
|
||||
|
||||
##### `eth2` field
|
||||
|
||||
ENRs MUST carry a generic `eth2` key with a 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended eth2 network.
|
||||
|
||||
| Key | Value |
|
||||
|:-------------|:--------------------|
|
||||
| `eth2` | SSZ `ENRForkID` |
|
||||
|
||||
Specifically, the value of the `eth2` key MUST be the following SSZ encoded object (`ENRForkID`)
|
||||
|
||||
```
|
||||
(
|
||||
fork_digest: ForkDigest
|
||||
next_fork_version: Version
|
||||
next_fork_epoch: Epoch
|
||||
)
|
||||
```
|
||||
|
||||
where the fields of `ENRForkID` are defined as
|
||||
|
||||
* `fork_digest` is `compute_fork_digest(current_fork_version, genesis_validators_root)` where
|
||||
* `current_fork_version` is the fork version at the node's current epoch defined by the wall-clock time (not necessarily the epoch to which the node is synced)
|
||||
* `genesis_validators_root` is the static `Root` found in `state.genesis_validators_root`
|
||||
* `next_fork_version` is the fork version corresponding to the next planned hard fork at a future epoch. If no future fork is planned, set `next_fork_version = current_fork_version` to signal this fact
|
||||
* `next_fork_epoch` is the epoch at which the next fork is planned and the `current_fork_version` will be updated. If no future fork is planned, set `next_fork_epoch = FAR_FUTURE_EPOCH` to signal this fact
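
Putting the rules above together, a node might assemble its `ENRForkID` roughly as follows (`make_enr_fork_id` is an illustrative helper; `compute_fork_digest`, `ENRForkID`, and `FAR_FUTURE_EPOCH` are the spec definitions):

```python
def make_enr_fork_id(current_fork_version: Version,
                     genesis_validators_root: Root,
                     next_fork_version: Optional[Version] = None,
                     next_fork_epoch: Optional[Epoch] = None) -> ENRForkID:
    if next_fork_version is None:
        # No future fork planned: signal with the current version...
        next_fork_version = current_fork_version
    if next_fork_epoch is None:
        # ...and with FAR_FUTURE_EPOCH.
        next_fork_epoch = FAR_FUTURE_EPOCH
    return ENRForkID(
        fork_digest=compute_fork_digest(current_fork_version, genesis_validators_root),
        next_fork_version=next_fork_version,
        next_fork_epoch=next_fork_epoch,
    )
```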
|
||||
|
||||
Clients SHOULD connect to peers with `fork_digest`, `next_fork_version`, and `next_fork_epoch` that match local values.
|
||||
|
||||
Clients MAY connect to peers with the same `fork_digest` but a different `next_fork_version`/`next_fork_epoch`. Unless `ENRForkID` is manually updated to match prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`.
|
||||
|
||||
##### General capabilities
|
||||
|
||||
On mainnet, ENRs MUST include a structure enumerating the capabilities offered by the peer in an efficient manner. The concrete solution is currently undefined. Proposals include using namespaced bloom filters mapping capabilities to specific protocol IDs supported under that capability.
|
||||
|
||||
### Topic advertisement
|
||||
|
@ -737,9 +825,9 @@ For future extensibility with almost zero overhead now (besides the extra bytes
|
|||
|
||||
### How do we upgrade gossip channels (e.g. changes in encoding, compression)?
|
||||
|
||||
Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, for example during a hard fork.
|
||||
Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, during a hard fork.
|
||||
|
||||
One can envision a two-phase deployment as well where clients start listening to the new topic in the first phase then start publishing some time later, letting the traffic naturally move over to the new topic.
|
||||
When a node is preparing for upcoming tasks (e.g. validator duty lookahead) on a gossipsub topic, the node should join the topic of the future epoch in which the task is to occur in addition to listening to the topics for the current epoch.
|
||||
|
||||
### Why must all clients use the same gossip topic instead of one negotiated between each peer pair?
|
||||
|
||||
|
@ -807,6 +895,14 @@ In Phase 0, peers for attestation subnets will be found using the `attnets` entr
|
|||
|
||||
Although this method will be sufficient for early phases of Eth2, we aim to use the more appropriate discv5 topics for this and other similar tasks in the future. ENRs should ultimately not be used for this purpose. They are best suited to store identity, location, and capability information, rather than more volatile advertisements.
|
||||
|
||||
### How should fork version be used in practice?
|
||||
|
||||
Fork versions are to be manually updated (likely via incrementing) at each hard fork. This is to provide native domain separation for signatures as well as to aid in identifying peers (via ENRs) and versioning network protocols (e.g. using fork version to naturally version gossipsub topics).
|
||||
|
||||
`BeaconState.genesis_validators_root` is mixed into signature and ENR fork domains (`ForkDigest`) to aid in the ease of domain separation between chains. This allows fork versions to safely be reused across chains except for the case of contentious forks using the same genesis. In these cases, extra care should be taken to isolate fork versions (e.g. flip a high order bit in all future versions of one of the chains).
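
As an illustration of the high-order-bit suggestion (one possible reading, treating the first byte of the 4-byte `Version` as most significant):

```python
def isolate_fork_version(version: bytes) -> bytes:
    # Flip the top bit of the first byte so future fork versions of the two
    # contentious chains can never collide.
    assert len(version) == 4
    return bytes([version[0] ^ 0x80]) + version[1:]

assert isolate_fork_version(bytes.fromhex("00000001")).hex() == "80000001"
```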
|
||||
|
||||
A node locally stores all previous and future planned fork versions along with each fork epoch. This allows for handling sync and processing messages starting from past forks/epochs.
|
||||
|
||||
## Req/Resp
|
||||
|
||||
### Why segregate requests into dedicated protocol IDs?
|
||||
|
@ -829,23 +925,14 @@ Requests are segregated by protocol ID to:
|
|||
|
||||
We are using single-use streams where each stream is closed at the end of the message. Thus, libp2p transparently handles message delimiting in the underlying stream. libp2p streams are full-duplex, and each party is responsible for closing their write side (like in TCP). We can therefore use stream closure to mark the end of the request and response independently.
|
||||
|
||||
Nevertheless, messages are still length-prefixed—this is now being considered for removal.
|
||||
|
||||
Advantages of length-prefixing include:
|
||||
|
||||
* Reader can prepare a correctly sized buffer before reading message
|
||||
Nevertheless, in the case of `ssz` and `ssz_snappy`, messages are still length-prefixed with the length of the underlying data:
|
||||
* A basic reader can prepare a correctly sized buffer before reading the message
|
||||
* A more advanced reader can stream-decode SSZ given the length of the SSZ data.
|
||||
* Alignment with protocols like gRPC over HTTP/2 that prefix with length
|
||||
* Sanity checking of stream closure / message length
|
||||
* Sanity checking of message length, and enabling much stricter message length limiting based on SSZ type information,
|
||||
to provide even more DoS protection than the global message length already does. E.g. a small `Status` message does not nearly require `MAX_CHUNK_SIZE` bytes.
|
||||
|
||||
Disadvantages include:
|
||||
|
||||
* Redundant methods of message delimiting—both stream end marker and length prefix
|
||||
* Harder to stream as length must be known up-front
|
||||
* Additional code path required to verify length
|
||||
|
||||
In some protocols, adding a length prefix serves as a form of DoS protection against very long messages, allowing the client to abort if an overlong message is about to be sent. In this protocol, we are globally limiting message sizes using `MAX_CHUNK_SIZE`, thus the length prefix does not afford any additional protection.
|
||||
|
||||
[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.
|
||||
[Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte.
|
||||
|
||||
### Why do we version protocol strings with ordinals instead of semver?
|
||||
|
||||
|
@ -886,6 +973,18 @@ Assuming option 0 with no special `null` encoding, consider a request for slots
|
|||
|
||||
Failing to provide blocks that nodes "should" have is reason to trust a peer less - for example, if a particular peer gossips a block, it should have access to its parent. If a request for the parent fails, it's indicative of poor peer quality since peers should validate blocks before gossiping them.
|
||||
|
||||
### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from?
|
||||
|
||||
When connecting, the `Status` message gives an idea about the sync status of a particular peer, but this changes over time. By the time a subsequent `BeaconBlockByRange` request is processed, the information may be stale, and the responding side might have moved on to a new finalization point and pruned blocks around the previous head and finalized blocks.
|
||||
|
||||
To avoid this race condition, we allow the responding side to choose which branch to send to the requesting client. The requesting client then goes on to validate the blocks and incorporate them in their own database - because they follow the same rules, they should at this point arrive at the same canonical chain.
|
||||
|
||||
### What's the effect of empty slots on the sync algorithm?
|
||||
|
||||
When syncing one can only tell that a slot has been skipped on a particular branch by examining subsequent blocks and analyzing the graph formed by the parent root. Because the server side may choose to omit blocks in the response for any reason, clients must validate the graph and be prepared to fill in gaps.
|
||||
|
||||
For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], clients may not assume that block 4 doesn't exist - it merely means that the responding peer did not send it (they may not have it yet or may maliciously be trying to hide it) and successive blocks will be needed to determine if there exists a block at slot 4 in this particular branch.
|
||||
|
||||
## Discovery
|
||||
|
||||
### Why are we using discv5 and not libp2p Kademlia DHT?
|
||||
|
|
|
@ -27,6 +27,7 @@
|
|||
- [Block proposal](#block-proposal)
|
||||
- [Preparing for a `BeaconBlock`](#preparing-for-a-beaconblock)
|
||||
- [Slot](#slot)
|
||||
- [Proposer index](#proposer-index)
|
||||
- [Parent root](#parent-root)
|
||||
- [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody)
|
||||
- [Randao reveal](#randao-reveal)
|
||||
|
@ -59,6 +60,7 @@
|
|||
- [Aggregate signature](#aggregate-signature-1)
|
||||
- [Broadcast aggregate](#broadcast-aggregate)
|
||||
- [`AggregateAndProof`](#aggregateandproof)
|
||||
- [`SignedAggregateAndProof`](#signedaggregateandproof)
|
||||
- [Phase 0 attestation subnet stability](#phase-0-attestation-subnet-stability)
|
||||
- [How to avoid slashing](#how-to-avoid-slashing)
|
||||
- [Proposer slashing](#proposer-slashing)
|
||||
|
@ -128,7 +130,7 @@ To submit a deposit:
|
|||
|
||||
### Process deposit
|
||||
|
||||
Deposits cannot be processed into the beacon chain until the Eth1 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~4 hours) plus `SLOTS_PER_ETH1_VOTING_PERIOD` slots (~3.4 hours). Once the requisite Eth1 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated.
|
||||
Deposits cannot be processed into the beacon chain until the Eth1 block in which they were deposited or any of its descendants is added to the beacon chain `state.eth1_data`. This takes _a minimum_ of `ETH1_FOLLOW_DISTANCE` Eth1 blocks (~4 hours) plus `EPOCHS_PER_ETH1_VOTING_PERIOD` epochs (~3.4 hours). Once the requisite Eth1 data is added, the deposit will normally be added to a beacon chain block and processed into the `state.validators` within an epoch or two. The validator is then in a queue to be activated.
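
The quoted delays follow directly from the constants; a back-of-the-envelope check, assuming illustrative mainnet-style values:

```python
ETH1_FOLLOW_DISTANCE = 2**10              # 1024 Eth1 blocks (assumed value)
SECONDS_PER_ETH1_BLOCK = 14               # average Eth1 block time (assumed)
EPOCHS_PER_ETH1_VOTING_PERIOD = 2**5      # 32 epochs (assumed value)
SLOTS_PER_EPOCH = 32
SECONDS_PER_SLOT = 12

follow_delay = ETH1_FOLLOW_DISTANCE * SECONDS_PER_ETH1_BLOCK
voting_delay = EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH * SECONDS_PER_SLOT
print(follow_delay / 3600, voting_delay / 3600)  # ~3.98 h and ~3.41 h
```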
|
||||
|
||||
### Validator index
|
||||
|
||||
|
@ -182,14 +184,13 @@ def get_committee_assignment(state: BeaconState,
|
|||
A validator can use the following function to see if they are supposed to propose during a slot. This function can only be run with a `state` of the slot in question. Proposer selection is only stable within the context of the current epoch.
|
||||
|
||||
```python
|
||||
def is_proposer(state: BeaconState,
|
||||
validator_index: ValidatorIndex) -> bool:
|
||||
def is_proposer(state: BeaconState, validator_index: ValidatorIndex) -> bool:
|
||||
return get_beacon_proposer_index(state) == validator_index
|
||||
```
|
||||
|
||||
*Note*: To see if a validator is assigned to propose during the slot, the beacon state must be in the epoch in question. At the epoch boundaries, the validator must run an epoch transition into the epoch to successfully check the proposal assignment of the first slot.
|
||||
|
||||
*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at different a different slot.
|
||||
*Note*: `BeaconBlock` proposal is distinct from beacon committee assignment, and in a given epoch each responsibility might occur at a different slot.
|
||||
|
||||
### Lookahead
|
||||
|
||||
|
@ -223,11 +224,14 @@ Set `block.slot = slot` where `slot` is the current slot at which the validator
|
|||
|
||||
*Note*: There might be "skipped" slots between the `parent` and `block`. These skipped slots are processed in the state transition function without per-block processing.
|
||||
|
||||
##### Proposer index
|
||||
|
||||
Set `block.proposer_index = validator_index` where `validator_index` is the validator chosen to propose at this slot. The private key mapping to `state.validators[validator_index].pubkey` is used to sign the block.
|
||||
|
||||
##### Parent root
|
||||
|
||||
Set `block.parent_root = hash_tree_root(parent)`.
|
||||
|
||||
|
||||
#### Constructing the `BeaconBlockBody`
|
||||
|
||||
##### Randao reveal
|
||||
|
@ -268,7 +272,7 @@ def compute_time_at_slot(state: BeaconState, slot: Slot) -> uint64:
|
|||
|
||||
```python
|
||||
def voting_period_start_time(state: BeaconState) -> uint64:
|
||||
eth1_voting_period_start_slot = Slot(state.slot - state.slot % SLOTS_PER_ETH1_VOTING_PERIOD)
|
||||
eth1_voting_period_start_slot = Slot(state.slot - state.slot % (EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH))
|
||||
return compute_time_at_slot(state, eth1_voting_period_start_slot)
|
||||
```
|
||||
|
||||
|
@ -354,9 +358,9 @@ def get_block_signature(state: BeaconState, header: BeaconBlockHeader, privkey:
|
|||
|
||||
A validator is expected to create, sign, and broadcast an attestation during each epoch. The `committee`, assigned `index`, and assigned `slot` for which the validator performs this role during an epoch are defined by `get_committee_assignment(state, epoch, validator_index)`.
|
||||
|
||||
A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid block from the expected block proposer for the assigned `slot` or (b) one-third of the `slot` hash transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.
|
||||
A validator should create and broadcast the `attestation` to the associated attestation subnet when either (a) the validator has received a valid block from the expected block proposer for the assigned `slot` or (b) one-third of the `slot` has transpired (`SECONDS_PER_SLOT / 3` seconds after the start of `slot`) -- whichever comes _first_.
|
||||
|
||||
*Note*: Although attestations during `GENESIS_EPOCH` do not count toward FFG finality, these initial attestations do give weight to the fork choice, are rewarded fork, and should be made.
|
||||
*Note*: Although attestations during `GENESIS_EPOCH` do not count toward FFG finality, these initial attestations do give weight to the fork choice, are rewarded, and should be made.
|
||||
|
||||
#### Attestation data
|
||||
|
||||
|
@ -411,7 +415,7 @@ def get_signed_attestation_data(state: BeaconState, attestation: IndexedAttestat
|
|||
|
||||
#### Broadcast attestation
|
||||
|
||||
Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` pubsub topic.
|
||||
Finally, the validator broadcasts `attestation` to the associated attestation subnet -- the `committee_index{attestation.data.index % ATTESTATION_SUBNET_COUNT}_beacon_attestation` pubsub topic.
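
The subnet topic for a given committee index can be derived directly (a sketch; `ATTESTATION_SUBNET_COUNT = 64` is assumed):

```python
def attestation_subnet_topic(committee_index: int,
                             attestation_subnet_count: int = 64) -> str:
    subnet_id = committee_index % attestation_subnet_count
    return f"committee_index{subnet_id}_beacon_attestation"

assert attestation_subnet_topic(70) == "committee_index6_beacon_attestation"
```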
|
||||
|
||||
### Attestation aggregation
|
||||
|
||||
|
@ -423,7 +427,7 @@ A validator is selected to aggregate based upon the return value of `is_aggregat
|
|||
|
||||
```python
|
||||
def get_slot_signature(state: BeaconState, slot: Slot, privkey: int) -> BLSSignature:
|
||||
domain = get_domain(state, DOMAIN_BEACON_ATTESTER, compute_epoch_at_slot(slot))
|
||||
domain = get_domain(state, DOMAIN_SELECTION_PROOF, compute_epoch_at_slot(slot))
|
||||
signing_root = compute_signing_root(slot, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
```
|
||||
|
@ -461,9 +465,37 @@ def get_aggregate_signature(attestations: Sequence[Attestation]) -> BLSSignature
|
|||
|
||||
#### Broadcast aggregate
|
||||
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate to the global aggregate channel (`beacon_aggregate_and_proof`) two-thirds of the way through the `slot`-that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
|
||||
If the validator is selected to aggregate (`is_aggregator`), then they broadcast their best aggregate as a `SignedAggregateAndProof` to the global aggregate channel (`beacon_aggregate_and_proof`) two-thirds of the way through the `slot`, that is, `SECONDS_PER_SLOT * 2 / 3` seconds after the start of `slot`.
|
||||
|
||||
Aggregate attestations are broadcast as `AggregateAndProof` objects to prove to the gossip channel that the validator has been selected as an aggregator.
|
||||
Selection proofs are provided in `AggregateAndProof` to prove to the gossip channel that the validator has been selected as an aggregator.
|
||||
|
||||
`AggregateAndProof` messages are signed by the aggregator and broadcast inside of `SignedAggregateAndProof` objects to prevent a class of DoS attacks and message forgeries.
|
||||
|
||||
First, `aggregate_and_proof = get_aggregate_and_proof(state, validator_index, aggregate_attestation, privkey)` is constructed.
|
||||
|
||||
```python
|
||||
def get_aggregate_and_proof(state: BeaconState,
|
||||
aggregator_index: ValidatorIndex,
|
||||
aggregate: Attestation,
|
||||
privkey: int) -> AggregateAndProof:
|
||||
return AggregateAndProof(
|
||||
aggregator_index=aggregator_index,
|
||||
aggregate=aggregate,
|
||||
selection_proof=get_slot_signature(state, aggregate.data.slot, privkey),
|
||||
)
|
||||
```
|
||||
|
||||
Then `signed_aggregate_and_proof = SignedAggregateAndProof(message=aggregate_and_proof, signature=signature)` is constructed and broadcast, where `signature` is obtained from:
|
||||
|
||||
```python
|
||||
def get_aggregate_and_proof_signature(state: BeaconState,
|
||||
aggregate_and_proof: AggregateAndProof,
|
||||
privkey: int) -> BLSSignature:
|
||||
aggregate = aggregate_and_proof.aggregate
|
||||
domain = get_domain(state, DOMAIN_AGGREGATE_AND_PROOF, compute_epoch_at_slot(aggregate.data.slot))
|
||||
signing_root = compute_signing_root(aggregate_and_proof, domain)
|
||||
return bls.Sign(privkey, signing_root)
|
||||
```
|
||||
|
||||
##### `AggregateAndProof`
|
||||
|
||||
|
@ -474,10 +506,13 @@ class AggregateAndProof(Container):
|
|||
selection_proof: BLSSignature
|
||||
```
|
||||
|
||||
Where
|
||||
* `aggregator_index` is the validator's `ValidatorIndex`.
|
||||
* `aggregate` is the `aggregate_attestation` constructed in the previous section.
|
||||
* `selection_proof` is the signature of the slot (`get_slot_signature()`).
|
||||
##### `SignedAggregateAndProof`
|
||||
|
||||
```python
|
||||
class SignedAggregateAndProof(Container):
|
||||
message: AggregateAndProof
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
## Phase 0 attestation subnet stability
|
||||
|
||||
|
@ -487,6 +522,8 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th
|
|||
* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets
|
||||
* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR
|
||||
|
||||
*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements.
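
A minimal sketch of the subscription-rotation rule, assuming `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256` and `ATTESTATION_SUBNET_COUNT = 64`:

```python
import random

def pick_random_subnet_subscription(epochs_per_subscription: int = 256,
                                    attestation_subnet_count: int = 64):
    subnet_id = random.randrange(attestation_subnet_count)
    # Lifetime drawn uniformly from [N, 2N] epochs, as described above.
    lifetime = random.randint(epochs_per_subscription, 2 * epochs_per_subscription)
    return subnet_id, lifetime
```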
|
||||
|
||||
## How to avoid slashing
|
||||
|
||||
"Slashing" is the burning of some amount of validator funds and immediate ejection from the active validator set. In Phase 0, there are two ways in which funds can be slashed: [proposer slashing](#proposer-slashing) and [attester slashing](#attester-slashing). Although being slashed has serious repercussions, it is simple enough to avoid being slashed all together by remaining _consistent_ with respect to the messages a validator has previously signed.
|
||||
|
@ -495,13 +532,13 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th
|
|||
|
||||
### Proposer slashing
|
||||
|
||||
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](./beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same epoch.
|
||||
To avoid "proposer slashings", a validator must not sign two conflicting [`BeaconBlock`](./beacon-chain.md#beaconblock) where conflicting is defined as two distinct blocks within the same slot.
|
||||
|
||||
*In Phase 0, as long as the validator does not sign two different beacon blocks for the same epoch, the validator is safe against proposer slashings.*
|
||||
*In Phase 0, as long as the validator does not sign two different beacon blocks for the same slot, the validator is safe against proposer slashings.*
|
||||
|
||||
Specifically, when signing a `BeaconBlock`, a validator should perform the following steps in the following order:
|
||||
|
||||
1. Save a record to hard disk that a beacon block has been signed for the `epoch=compute_epoch_at_slot(block.slot)`.
|
||||
1. Save a record to hard disk that a beacon block has been signed for the `slot=block.slot`.
|
||||
2. Generate and broadcast the block.
|
||||
|
||||
If the software crashes at some point within this routine, then when the validator comes back online, the hard disk has the record of the *potentially* signed/broadcast block and can effectively avoid slashing.
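
A toy sketch of the record-then-sign ordering (production clients use a durable database; the file-based store and names here are purely illustrative):

```python
import json
import os

def sign_block_safely(block, sign, db_path: str = "proposer_protection.json"):
    signed_slots = set(json.load(open(db_path))) if os.path.exists(db_path) else set()
    if block.slot in signed_slots:
        raise RuntimeError(f"refusing to double-sign slot {block.slot}")
    # 1. Persist the record first, so a crash cannot lose it.
    signed_slots.add(block.slot)
    with open(db_path, "w") as f:
        json.dump(sorted(signed_slots), f)
    # 2. Only then generate and broadcast the signed block.
    return sign(block)
```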
|
||||
|
|
|
@ -1,254 +0,0 @@
|
|||
# Phase 1 miscellaneous beacon chain changes
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Configuration](#configuration)
|
||||
- [Containers](#containers)
|
||||
- [`CompactCommittee`](#compactcommittee)
|
||||
- [`ShardReceiptDelta`](#shardreceiptdelta)
|
||||
- [`ShardReceiptProof`](#shardreceiptproof)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [`pack_compact_validator`](#pack_compact_validator)
|
||||
- [`unpack_compact_validator`](#unpack_compact_validator)
|
||||
- [`committee_to_compact_committee`](#committee_to_compact_committee)
|
||||
- [`verify_merkle_proof`](#verify_merkle_proof)
|
||||
- [`compute_historical_state_generalized_index`](#compute_historical_state_generalized_index)
|
||||
- [`get_generalized_index_of_crosslink_header`](#get_generalized_index_of_crosslink_header)
|
||||
- [`process_shard_receipt_proof`](#process_shard_receipt_proof)
|
||||
- [Changes](#changes)
|
||||
- [Phase 0 container updates](#phase-0-container-updates)
|
||||
- [`BeaconState`](#beaconstate)
|
||||
- [`BeaconBlockBody`](#beaconblockbody)
|
||||
- [Persistent committees](#persistent-committees)
|
||||
- [Shard receipt processing](#shard-receipt-processing)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Configuration
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | - | - |
|
||||
| `MAX_SHARD_RECEIPT_PROOFS` | `2**0` (= 1) | - | - |
|
||||
| `PERIOD_COMMITTEE_ROOT_LENGTH` | `2**8` (= 256) | periods | ~9 months |
|
||||
| `MINOR_REWARD_QUOTIENT` | `2**8` (=256) | - | - |
|
||||
| `REWARD_COEFFICIENT_BASE` | **TBD** | - | - |
|
||||
|
||||
## Containers
|
||||
|
||||
#### `CompactCommittee`
|
||||
|
||||
```python
|
||||
class CompactCommittee(Container):
|
||||
pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE]
|
||||
compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
|
||||
```
|
||||
|
||||
#### `ShardReceiptDelta`
|
||||
|
||||
```python
|
||||
class ShardReceiptDelta(Container):
|
||||
index: ValidatorIndex
|
||||
reward_coefficient: uint64
|
||||
block_fee: Gwei
|
||||
```
|
||||
|
||||
|
||||
#### `ShardReceiptProof`
|
||||
|
||||
```python
|
||||
class ShardReceiptProof(Container):
|
||||
shard: Shard
|
||||
proof: List[Bytes32, PLACEHOLDER]
|
||||
receipt: List[ShardReceiptDelta, PLACEHOLDER]
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
#### `pack_compact_validator`
|
||||
|
||||
```python
|
||||
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
|
||||
"""
|
||||
Creates a compact validator object representing index, slashed status, and compressed balance.
|
||||
Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with
|
||||
the unpacking function.
|
||||
"""
|
||||
return (index << 16) + (slashed << 15) + balance_in_increments
|
||||
```
|
||||
|
||||
#### `unpack_compact_validator`
|
||||
|
||||
```python
|
||||
def unpack_compact_validator(compact_validator: int) -> Tuple[int, bool, int]:
|
||||
"""
|
||||
Returns validator index, slashed, balance // EFFECTIVE_BALANCE_INCREMENT
|
||||
"""
|
||||
return compact_validator >> 16, bool((compact_validator >> 15) % 2), compact_validator & (2**15 - 1)
|
||||
```
|
||||
|
||||
#### `committee_to_compact_committee`
|
||||
|
||||
```python
|
||||
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
|
||||
"""
|
||||
Given a state and a list of validator indices, outputs the CompactCommittee representing them.
|
||||
"""
|
||||
validators = [state.validators[i] for i in committee]
|
||||
compact_validators = [
|
||||
pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT)
|
||||
for i, v in zip(committee, validators)
|
||||
]
|
||||
pubkeys = [v.pubkey for v in validators]
|
||||
return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
|
||||
```
|
||||
|
||||
#### `verify_merkle_proof`
|
||||
|
||||
```python
|
||||
def verify_merkle_proof(leaf: Bytes32, proof: Sequence[Bytes32], index: GeneralizedIndex, root: Root) -> bool:
|
||||
assert len(proof) == get_generalized_index_length(index)
|
||||
for i, h in enumerate(proof):
|
||||
if get_generalized_index_bit(index, i):
|
||||
leaf = hash(h + leaf)
|
||||
else:
|
||||
leaf = hash(leaf + h)
|
||||
return leaf == root
|
||||
```
|
||||
|
||||
#### `compute_historical_state_generalized_index`
|
||||
|
||||
```python
|
||||
def compute_historical_state_generalized_index(earlier: ShardSlot, later: ShardSlot) -> GeneralizedIndex:
|
||||
"""
|
||||
Computes the generalized index of the state root of slot `earlier` based on the state root of slot `later`.
|
||||
Relies on the `history_accumulator` in the `ShardState`, where `history_accumulator[i]` maintains the most
|
||||
recent 2**i'th slot state. Works by tracing a `log(later-earlier)` step path from `later` to `earlier`
|
||||
through intermediate blocks at the next available multiples of descending powers of two.
|
||||
"""
|
||||
o = GeneralizedIndex(1)
|
||||
for i in range(HISTORY_ACCUMULATOR_DEPTH - 1, -1, -1):
|
||||
if (later - 1) & 2**i > (earlier - 1) & 2**i:
|
||||
later = later - ((later - 1) % 2**i) - 1
|
||||
gindex = GeneralizedIndex(get_generalized_index(ShardState, ['history_accumulator', i]))
|
||||
o = concat_generalized_indices(o, gindex)
|
||||
return o
|
||||
```
|
||||
|
||||
#### `get_generalized_index_of_crosslink_header`
|
||||
|
||||
```python
|
||||
def get_generalized_index_of_crosslink_header(index: int) -> GeneralizedIndex:
|
||||
"""
|
||||
Gets the generalized index for the root of the index'th header in a crosslink.
|
||||
"""
|
||||
MAX_CROSSLINK_SIZE = (
|
||||
MAX_SHARD_BLOCK_SIZE * SHARD_SLOTS_PER_EPOCH * MAX_EPOCHS_PER_CROSSLINK
|
||||
)
|
||||
assert MAX_CROSSLINK_SIZE == get_previous_power_of_two(MAX_CROSSLINK_SIZE)
|
||||
return GeneralizedIndex(MAX_CROSSLINK_SIZE // SHARD_HEADER_SIZE + index)
|
||||
```
|
||||
|
||||
#### `process_shard_receipt_proof`
|
||||
|
||||
```python
|
||||
def process_shard_receipt_proof(state: BeaconState, receipt_proof: ShardReceiptProof) -> None:
|
||||
"""
|
||||
Processes a ShardReceipt object.
|
||||
"""
|
||||
receipt_slot = (
|
||||
state.next_shard_receipt_period[receipt_proof.shard] *
|
||||
SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD
|
||||
)
|
||||
first_slot_in_last_crosslink = state.current_crosslinks[receipt_proof.shard].start_epoch * SHARD_SLOTS_PER_EPOCH
|
||||
gindex = concat_generalized_indices(
|
||||
get_generalized_index_of_crosslink_header(0),
|
||||
GeneralizedIndex(get_generalized_index(ShardBlockHeader, 'state_root')),
|
||||
compute_historical_state_generalized_index(receipt_slot, first_slot_in_last_crosslink),
|
||||
GeneralizedIndex(get_generalized_index(ShardState, 'receipt_root'))
|
||||
)
|
||||
assert verify_merkle_proof(
|
||||
leaf=hash_tree_root(receipt_proof.receipt),
|
||||
proof=receipt_proof.proof,
|
||||
index=gindex,
|
||||
root=state.current_crosslinks[receipt_proof.shard].data_root
|
||||
)
|
||||
for delta in receipt_proof.receipt:
|
||||
if get_current_epoch(state) < state.validators[delta.index].withdrawable_epoch:
|
||||
increase_amount = (
|
||||
state.validators[delta.index].effective_balance * delta.reward_coefficient // REWARD_COEFFICIENT_BASE
|
||||
)
|
||||
increase_balance(state, delta.index, increase_amount)
|
||||
decrease_balance(state, delta.index, delta.block_fee)
|
||||
state.next_shard_receipt_period[receipt_proof.shard] += 1
|
||||
proposer_index = get_beacon_proposer_index(state)
|
||||
increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
|
||||
```
|
||||
|
||||
## Changes
|
||||
|
||||
### Phase 0 container updates
|
||||
|
||||
Add the following fields to the end of the specified container objects.
|
||||
|
||||
#### `BeaconState`
|
||||
|
||||
```python
|
||||
class BeaconState(Container):
|
||||
# Period committees
|
||||
period_committee_roots: Vector[Root, PERIOD_COMMITTEE_ROOT_LENGTH]
|
||||
next_shard_receipt_period: Vector[uint64, SHARD_COUNT]
|
||||
```
|
||||
|
||||
`period_committee_roots` values are initialized to `Bytes32()` (empty bytes value).
|
||||
`next_shard_receipt_period` values are initialized to `compute_epoch_at_slot(PHASE_1_FORK_SLOT) // EPOCHS_PER_SHARD_PERIOD`.
|
||||
|
||||
#### `BeaconBlockBody`
|
||||
|
||||
```python
|
||||
class BeaconBlockBody(Container):
|
||||
shard_receipt_proofs: List[ShardReceiptProof, MAX_SHARD_RECEIPT_PROOFS]
|
||||
```
|
||||
|
||||
`shard_receipt_proofs` is initialized to `[]`.
|
||||
|
||||
### Persistent committees
|
||||
|
||||
Run `update_period_committee` immediately before `process_final_updates`:
|
||||
|
||||
```python
|
||||
# begin insert @update_period_committee
|
||||
update_period_committee(state)
|
||||
# end insert @update_period_committee
|
||||
def update_period_committee(state: BeaconState) -> None:
|
||||
"""
|
||||
Updates period committee roots at boundary blocks.
|
||||
"""
|
||||
if (get_current_epoch(state) + 1) % EPOCHS_PER_SHARD_PERIOD != 0:
|
||||
return
|
||||
|
||||
period = (get_current_epoch(state) + 1) // EPOCHS_PER_SHARD_PERIOD
|
||||
committees = Vector[CompactCommittee, SHARD_COUNT]([
|
||||
committee_to_compact_committee(
|
||||
state,
|
||||
get_period_committee(state, Shard(shard), Epoch(get_current_epoch(state) + 1)),
|
||||
)
|
||||
for shard in range(SHARD_COUNT)
|
||||
])
|
||||
state.period_committee_roots[period % PERIOD_COMMITTEE_ROOT_LENGTH] = hash_tree_root(committees)
|
||||
```
|
||||
|
||||
### Shard receipt processing
|
||||
|
||||
Run `process_shard_receipt_proof` on each `ShardReceiptProof` during block processing.
|
||||
|
||||
```python
|
||||
# begin insert @process_shard_receipt_proofs
|
||||
(body.shard_receipt_proofs, process_shard_receipt_proof),
|
||||
# end insert @process_shard_receipt_proofs
|
||||
```
|
|
@ -0,0 +1,908 @@
|
|||
# Ethereum 2.0 Phase 1 -- The Beacon Chain for Shards
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents**
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Configuration](#configuration)
|
||||
- [Misc](#misc)
|
||||
- [Updated containers](#updated-containers)
|
||||
- [Extended `AttestationData`](#extended-attestationdata)
|
||||
- [Extended `Attestation`](#extended-attestation)
|
||||
- [Extended `PendingAttestation`](#extended-pendingattestation)
|
||||
- [`IndexedAttestation`](#indexedattestation)
|
||||
- [Extended `AttesterSlashing`](#extended-attesterslashing)
|
||||
- [Extended `Validator`](#extended-validator)
|
||||
- [Extended `BeaconBlockBody`](#extended-beaconblockbody)
|
||||
- [Extended `BeaconBlock`](#extended-beaconblock)
|
||||
- [Extended `SignedBeaconBlock`](#extended-signedbeaconblock)
|
||||
- [Extended `BeaconState`](#extended-beaconstate)
|
||||
- [New containers](#new-containers)
|
||||
- [`ShardBlockWrapper`](#shardblockwrapper)
|
||||
- [`ShardSignableHeader`](#shardsignableheader)
|
||||
- [`ShardState`](#shardstate)
|
||||
- [`ShardTransition`](#shardtransition)
|
||||
- [`CompactCommittee`](#compactcommittee)
|
||||
- [`AttestationCustodyBitWrapper`](#attestationcustodybitwrapper)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc-1)
|
||||
- [`get_previous_slot`](#get_previous_slot)
|
||||
- [`pack_compact_validator`](#pack_compact_validator)
|
||||
- [`committee_to_compact_committee`](#committee_to_compact_committee)
|
||||
- [`compute_shard_from_committee_index`](#compute_shard_from_committee_index)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_active_shard_count`](#get_active_shard_count)
|
||||
- [`get_online_validator_indices`](#get_online_validator_indices)
|
||||
- [`get_shard_committee`](#get_shard_committee)
|
||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||
- [`get_light_client_committee`](#get_light_client_committee)
|
||||
- [`get_indexed_attestation`](#get_indexed_attestation)
|
||||
- [`get_updated_gasprice`](#get_updated_gasprice)
|
||||
- [`get_start_shard`](#get_start_shard)
|
||||
- [`get_shard`](#get_shard)
|
||||
- [`get_next_slot_for_shard`](#get_next_slot_for_shard)
|
||||
- [`get_offset_slots`](#get_offset_slots)
|
||||
- [Predicates](#predicates)
|
||||
- [Updated `is_valid_indexed_attestation`](#updated-is_valid_indexed_attestation)
|
||||
- [Block processing](#block-processing)
|
||||
- [Operations](#operations)
|
||||
- [New Attestation processing](#new-attestation-processing)
|
||||
- [`validate_attestation`](#validate_attestation)
|
||||
- [`apply_shard_transition`](#apply_shard_transition)
|
||||
- [`process_crosslink_for_shard`](#process_crosslink_for_shard)
|
||||
- [`process_crosslinks`](#process_crosslinks)
|
||||
- [`process_attestations`](#process_attestations)
|
||||
- [New Attester slashing processing](#new-attester-slashing-processing)
|
||||
- [Shard transition false positives](#shard-transition-false-positives)
|
||||
- [Light client processing](#light-client-processing)
|
||||
- [Epoch transition](#epoch-transition)
|
||||
- [Custody game updates](#custody-game-updates)
|
||||
- [Online-tracking](#online-tracking)
|
||||
- [Light client committee updates](#light-client-committee-updates)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the extensions made to the Phase 0 design of The Beacon Chain
|
||||
to facilitate the new shards as part of Phase 1 of Eth2.
|
||||
|
||||
## Custom types
|
||||
|
||||
We define the following Python custom types for type hinting and readability:
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `Shard` | `uint64` | a shard number |
|
||||
| `OnlineEpochs` | `uint8` | online countdown epochs |
|
||||
|
||||
## Configuration
|
||||
|
||||
Configuration is not namespaced. Instead it is strictly an extension;
|
||||
no constants of phase 0 change, but new constants are adopted for changing behaviors.
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | - | - |
|
||||
| `MAX_SHARDS` | `2**10` (= 1024) | - | - |
|
||||
| `ONLINE_PERIOD` | `OnlineEpochs(2**3)` (= 8) | online epochs | ~51 min |
|
||||
| `LIGHT_CLIENT_COMMITTEE_SIZE` | `2**7` (= 128) | - | - |
|
||||
| `LIGHT_CLIENT_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
||||
| `SHARD_COMMITTEE_PERIOD` | `Epoch(2**8)` (= 256) | epochs | ~27 hours |
|
||||
| `MAX_SHARD_BLOCK_SIZE` | `2**20` (= 1,048,576) | |
|
||||
| `TARGET_SHARD_BLOCK_SIZE` | `2**18` (= 262,144) | |
|
||||
| `SHARD_BLOCK_OFFSETS` | `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233]` | |
|
||||
| `MAX_SHARD_BLOCKS_PER_ATTESTATION` | `len(SHARD_BLOCK_OFFSETS)` | |
|
||||
| `MAX_GASPRICE` | `Gwei(2**14)` (= 16,384) | Gwei | |
|
||||
| `MIN_GASPRICE` | `Gwei(2**5)` (= 32) | Gwei | |
|
||||
| `GASPRICE_ADJUSTMENT_COEFFICIENT` | `2**3` (= 8) | |
|
||||
| `DOMAIN_SHARD_PROPOSAL` | `DomainType('0x80000000')` | |
|
||||
| `DOMAIN_SHARD_COMMITTEE` | `DomainType('0x81000000')` | |
|
||||
| `DOMAIN_LIGHT_CLIENT` | `DomainType('0x82000000')` | |
|
||||
|
||||
## Updated containers
|
||||
|
||||
The following containers have updated definitions in Phase 1.
|
||||
|
||||
### Extended `AttestationData`
|
||||
|
||||
```python
|
||||
class AttestationData(Container):
|
||||
slot: Slot
|
||||
index: CommitteeIndex
|
||||
# LMD GHOST vote
|
||||
beacon_block_root: Root
|
||||
# FFG vote
|
||||
source: Checkpoint
|
||||
target: Checkpoint
|
||||
# Current-slot shard block root
|
||||
head_shard_root: Root
|
||||
# Shard transition root
|
||||
shard_transition_root: Root
|
||||
```
|
||||
|
||||
### Extended `Attestation`
|
||||
|
||||
```python
|
||||
class Attestation(Container):
|
||||
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
|
||||
data: AttestationData
|
||||
custody_bits_blocks: List[Bitlist[MAX_VALIDATORS_PER_COMMITTEE], MAX_SHARD_BLOCKS_PER_ATTESTATION]
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### Extended `PendingAttestation`
|
||||
|
||||
```python
|
||||
class PendingAttestation(Container):
|
||||
aggregation_bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]
|
||||
data: AttestationData
|
||||
inclusion_delay: Slot
|
||||
proposer_index: ValidatorIndex
|
||||
crosslink_success: boolean
|
||||
```
|
||||
|
||||
### `IndexedAttestation`
|
||||
|
||||
```python
|
||||
class IndexedAttestation(Container):
|
||||
committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]
|
||||
attestation: Attestation
|
||||
```
|
||||
|
||||
#### Extended `AttesterSlashing`
|
||||
|
||||
Note that the `attestation_1` and `attestation_2` have a new `IndexedAttestation` definition.
|
||||
|
||||
```python
|
||||
class AttesterSlashing(Container):
|
||||
attestation_1: IndexedAttestation
|
||||
attestation_2: IndexedAttestation
|
||||
```
|
||||
|
||||
### Extended `Validator`
|
||||
|
||||
```python
|
||||
class Validator(Container):
|
||||
pubkey: BLSPubkey
|
||||
withdrawal_credentials: Bytes32 # Commitment to pubkey for withdrawals
|
||||
effective_balance: Gwei # Balance at stake
|
||||
slashed: boolean
|
||||
# Status epochs
|
||||
activation_eligibility_epoch: Epoch # When criteria for activation were met
|
||||
activation_epoch: Epoch
|
||||
exit_epoch: Epoch
|
||||
withdrawable_epoch: Epoch # When validator can withdraw funds
|
||||
# Custody game
|
||||
# next_custody_secret_to_reveal is initialised to the custody period
|
||||
# (of the particular validator) in which the validator is activated
|
||||
# = get_custody_period_for_validator(...)
|
||||
next_custody_secret_to_reveal: uint64
|
||||
max_reveal_lateness: Epoch
|
||||
```
|
||||
|
||||
### Extended `BeaconBlockBody`
|
||||
|
||||
```python
|
||||
class BeaconBlockBody(Container):
|
||||
randao_reveal: BLSSignature
|
||||
eth1_data: Eth1Data # Eth1 data vote
|
||||
graffiti: Bytes32 # Arbitrary data
|
||||
# Slashings
|
||||
proposer_slashings: List[ProposerSlashing, MAX_PROPOSER_SLASHINGS]
|
||||
attester_slashings: List[AttesterSlashing, MAX_ATTESTER_SLASHINGS]
|
||||
# Attesting
|
||||
attestations: List[Attestation, MAX_ATTESTATIONS]
|
||||
# Entry & exit
|
||||
deposits: List[Deposit, MAX_DEPOSITS]
|
||||
voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS]
|
||||
# Custody game
|
||||
custody_slashings: List[SignedCustodySlashing, MAX_CUSTODY_SLASHINGS]
|
||||
custody_key_reveals: List[CustodyKeyReveal, MAX_CUSTODY_KEY_REVEALS]
|
||||
early_derived_secret_reveals: List[EarlyDerivedSecretReveal, MAX_EARLY_DERIVED_SECRET_REVEALS]
|
||||
# Shards
|
||||
shard_transitions: Vector[ShardTransition, MAX_SHARDS]
|
||||
# Light clients
|
||||
light_client_signature_bitfield: Bitvector[LIGHT_CLIENT_COMMITTEE_SIZE]
|
||||
light_client_signature: BLSSignature
|
||||
```
|
||||
|
||||
### Extended `BeaconBlock`
|
||||
|
||||
Note that the `body` has a new `BeaconBlockBody` definition.
|
||||
|
||||
```python
|
||||
class BeaconBlock(Container):
|
||||
slot: Slot
|
||||
proposer_index: ValidatorIndex
|
||||
parent_root: Root
|
||||
state_root: Root
|
||||
body: BeaconBlockBody
|
||||
```
|
||||
|
||||
#### Extended `SignedBeaconBlock`
|
||||
|
||||
Note that the `message` has a new `BeaconBlock` definition.
|
||||
|
||||
```python
|
||||
class SignedBeaconBlock(Container):
|
||||
message: BeaconBlock
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### Extended `BeaconState`
|
||||
|
||||
Note that aside from the new additions, `Validator` and `PendingAttestation` have new definitions.
|
||||
|
||||
```python
|
||||
class BeaconState(Container):
|
||||
# Versioning
|
||||
genesis_time: uint64
|
||||
genesis_validators_root: Root
|
||||
slot: Slot
|
||||
fork: Fork
|
||||
# History
|
||||
latest_block_header: BeaconBlockHeader
|
||||
block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT]
|
||||
historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT]
|
||||
# Eth1
|
||||
eth1_data: Eth1Data
|
||||
eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH]
|
||||
eth1_deposit_index: uint64
|
||||
# Registry
|
||||
validators: List[Validator, VALIDATOR_REGISTRY_LIMIT]
|
||||
balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT]
|
||||
# Randomness
|
||||
randao_mixes: Vector[Root, EPOCHS_PER_HISTORICAL_VECTOR]
|
||||
# Slashings
|
||||
slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances
|
||||
# Attestations
|
||||
previous_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
||||
current_epoch_attestations: List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH]
|
||||
# Finality
|
||||
justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch
|
||||
previous_justified_checkpoint: Checkpoint # Previous epoch snapshot
|
||||
current_justified_checkpoint: Checkpoint
|
||||
finalized_checkpoint: Checkpoint
|
||||
# Phase 1
|
||||
shard_states: List[ShardState, MAX_SHARDS]
|
||||
online_countdown: List[OnlineEpochs, VALIDATOR_REGISTRY_LIMIT]  # not a raw byte array, considering its large size.
|
||||
current_light_committee: CompactCommittee
|
||||
next_light_committee: CompactCommittee
|
||||
# Custody game
|
||||
# Future derived secrets already exposed; contains the indices of the exposed validator
|
||||
# at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
|
||||
exposed_derived_secrets: Vector[List[ValidatorIndex, MAX_EARLY_DERIVED_SECRET_REVEALS * SLOTS_PER_EPOCH],
|
||||
EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
|
||||
```
|
||||
|
||||
## New containers
|
||||
|
||||
The following containers are new in Phase 1.
|
||||
|
||||
### `ShardBlockWrapper`
|
||||
|
||||
_Wrapper for being broadcast over the network._
|
||||
|
||||
```python
|
||||
class ShardBlockWrapper(Container):
|
||||
shard_parent_root: Root
|
||||
beacon_parent_root: Root
|
||||
slot: Slot
|
||||
body: ByteList[MAX_SHARD_BLOCK_SIZE]
|
||||
signature: BLSSignature
|
||||
```

### `ShardSignableHeader`

```python
class ShardSignableHeader(Container):
    shard_parent_root: Root
    beacon_parent_root: Root
    slot: Slot
    body_root: Root
```

### `ShardState`

```python
class ShardState(Container):
    slot: Slot
    gasprice: Gwei
    data: Bytes32
    latest_block_root: Root
```

### `ShardTransition`

```python
class ShardTransition(Container):
    # Starting from slot
    start_slot: Slot
    # Shard block lengths
    shard_block_lengths: List[uint64, MAX_SHARD_BLOCKS_PER_ATTESTATION]
    # Shard data roots
    shard_data_roots: List[Bytes32, MAX_SHARD_BLOCKS_PER_ATTESTATION]
    # Intermediate shard states
    shard_states: List[ShardState, MAX_SHARD_BLOCKS_PER_ATTESTATION]
    # Proposer signature aggregate
    proposer_signature_aggregate: BLSSignature
```

### `CompactCommittee`

```python
class CompactCommittee(Container):
    pubkeys: List[BLSPubkey, MAX_VALIDATORS_PER_COMMITTEE]
    compact_validators: List[uint64, MAX_VALIDATORS_PER_COMMITTEE]
```

### `AttestationCustodyBitWrapper`

```python
class AttestationCustodyBitWrapper(Container):
    attestation_data_root: Root
    block_index: uint64
    bit: boolean
```

## Helper functions

### Misc

#### `get_previous_slot`

```python
def get_previous_slot(slot: Slot) -> Slot:
    if slot > 0:
        return Slot(slot - 1)
    else:
        return Slot(0)
```

#### `pack_compact_validator`

```python
def pack_compact_validator(index: int, slashed: bool, balance_in_increments: int) -> int:
    """
    Creates a compact validator object representing index, slashed status, and compressed balance.
    Takes as input balance-in-increments (// EFFECTIVE_BALANCE_INCREMENT) to preserve symmetry with
    the unpacking function.
    """
    return (index << 16) + (slashed << 15) + balance_in_increments
```
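
For reference, the packing is invertible. The following `unpack_compact_validator` is an illustrative sketch and is not defined in this spec: the index occupies the high bits, the slashed flag bit 15, and the balance increments the low 15 bits.

```python
from typing import Tuple

def unpack_compact_validator(compact: int) -> Tuple[int, bool, int]:
    # Illustrative inverse of pack_compact_validator (not part of the spec)
    index = compact >> 16                        # high bits
    slashed = bool((compact >> 15) % 2)          # bit 15
    balance_in_increments = compact % 2**15      # low 15 bits
    return index, slashed, balance_in_increments

# Round-trip check
assert unpack_compact_validator((42 << 16) + (1 << 15) + 1234) == (42, True, 1234)
```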

#### `committee_to_compact_committee`

```python
def committee_to_compact_committee(state: BeaconState, committee: Sequence[ValidatorIndex]) -> CompactCommittee:
    """
    Given a state and a list of validator indices, outputs the CompactCommittee representing them.
    """
    validators = [state.validators[i] for i in committee]
    compact_validators = [
        pack_compact_validator(i, v.slashed, v.effective_balance // EFFECTIVE_BALANCE_INCREMENT)
        for i, v in zip(committee, validators)
    ]
    pubkeys = [v.pubkey for v in validators]
    return CompactCommittee(pubkeys=pubkeys, compact_validators=compact_validators)
```

#### `compute_shard_from_committee_index`

```python
def compute_shard_from_committee_index(state: BeaconState, index: CommitteeIndex, slot: Slot) -> Shard:
    active_shards = get_active_shard_count(state)
    return Shard((index + get_start_shard(state, slot)) % active_shards)
```
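
A quick illustration of the wrap-around arithmetic, using toy values rather than spec constants:

```python
# Toy values only: 64 active shards, start shard 60 at this slot
active_shards, start_shard = 64, 60
assert (5 + start_shard) % active_shards == 1    # committee index 5 maps to shard 1
assert (3 + start_shard) % active_shards == 63   # committee index 3 maps to shard 63
```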

### Beacon state accessors

#### `get_active_shard_count`

```python
def get_active_shard_count(state: BeaconState) -> uint64:
    return len(state.shard_states)  # May adapt in the future, or change over time.
```

#### `get_online_validator_indices`

```python
def get_online_validator_indices(state: BeaconState) -> Set[ValidatorIndex]:
    active_validators = get_active_validator_indices(state, get_current_epoch(state))
    return set([i for i in active_validators if state.online_countdown[i] != 0])
```

#### `get_shard_committee`

```python
def get_shard_committee(beacon_state: BeaconState, epoch: Epoch, shard: Shard) -> Sequence[ValidatorIndex]:
    source_epoch = epoch - epoch % SHARD_COMMITTEE_PERIOD
    if source_epoch > 0:
        source_epoch -= SHARD_COMMITTEE_PERIOD
    active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
    seed = get_seed(beacon_state, source_epoch, DOMAIN_SHARD_COMMITTEE)
    return compute_committee(active_validator_indices, seed, shard, get_active_shard_count(beacon_state))
```

#### `get_shard_proposer_index`

```python
def get_shard_proposer_index(beacon_state: BeaconState, slot: Slot, shard: Shard) -> ValidatorIndex:
    committee = get_shard_committee(beacon_state, compute_epoch_at_slot(slot), shard)
    r = bytes_to_int(get_seed(beacon_state, get_current_epoch(beacon_state), DOMAIN_SHARD_COMMITTEE)[:8])
    return committee[r % len(committee)]
```

#### `get_light_client_committee`

```python
def get_light_client_committee(beacon_state: BeaconState, epoch: Epoch) -> Sequence[ValidatorIndex]:
    source_epoch = epoch - epoch % LIGHT_CLIENT_COMMITTEE_PERIOD
    if source_epoch > 0:
        source_epoch -= LIGHT_CLIENT_COMMITTEE_PERIOD
    active_validator_indices = get_active_validator_indices(beacon_state, source_epoch)
    seed = get_seed(beacon_state, source_epoch, DOMAIN_LIGHT_CLIENT)
    active_shards = get_active_shard_count(beacon_state)
    return compute_committee(active_validator_indices, seed, 0, active_shards)[:TARGET_COMMITTEE_SIZE]
```

#### `get_indexed_attestation`

```python
def get_indexed_attestation(beacon_state: BeaconState, attestation: Attestation) -> IndexedAttestation:
    committee = get_beacon_committee(beacon_state, attestation.data.slot, attestation.data.index)
    return IndexedAttestation(
        committee=committee,
        attestation=attestation,
    )
```

#### `get_updated_gasprice`

```python
def get_updated_gasprice(prev_gasprice: Gwei, length: uint64) -> Gwei:
    if length > TARGET_SHARD_BLOCK_SIZE:
        delta = (prev_gasprice * (length - TARGET_SHARD_BLOCK_SIZE)
                 // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
        return min(prev_gasprice + delta, MAX_GASPRICE)
    else:
        delta = (prev_gasprice * (TARGET_SHARD_BLOCK_SIZE - length)
                 // TARGET_SHARD_BLOCK_SIZE // GASPRICE_ADJUSTMENT_COEFFICIENT)
        return max(prev_gasprice, MIN_GASPRICE + delta) - delta
```
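
To make the EIP-1559-style adjustment concrete, here is a toy walk-through. The constants below are illustrative only, not the spec's configuration values:

```python
# Illustrative constants only, not the spec's configuration values
TARGET, COEFF, MIN_P, MAX_P = 100, 8, 8, 2**14
prev = 800
# Oversized block (length 150): delta = 800 * 50 // 100 // 8 = 50, price rises to 850
assert min(prev + prev * (150 - TARGET) // TARGET // COEFF, MAX_P) == 850
# Undersized block (length 50): same delta, price falls symmetrically to 750
assert max(prev, MIN_P + 50) - 50 == 750
```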

#### `get_start_shard`

```python
def get_start_shard(state: BeaconState, slot: Slot) -> Shard:
    # TODO: implement start shard logic
    return Shard(0)
```

#### `get_shard`

```python
def get_shard(state: BeaconState, attestation: Attestation) -> Shard:
    return compute_shard_from_committee_index(state, attestation.data.index, attestation.data.slot)
```

#### `get_next_slot_for_shard`

```python
def get_next_slot_for_shard(state: BeaconState, shard: Shard) -> Slot:
    return Slot(state.shard_states[shard].slot + 1)
```

#### `get_offset_slots`

```python
def get_offset_slots(state: BeaconState, start_slot: Slot) -> Sequence[Slot]:
    return [Slot(start_slot + x) for x in SHARD_BLOCK_OFFSETS if start_slot + x < state.slot]
```
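
A quick worked example, assuming the Fibonacci `SHARD_BLOCK_OFFSETS` of `[1, 2, 3, 5, 8, 13, 21, 34, 55, 89]` used elsewhere in Phase 1; only offsets strictly before the current slot are returned:

```python
# Assumed offsets for illustration
SHARD_BLOCK_OFFSETS = [1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
start_slot, current_slot = 100, 110
offsets = [start_slot + x for x in SHARD_BLOCK_OFFSETS if start_slot + x < current_slot]
assert offsets == [101, 102, 103, 105, 108]  # offset 13 would land at 113 >= 110, so it is excluded
```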

### Predicates

#### Updated `is_valid_indexed_attestation`

Note that this replaces the Phase 0 `is_valid_indexed_attestation`.

```python
def is_valid_indexed_attestation(state: BeaconState, indexed_attestation: IndexedAttestation) -> bool:
    """
    Check if ``indexed_attestation`` has valid indices and signature.
    """
    # Verify aggregate signature
    all_pubkeys = []
    all_signing_roots = []
    attestation = indexed_attestation.attestation
    domain = get_domain(state, DOMAIN_BEACON_ATTESTER, attestation.data.target.epoch)
    aggregation_bits = attestation.aggregation_bits
    assert len(aggregation_bits) == len(indexed_attestation.committee)

    if len(attestation.custody_bits_blocks) == 0:
        # Fall back on phase 0 behavior if there is no shard data.
        for participant, abit in zip(indexed_attestation.committee, aggregation_bits):
            if abit:
                all_pubkeys.append(state.validators[participant].pubkey)
        signing_root = compute_signing_root(indexed_attestation.attestation.data, domain)
        return bls.FastAggregateVerify(all_pubkeys, signing_root, signature=attestation.signature)
    else:
        for i, custody_bits in enumerate(attestation.custody_bits_blocks):
            assert len(custody_bits) == len(indexed_attestation.committee)
            for participant, abit, cbit in zip(indexed_attestation.committee, aggregation_bits, custody_bits):
                if abit:
                    all_pubkeys.append(state.validators[participant].pubkey)
                    # Note: only 2N distinct message hashes
                    all_signing_roots.append(compute_signing_root(
                        AttestationCustodyBitWrapper(hash_tree_root(attestation.data), i, cbit), domain))
                else:
                    assert not cbit
        return bls.AggregateVerify(zip(all_pubkeys, all_signing_roots), signature=attestation.signature)
```

### Block processing

```python
def process_block(state: BeaconState, block: BeaconBlock) -> None:
    process_block_header(state, block)
    process_randao(state, block.body)
    process_eth1_data(state, block.body)
    verify_shard_transition_false_positives(state, block.body)
    process_light_client_signatures(state, block.body)
    process_operations(state, block.body)
```

#### Operations

```python
def process_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    # Verify that outstanding deposits are processed up to the maximum number of deposits
    assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index)

    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
            fn(state, operation)

    for_ops(body.proposer_slashings, process_proposer_slashing)
    for_ops(body.attester_slashings, process_attester_slashing)

    # New attestation processing
    process_attestations(state, body, body.attestations)

    for_ops(body.deposits, process_deposit)
    for_ops(body.voluntary_exits, process_voluntary_exit)

    # See custody game spec.
    process_custody_game_operations(state, body)

    # TODO process_operations(body.shard_receipt_proofs, process_shard_receipt_proofs)
```

##### New Attestation processing

###### `validate_attestation`

```python
def validate_attestation(state: BeaconState, attestation: Attestation) -> None:
    data = attestation.data
    assert data.index < get_committee_count_at_slot(state, data.slot)
    assert data.index < get_active_shard_count(state)
    assert data.target.epoch in (get_previous_epoch(state), get_current_epoch(state))
    assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY <= state.slot <= data.slot + SLOTS_PER_EPOCH

    committee = get_beacon_committee(state, data.slot, data.index)
    assert len(attestation.aggregation_bits) == len(committee)

    if attestation.data.target.epoch == get_current_epoch(state):
        assert attestation.data.source == state.current_justified_checkpoint
    else:
        assert attestation.data.source == state.previous_justified_checkpoint

    shard = get_shard(state, attestation)
    shard_start_slot = get_next_slot_for_shard(state, shard)

    # Signature check
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
    # Type 1: on-time attestations
    if attestation.custody_bits_blocks != []:
        # Correct slot
        assert data.slot + MIN_ATTESTATION_INCLUSION_DELAY == state.slot
        # Correct data root count
        assert len(attestation.custody_bits_blocks) == len(get_offset_slots(state, shard_start_slot))
        # Correct parent block root
        assert data.beacon_block_root == get_block_root_at_slot(state, get_previous_slot(state.slot))
    # Type 2: no shard transition, no custody bits  # TODO: could only allow for older attestations.
    else:
        # assert state.slot - compute_start_slot_at_epoch(compute_epoch_at_slot(data.slot)) < SLOTS_PER_EPOCH
        assert data.shard_transition_root == Root()
```

###### `apply_shard_transition`

```python
def apply_shard_transition(state: BeaconState, shard: Shard, transition: ShardTransition) -> None:
    # Slot the attestation starts counting from
    start_slot = get_next_slot_for_shard(state, shard)

    # Correct data root count
    offset_slots = get_offset_slots(state, start_slot)
    assert (
        len(transition.shard_data_roots)
        == len(transition.shard_states)
        == len(transition.shard_block_lengths)
        == len(offset_slots)
    )
    assert transition.start_slot == start_slot

    # Reconstruct shard headers
    headers = []
    proposers = []
    shard_parent_root = state.shard_states[shard].latest_block_root
    for i in range(len(offset_slots)):
        if any(transition.shard_data_roots):
            headers.append(ShardSignableHeader(
                shard_parent_root=shard_parent_root,
                beacon_parent_root=get_block_root_at_slot(state, get_previous_slot(state.slot)),
                slot=offset_slots[i],
                body_root=transition.shard_data_roots[i]
            ))
            proposers.append(get_shard_proposer_index(state, offset_slots[i], shard))
            shard_parent_root = hash_tree_root(headers[-1])

    # Verify correct calculation of gas prices and slots
    prev_gasprice = state.shard_states[shard].gasprice
    for i in range(len(offset_slots)):
        shard_state = transition.shard_states[i]
        block_length = transition.shard_block_lengths[i]
        assert shard_state.gasprice == get_updated_gasprice(prev_gasprice, block_length)
        assert shard_state.slot == offset_slots[i]
        prev_gasprice = shard_state.gasprice

    pubkeys = [state.validators[proposer].pubkey for proposer in proposers]
    signing_roots = [
        compute_signing_root(header, get_domain(state, DOMAIN_SHARD_PROPOSAL, compute_epoch_at_slot(header.slot)))
        for header in headers
    ]
    # Verify combined proposer signature
    assert bls.AggregateVerify(zip(pubkeys, signing_roots), signature=transition.proposer_signature_aggregate)

    # Save updated state
    state.shard_states[shard] = transition.shard_states[-1]
    state.shard_states[shard].slot = state.slot - 1
```

###### `process_crosslink_for_shard`

```python
def process_crosslink_for_shard(state: BeaconState,
                                shard: Shard,
                                shard_transition: ShardTransition,
                                attestations: Sequence[Attestation]) -> Root:
    committee = get_beacon_committee(state, state.slot, shard)
    online_indices = get_online_validator_indices(state)

    # Loop over all shard transition roots
    shard_transition_roots = set([a.data.shard_transition_root for a in attestations])
    for shard_transition_root in sorted(shard_transition_roots):
        transition_attestations = [a for a in attestations if a.data.shard_transition_root == shard_transition_root]
        transition_participants: Set[ValidatorIndex] = set()
        for attestation in transition_attestations:
            participants = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
            transition_participants = transition_participants.union(participants)

        enough_online_stake = (
            get_total_balance(state, online_indices.intersection(transition_participants)) * 3 >=
            get_total_balance(state, online_indices.intersection(committee)) * 2
        )
        # If not enough stake, try next transition root
        if not enough_online_stake:
            continue

        # Attestation <-> shard transition consistency
        assert shard_transition_root == hash_tree_root(shard_transition)
        assert attestation.data.head_shard_root == shard_transition.shard_data_roots[-1]

        # Apply transition
        apply_shard_transition(state, shard, shard_transition)
        # Apply proposer reward and cost
        beacon_proposer_index = get_beacon_proposer_index(state)
        estimated_attester_reward = sum([get_base_reward(state, attester) for attester in transition_participants])
        proposer_reward = Gwei(estimated_attester_reward // PROPOSER_REWARD_QUOTIENT)
        increase_balance(state, beacon_proposer_index, proposer_reward)
        states_slots_lengths = zip(
            shard_transition.shard_states,
            get_offset_slots(state, get_next_slot_for_shard(state, shard)),
            shard_transition.shard_block_lengths
        )
        for shard_state, slot, length in states_slots_lengths:
            proposer_index = get_shard_proposer_index(state, slot, shard)
            decrease_balance(state, proposer_index, shard_state.gasprice * length)

        # Return winning transition root
        return shard_transition_root

    # No winning transition root, ensure empty and return empty root
    assert shard_transition == ShardTransition()
    return Root()
```

###### `process_crosslinks`

```python
def process_crosslinks(state: BeaconState,
                       block_body: BeaconBlockBody,
                       attestations: Sequence[Attestation]) -> Set[Tuple[Shard, Root]]:
    winners: Set[Tuple[Shard, Root]] = set()
    committee_count = get_committee_count_at_slot(state, state.slot)
    for committee_index in map(CommitteeIndex, range(committee_count)):
        shard = compute_shard_from_committee_index(state, committee_index, state.slot)
        # All attestations in the block for this shard
        shard_attestations = [
            attestation for attestation in attestations
            if get_shard(state, attestation) == shard and attestation.data.slot == state.slot
        ]
        shard_transition = block_body.shard_transitions[shard]
        winning_root = process_crosslink_for_shard(state, shard, shard_transition, shard_attestations)
        if winning_root != Root():
            winners.add((shard, winning_root))
    return winners
```

###### `process_attestations`

```python
def process_attestations(state: BeaconState, block_body: BeaconBlockBody, attestations: Sequence[Attestation]) -> None:
    # Basic validation
    for attestation in attestations:
        validate_attestation(state, attestation)

    # Process crosslinks
    winners = process_crosslinks(state, block_body, attestations)

    # Store pending attestations for epoch processing
    for attestation in attestations:
        is_winning_transition = (get_shard(state, attestation), attestation.data.shard_transition_root) in winners
        pending_attestation = PendingAttestation(
            aggregation_bits=attestation.aggregation_bits,
            data=attestation.data,
            inclusion_delay=state.slot - attestation.data.slot,
            crosslink_success=is_winning_transition and attestation.data.slot == state.slot,
            proposer_index=get_beacon_proposer_index(state),
        )
        if attestation.data.target.epoch == get_current_epoch(state):
            state.current_epoch_attestations.append(pending_attestation)
        else:
            state.previous_epoch_attestations.append(pending_attestation)
```

##### New Attester slashing processing

```python
def get_indices_from_committee(
        committee: List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE],
        bits: Bitlist[MAX_VALIDATORS_PER_COMMITTEE]) -> List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE]:
    assert len(bits) == len(committee)
    return List[ValidatorIndex, MAX_VALIDATORS_PER_COMMITTEE](
        [validator_index for i, validator_index in enumerate(committee) if bits[i]]
    )
```

```python
def process_attester_slashing(state: BeaconState, attester_slashing: AttesterSlashing) -> None:
    indexed_attestation_1 = attester_slashing.attestation_1
    indexed_attestation_2 = attester_slashing.attestation_2
    assert is_slashable_attestation_data(
        indexed_attestation_1.attestation.data,
        indexed_attestation_2.attestation.data,
    )
    assert is_valid_indexed_attestation(state, indexed_attestation_1)
    assert is_valid_indexed_attestation(state, indexed_attestation_2)

    indices_1 = get_indices_from_committee(
        indexed_attestation_1.committee,
        indexed_attestation_1.attestation.aggregation_bits,
    )
    indices_2 = get_indices_from_committee(
        indexed_attestation_2.committee,
        indexed_attestation_2.attestation.aggregation_bits,
    )

    slashed_any = False
    indices = set(indices_1).intersection(indices_2)
    for index in sorted(indices):
        if is_slashable_validator(state.validators[index], get_current_epoch(state)):
            slash_validator(state, index)
            slashed_any = True
    assert slashed_any
```

#### Shard transition false positives

```python
def verify_shard_transition_false_positives(state: BeaconState, block_body: BeaconBlockBody) -> None:
    # Verify that a `shard_transition` in a block is empty if an attestation was not processed for it
    for shard in range(get_active_shard_count(state)):
        if state.shard_states[shard].slot != state.slot - 1:
            assert block_body.shard_transitions[shard] == ShardTransition()
```

#### Light client processing

```python
def process_light_client_signatures(state: BeaconState, block_body: BeaconBlockBody) -> None:
    committee = get_light_client_committee(state, get_current_epoch(state))
    total_reward = Gwei(0)
    signer_pubkeys = []
    for bit_index, participant_index in enumerate(committee):
        if block_body.light_client_signature_bitfield[bit_index]:
            signer_pubkeys.append(state.validators[participant_index].pubkey)
            increase_balance(state, participant_index, get_base_reward(state, participant_index))
            total_reward += get_base_reward(state, participant_index)

    increase_balance(state, get_beacon_proposer_index(state), Gwei(total_reward // PROPOSER_REWARD_QUOTIENT))

    slot = get_previous_slot(state.slot)
    signing_root = compute_signing_root(get_block_root_at_slot(state, slot),
                                        get_domain(state, DOMAIN_LIGHT_CLIENT, compute_epoch_at_slot(slot)))
    assert bls.FastAggregateVerify(signer_pubkeys, signing_root, signature=block_body.light_client_signature)
```

### Epoch transition

This epoch transition overrides the phase 0 epoch transition:

```python
def process_epoch(state: BeaconState) -> None:
    process_justification_and_finalization(state)
    process_rewards_and_penalties(state)
    process_registry_updates(state)
    process_reveal_deadlines(state)
    process_slashings(state)
    process_final_updates(state)
    process_custody_final_updates(state)
    process_online_tracking(state)
    process_light_client_committee_updates(state)
```

#### Custody game updates

`process_reveal_deadlines` and `process_custody_final_updates` are defined in [the Custody Game spec](./1_custody-game.md).

#### Online-tracking

```python
def process_online_tracking(state: BeaconState) -> None:
    # Slowly remove validators from the "online" set if they do not show up
    for index in range(len(state.validators)):
        if state.online_countdown[index] != 0:
            state.online_countdown[index] = state.online_countdown[index] - 1

    # Process pending attestations
    for pending_attestation in state.current_epoch_attestations + state.previous_epoch_attestations:
        for index in get_attesting_indices(state, pending_attestation.data, pending_attestation.aggregation_bits):
            state.online_countdown[index] = ONLINE_PERIOD
```
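
A small illustration of the countdown semantics. `ONLINE_PERIOD` is assumed to be 8 epochs here for the sake of the example:

```python
ONLINE_PERIOD = 8  # assumed value for illustration only
countdown = 3
countdown = countdown - 1 if countdown != 0 else 0  # absent for an epoch: decremented toward 0
assert countdown == 2
countdown = ONLINE_PERIOD                           # attested this epoch: fully reset
assert countdown == ONLINE_PERIOD
```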

#### Light client committee updates

```python
def process_light_client_committee_updates(state: BeaconState) -> None:
    # Update light client committees
    if get_current_epoch(state) % LIGHT_CLIENT_COMMITTEE_PERIOD == 0:
        state.current_light_committee = state.next_light_committee
        new_committee = get_light_client_committee(state, get_current_epoch(state) + LIGHT_CLIENT_COMMITTEE_PERIOD)
        state.next_light_committee = committee_to_compact_committee(state, new_committee)
```

@ -4,107 +4,62 @@

## Table of contents

<!-- TOC -->
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->

- [Introduction](#introduction)
- [Terminology](#terminology)
- [Constants](#constants)
  - [Misc](#misc)
  - [Custody game parameters](#custody-game-parameters)
- [Configuration](#configuration)
  - [Time parameters](#time-parameters)
  - [Max operations per block](#max-operations-per-block)
  - [Reward and penalty quotients](#reward-and-penalty-quotients)
  - [Signature domain types](#signature-domain-types)
  - [TODO PLACEHOLDER](#todo-placeholder)
- [Data structures](#data-structures)
  - [Custody objects](#custody-objects)
    - [`CustodyChunkChallenge`](#custodychunkchallenge)
    - [`CustodyBitChallenge`](#custodybitchallenge)
    - [`CustodyChunkChallengeRecord`](#custodychunkchallengerecord)
    - [`CustodyBitChallengeRecord`](#custodybitchallengerecord)
    - [`CustodyResponse`](#custodyresponse)
  - [New beacon operations](#new-beacon-operations)
  - [New Beacon Chain operations](#new-beacon-chain-operations)
    - [`CustodySlashing`](#custodyslashing)
    - [`SignedCustodySlashing`](#signedcustodyslashing)
    - [`CustodyKeyReveal`](#custodykeyreveal)
    - [`EarlyDerivedSecretReveal`](#earlyderivedsecretreveal)
- [Phase 0 container updates](#phase-0-container-updates)
  - [`Validator`](#validator)
  - [`BeaconState`](#beaconstate)
  - [`BeaconBlockBody`](#beaconblockbody)
- [Helpers](#helpers)
  - [`ceillog2`](#ceillog2)
  - [`is_valid_merkle_branch_with_mixin`](#is_valid_merkle_branch_with_mixin)
  - [`get_crosslink_chunk_count`](#get_crosslink_chunk_count)
  - [`legendre_bit`](#legendre_bit)
  - [`custody_subchunkify`](#custody_subchunkify)
  - [`get_custody_chunk_bit`](#get_custody_chunk_bit)
  - [`get_chunk_bits_root`](#get_chunk_bits_root)
  - [`custody_atoms`](#custody_atoms)
  - [`compute_custody_bit`](#compute_custody_bit)
  - [`get_randao_epoch_for_custody_period`](#get_randao_epoch_for_custody_period)
  - [`get_custody_period_for_validator`](#get_custody_period_for_validator)
  - [`replace_empty_or_append`](#replace_empty_or_append)
- [Per-block processing](#per-block-processing)
  - [Operations](#operations)
  - [Custody Game Operations](#custody-game-operations)
    - [Custody key reveals](#custody-key-reveals)
    - [Early derived secret reveals](#early-derived-secret-reveals)
    - [Chunk challenges](#chunk-challenges)
    - [Bit challenges](#bit-challenges)
    - [Custody responses](#custody-responses)
    - [Custody Slashings](#custody-slashings)
- [Per-epoch processing](#per-epoch-processing)
  - [Handling of custody-related deadlines](#handling-of-custody-related-deadlines)
  - [Handling of reveal deadlines](#handling-of-reveal-deadlines)
  - [Final updates](#final-updates)

<!-- END doctoc generated TOC please keep comment here to allow auto update -->
<!-- /TOC -->

## Introduction

This document details the beacon chain additions and changes in Phase 1 of Ethereum 2.0 to support the shard data custody game, building upon the [Phase 0](../phase0/beacon-chain.md) specification.

## Terminology

- **Custody game**—
- **Custody period**—
- **Custody chunk**—
- **Custody chunk bit**—
- **Custody chunk challenge**—
- **Custody bit**—
- **Custody bit challenge**—
- **Custody key**—
- **Custody key reveal**—
- **Custody key mask**—
- **Custody response**—
- **Custody response deadline**—

## Constants

### Misc

| Name | Value |
| - | - |
| `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` |
| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |
| `MAX_EPOCHS_PER_CROSSLINK` | `2**6` (= 64) | epochs | ~7 hours |

### Custody game parameters

| Name | Value | Unit |
| - | - | - |
| `BLS12_381_Q` | `4002409555221667393417789825735904156556882819939007885332058136124031650490837864442687629129015664037894272559787` | - |
| `BYTES_PER_CUSTODY_ATOM` | `48` | bytes |

| Name | Value |
| - | - |
| `BYTES_PER_SHARD_BLOCK` | `2**14` (= 16,384) |
| `BYTES_PER_CUSTODY_CHUNK` | `2**9` (= 512) |
| `BYTES_PER_CUSTODY_SUBCHUNK` | `48` |
| `CHUNKS_PER_EPOCH` | `2 * BYTES_PER_SHARD_BLOCK * SLOTS_PER_EPOCH // BYTES_PER_CUSTODY_CHUNK` |
| `MAX_CUSTODY_CHUNKS` | `MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH` |
| `CUSTODY_DATA_DEPTH` | `ceillog2(MAX_CUSTODY_CHUNKS) + 1` |
| `CUSTODY_CHUNK_BIT_DEPTH` | `ceillog2(MAX_EPOCHS_PER_CROSSLINK * CHUNKS_PER_EPOCH // 256) + 2` |

## Configuration

### Time parameters

| Name | Value | Unit | Duration |
| - | - | :-: | :-: |
| `MAX_CHUNK_CHALLENGE_DELAY` | `2**11` (= 2,048) | epochs | ~9 days |
| `CUSTODY_RESPONSE_DEADLINE` | `2**14` (= 16,384) | epochs | ~73 days |
| `RANDAO_PENALTY_EPOCHS` | `2**1` (= 2) | epochs | 12.8 minutes |
| `EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS` | `2**14` | epochs | ~73 days |
| `EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS` | `2**14` (= 16,384) | epochs | ~73 days |
| `EPOCHS_PER_CUSTODY_PERIOD` | `2**11` (= 2,048) | epochs | ~9 days |
| `CUSTODY_PERIOD_TO_RANDAO_PADDING` | `2**11` (= 2,048) | epochs | ~9 days |
| `MAX_REVEAL_LATENESS_DECREMENT` | `2**7` (= 128) | epochs | ~14 hours |
@ -113,17 +68,16 @@ This document details the beacon chain additions and changes in Phase 1 of Ether

| Name | Value |
| - | - |
| `MAX_CUSTODY_KEY_REVEALS` | `2**4` (= 16) |
| `MAX_CUSTODY_KEY_REVEALS` | `2**8` (= 256) |
| `MAX_EARLY_DERIVED_SECRET_REVEALS` | `1` |
| `MAX_CUSTODY_CHUNK_CHALLENGES` | `2**2` (= 4) |
| `MAX_CUSTODY_BIT_CHALLENGES` | `2**2` (= 4) |
| `MAX_CUSTODY_RESPONSES` | `2**5` (= 32) |
| `MAX_CUSTODY_SLASHINGS` | `1` |

### Reward and penalty quotients

| Name | Value |
| - | - |
| `EARLY_DERIVED_SECRET_REVEAL_SLOT_REWARD_MULTIPLE` | `2**1` (= 2) |
| `MINOR_REWARD_QUOTIENT` | `2**8` (= 256) |

### Signature domain types
@ -131,79 +85,35 @@ The following types are defined, mapping into `DomainType` (little endian):

| Name | Value |
| - | - |
| `DOMAIN_CUSTODY_BIT_CHALLENGE` | `DomainType('0x06000000')` |

### TODO PLACEHOLDER

| Name | Value |
| - | - |
| `PLACEHOLDER` | `2**32` |
| `DOMAIN_CUSTODY_BIT_SLASHING` | `DomainType('0x83000000')` |
## Data structures

### Custody objects
### New Beacon Chain operations

#### `CustodyChunkChallenge`
#### `CustodySlashing`

```python
class CustodyChunkChallenge(Container):
    responder_index: ValidatorIndex
class CustodySlashing(Container):
    # Attestation.custody_bits_blocks[data_index][committee.index(malefactor_index)] is the target custody bit to check.
    # (Attestation.data.shard_transition_root as ShardTransition).shard_data_roots[data_index] is the root of the data.
    data_index: uint64
    malefactor_index: ValidatorIndex
    malefactor_secret: BLSSignature
    whistleblower_index: ValidatorIndex
    shard_transition: ShardTransition
    attestation: Attestation
    chunk_index: uint64
    data: ByteList[MAX_SHARD_BLOCK_SIZE]
```

#### `CustodyBitChallenge`
#### `SignedCustodySlashing`

```python
class CustodyBitChallenge(Container):
    responder_index: ValidatorIndex
    attestation: Attestation
    challenger_index: ValidatorIndex
    responder_key: BLSSignature
    chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS]
class SignedCustodySlashing(Container):
    message: CustodySlashing
    signature: BLSSignature
```

#### `CustodyChunkChallengeRecord`

```python
class CustodyChunkChallengeRecord(Container):
    challenge_index: uint64
    challenger_index: ValidatorIndex
    responder_index: ValidatorIndex
    inclusion_epoch: Epoch
    data_root: Root
    depth: uint64
    chunk_index: uint64
```

#### `CustodyBitChallengeRecord`

```python
class CustodyBitChallengeRecord(Container):
    challenge_index: uint64
    challenger_index: ValidatorIndex
    responder_index: ValidatorIndex
    inclusion_epoch: Epoch
    data_root: Root
    chunk_count: uint64
    chunk_bits_merkle_root: Root
    responder_key: BLSSignature
```

#### `CustodyResponse`

```python
class CustodyResponse(Container):
    challenge_index: uint64
    chunk_index: uint64
    chunk: ByteVector[BYTES_PER_CUSTODY_CHUNK]
    data_branch: List[Bytes32, CUSTODY_DATA_DEPTH]
    chunk_bits_branch: List[Bytes32, CUSTODY_CHUNK_BIT_DEPTH]
    chunk_bits_leaf: Bitvector[256]
```

### New beacon operations

#### `CustodyKeyReveal`

@ -233,82 +143,9 @@ class EarlyDerivedSecretReveal(Container):
    mask: Bytes32
```

### Phase 0 container updates

Add the following fields to the end of the specified container objects. Fields with underlying type `uint64` are initialized to `0` and list fields are initialized to `[]`.

#### `Validator`

```python
class Validator(Container):
    # next_custody_secret_to_reveal is initialised to the custody period
    # (of the particular validator) in which the validator is activated
    # = get_custody_period_for_validator(...)
    next_custody_secret_to_reveal: uint64
    max_reveal_lateness: Epoch
```

#### `BeaconState`

```python
class BeaconState(Container):
    custody_chunk_challenge_records: List[CustodyChunkChallengeRecord, PLACEHOLDER]
    custody_bit_challenge_records: List[CustodyBitChallengeRecord, PLACEHOLDER]
    custody_challenge_index: uint64

    # Future derived secrets already exposed; contains the indices of the exposed validator
    # at RANDAO reveal period % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS
    exposed_derived_secrets: Vector[List[ValidatorIndex, PLACEHOLDER],
                                    EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS]
```

#### `BeaconBlockBody`

```python
class BeaconBlockBody(Container):
    custody_chunk_challenges: List[CustodyChunkChallenge, PLACEHOLDER]
    custody_bit_challenges: List[CustodyBitChallenge, PLACEHOLDER]
    custody_responses: List[CustodyResponse, PLACEHOLDER]
    custody_key_reveals: List[CustodyKeyReveal, PLACEHOLDER]
    early_derived_secret_reveals: List[EarlyDerivedSecretReveal, PLACEHOLDER]
```

## Helpers

### `ceillog2`

```python
def ceillog2(x: uint64) -> int:
    return (x - 1).bit_length()
```
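
A few sanity checks on the edge behavior. Note that `ceillog2(1) == 0` and that non-powers of two round up:

```python
assert ceillog2(1) == 0  # (1 - 1).bit_length() == 0
assert ceillog2(2) == 1
assert ceillog2(3) == 2  # rounds up for non-powers of two
assert ceillog2(8) == 3
assert ceillog2(9) == 4
```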

### `is_valid_merkle_branch_with_mixin`

```python
def is_valid_merkle_branch_with_mixin(leaf: Bytes32,
                                      branch: Sequence[Bytes32],
                                      depth: uint64,
                                      index: uint64,
                                      root: Root,
                                      mixin: uint64) -> bool:
    value = leaf
    for i in range(depth):
        if index // (2**i) % 2:
            value = hash(branch[i] + value)
        else:
            value = hash(value + branch[i])
    value = hash(value + mixin.to_bytes(32, "little"))
    return value == root
```
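
A toy end-to-end check of the mixin construction, assuming the spec's `hash` is SHA-256 as in phase 0. The mixin (typically a length) is hashed into the root as a final step:

```python
from hashlib import sha256

def hash(data: bytes) -> bytes:  # phase 0 defines hash as SHA-256
    return sha256(data).digest()

leaf, sibling, mixin = b'\x11' * 32, b'\x22' * 32, 5
# Depth-1 tree: the branch for index 0 is just the right sibling
root = hash(hash(leaf + sibling) + mixin.to_bytes(32, "little"))
value = hash(leaf + sibling)                       # index 0, so the leaf goes on the left
value = hash(value + mixin.to_bytes(32, "little"))  # mix the length in last
assert value == root
```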

### `get_crosslink_chunk_count`

```python
def get_custody_chunk_count(crosslink: Crosslink) -> int:
    crosslink_length = min(MAX_EPOCHS_PER_CROSSLINK, crosslink.end_epoch - crosslink.start_epoch)
    return crosslink_length * CHUNKS_PER_EPOCH
```

### `legendre_bit`

Returns the Legendre symbol `(a/q)` normalized as a bit (i.e. `((a/q) + 1) // 2`). In a production implementation, a well-optimized library (e.g. GMP) should be used for this.

@ -338,37 +175,27 @@ def legendre_bit(a: int, q: int) -> int:
    return 0
```
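
The function body is elided in this diff. For intuition, here is a minimal sketch via Euler's criterion, under which `(a/q)` is congruent to `a^((q-1)/2) mod q` for an odd prime `q`. This is an illustration, not the spec's exact implementation:

```python
def legendre_bit_sketch(a: int, q: int) -> int:
    # Minimal sketch via Euler's criterion, for odd prime q (illustrative only)
    a %= q
    if a == 0:
        return 0  # convention used here when a is divisible by q
    # Quadratic residue => symbol +1 => bit 1; non-residue => symbol -1 => bit 0
    return 1 if pow(a, (q - 1) // 2, q) == 1 else 0

assert legendre_bit_sketch(2, 7) == 1  # 2 = 3^2 mod 7 is a quadratic residue
assert legendre_bit_sketch(3, 7) == 0  # 3 is a non-residue mod 7
```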

### `custody_subchunkify`
### `custody_atoms`

Given one proof of custody chunk, returns the proof of custody subchunks of the correct sizes.
Given one set of data, return the custody atoms: each atom will be combined with one legendre bit.

```python
def custody_subchunkify(bytez: bytes) -> Sequence[bytes]:
    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_SUBCHUNK)
    return [bytez[i:i + BYTES_PER_CUSTODY_SUBCHUNK]
            for i in range(0, len(bytez), BYTES_PER_CUSTODY_SUBCHUNK)]
def get_custody_atoms(bytez: bytes) -> Sequence[bytes]:
    bytez += b'\x00' * (-len(bytez) % BYTES_PER_CUSTODY_ATOM)  # right-padding
    return [bytez[i:i + BYTES_PER_CUSTODY_ATOM]
            for i in range(0, len(bytez), BYTES_PER_CUSTODY_ATOM)]
```
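
A worked example of the padding arithmetic, using `BYTES_PER_CUSTODY_ATOM = 48` from the table above:

```python
BYTES_PER_CUSTODY_ATOM = 48
data_len = 100
pad = -data_len % BYTES_PER_CUSTODY_ATOM            # 44 zero bytes of right-padding
assert pad == 44
assert (data_len + pad) // BYTES_PER_CUSTODY_ATOM == 3  # padded data splits into exactly 3 atoms
```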

### `get_custody_chunk_bit`
### `compute_custody_bit`

```python
def get_custody_chunk_bit(key: BLSSignature, chunk: bytes) -> bool:
def compute_custody_bit(key: BLSSignature, data: bytes) -> bit:
    full_G2_element = bls.signature_to_G2(key)
    s = full_G2_element[0].coeffs
    bits = [legendre_bit((i + 1) * s[i % 2] + int.from_bytes(subchunk, "little"), BLS12_381_Q)
            for i, subchunk in enumerate(custody_subchunkify(chunk))]

    return bool(sum(bits) % 2)
```

### `get_chunk_bits_root`

```python
def get_chunk_bits_root(chunk_bits: Bitlist[MAX_CUSTODY_CHUNKS]) -> bit:
    aggregated_bits = 0
    for i, b in enumerate(chunk_bits):
        aggregated_bits += 2**i * b
    return legendre_bit(aggregated_bits, BLS12_381_Q)
    bits = [legendre_bit(sum(s[i % 2]**i * int.from_bytes(atom, "little")), BLS12_381_Q)
            for i, atom in enumerate(get_custody_atoms(data))]
    # XOR all atom bits
    return bit(sum(bits) % 2)
```

### `get_randao_epoch_for_custody_period`

@ -382,38 +209,31 @@ def get_randao_epoch_for_custody_period(period: uint64, validator_index: Validat

### `get_custody_period_for_validator`

```python
def get_custody_period_for_validator(state: BeaconState, validator_index: ValidatorIndex, epoch: Epoch=None) -> int:
def get_custody_period_for_validator(validator_index: ValidatorIndex, epoch: Epoch) -> int:
    '''
    Return the reveal period for a given validator.
    '''
    epoch = get_current_epoch(state) if epoch is None else epoch
    return (epoch + validator_index % EPOCHS_PER_CUSTODY_PERIOD) // EPOCHS_PER_CUSTODY_PERIOD
```
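
A worked example, using `EPOCHS_PER_CUSTODY_PERIOD = 2**11` (= 2,048) from the time parameters above. The validator-dependent offset staggers period boundaries across validators:

```python
EPOCHS_PER_CUSTODY_PERIOD = 2048
validator_index, epoch = 5000, 10000
offset = validator_index % EPOCHS_PER_CUSTODY_PERIOD  # 904 for this validator
assert offset == 904
assert (epoch + offset) // EPOCHS_PER_CUSTODY_PERIOD == 5  # this validator is in period 5
```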

### `replace_empty_or_append`

```python
def replace_empty_or_append(list: MutableSequence[Any], new_element: Any) -> int:
    for i in range(len(list)):
        if is_zero(list[i]):
            list[i] = new_element
            return i
    list.append(new_element)
    return len(list) - 1
```
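
Illustrative usage with plain integers, stubbing `is_zero` as a comparison with `0`. In the spec, `is_zero` checks whether a value equals its type's default; this sketch only shows the reuse-then-append behavior:

```python
def is_zero(x: int) -> bool:  # stub for illustration: treat 0 as the "empty" value
    return x == 0

records = [7, 0, 9]
assert replace_empty_or_append(records, 5) == 1 and records == [7, 5, 9]     # reuses the empty slot
assert replace_empty_or_append(records, 4) == 3 and records == [7, 5, 9, 4]  # otherwise appends
```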

## Per-block processing

### Operations
### Custody Game Operations

Add the following operations to the per-block processing, in the order given below and after all other operations in Phase 0.

```python
def process_custody_game_operations(state: BeaconState, body: BeaconBlockBody) -> None:
    def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None:
        for operation in operations:
            fn(state, operation)

    for_ops(body.custody_key_reveals, process_custody_key_reveal)
    for_ops(body.early_derived_secret_reveals, process_early_derived_secret_reveal)
    for_ops(body.custody_slashings, process_custody_slashing)
```

#### Custody key reveals

Verify that `len(block.body.custody_key_reveals) <= MAX_CUSTODY_KEY_REVEALS`.

For each `reveal` in `block.body.custody_key_reveals`, run the following function:

```python
def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) -> None:
    """
@ -423,7 +243,8 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
    revealer = state.validators[reveal.revealer_index]
    epoch_to_sign = get_randao_epoch_for_custody_period(revealer.next_custody_secret_to_reveal, reveal.revealer_index)

    assert revealer.next_custody_secret_to_reveal < get_custody_period_for_validator(state, reveal.revealer_index)
    custody_reveal_period = get_custody_period_for_validator(reveal.revealer_index, get_current_epoch(state))
    assert revealer.next_custody_secret_to_reveal < custody_reveal_period

    # Revealed validator is active or exited, but not withdrawn
    assert is_slashable_validator(revealer, get_current_epoch(state))

@ -448,7 +269,7 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->
    # Process reveal
    revealer.next_custody_secret_to_reveal += 1

    # Reward Block Preposer
    # Reward Block Proposer
    proposer_index = get_beacon_proposer_index(state)
    increase_balance(
        state,
@ -459,10 +280,6 @@ def process_custody_key_reveal(state: BeaconState, reveal: CustodyKeyReveal) ->

#### Early derived secret reveals

Verify that `len(block.body.early_derived_secret_reveals) <= MAX_EARLY_DERIVED_SECRET_REVEALS`.

For each `reveal` in `block.body.early_derived_secret_reveals`, run the following function:

```python
def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerivedSecretReveal) -> None:
    """
@ -520,252 +337,94 @@ def process_early_derived_secret_reveal(state: BeaconState, reveal: EarlyDerived
    state.exposed_derived_secrets[derived_secret_location].append(reveal.revealed_index)
```

#### Chunk challenges

Verify that `len(block.body.custody_chunk_challenges) <= MAX_CUSTODY_CHUNK_CHALLENGES`.

For each `challenge` in `block.body.custody_chunk_challenges`, run the following function:
#### Custody Slashings

```python
def process_chunk_challenge(state: BeaconState, challenge: CustodyChunkChallenge) -> None:
def process_custody_slashing(state: BeaconState, signed_custody_slashing: SignedCustodySlashing) -> None:
    custody_slashing = signed_custody_slashing.message
    attestation = custody_slashing.attestation

    # Any signed custody-slashing should result in at least one slashing.
    # If the custody bits are valid, then the claim itself is slashed.
    malefactor = state.validators[custody_slashing.malefactor_index]
    whistleblower = state.validators[custody_slashing.whistleblower_index]
    domain = get_domain(state, DOMAIN_CUSTODY_BIT_SLASHING, get_current_epoch(state))
    signing_root = compute_signing_root(custody_slashing, domain)
    assert bls.Verify(whistleblower.pubkey, signing_root, signed_custody_slashing.signature)
    # Verify that the whistleblower is slashable
    assert is_slashable_validator(whistleblower, get_current_epoch(state))
    # Verify that the claimed malefactor is slashable
    assert is_slashable_validator(malefactor, get_current_epoch(state))

    # Verify the attestation
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, challenge.attestation))
    # Verify it is not too late to challenge
    assert (compute_epoch_at_slot(challenge.attestation.data.slot)
            >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY)
    responder = state.validators[challenge.responder_index]
    assert responder.exit_epoch >= get_current_epoch(state) - MAX_CHUNK_CHALLENGE_DELAY
    # Verify the responder participated in the attestation
    attesters = get_attesting_indices(state, challenge.attestation.data, challenge.attestation.aggregation_bits)
    assert challenge.responder_index in attesters
    # Verify the challenge is not a duplicate
    for record in state.custody_chunk_challenge_records:
        assert (
            record.data_root != challenge.attestation.data.crosslink.data_root or
            record.chunk_index != challenge.chunk_index
        )
    # Verify depth
    depth = ceillog2(get_custody_chunk_count(challenge.attestation.data.crosslink))
    assert challenge.chunk_index < 2**depth
    # Add new chunk challenge record
    new_record = CustodyChunkChallengeRecord(
        challenge_index=state.custody_challenge_index,
        challenger_index=get_beacon_proposer_index(state),
        responder_index=challenge.responder_index,
        inclusion_epoch=get_current_epoch(state),
        data_root=challenge.attestation.data.crosslink.data_root,
        depth=depth,
        chunk_index=challenge.chunk_index,
    )
    replace_empty_or_append(state.custody_chunk_challenge_records, new_record)

    state.custody_challenge_index += 1
    # Postpone responder withdrawability
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
```

#### Bit challenges

Verify that `len(block.body.custody_bit_challenges) <= MAX_CUSTODY_BIT_CHALLENGES`.

For each `challenge` in `block.body.custody_bit_challenges`, run the following function:

```python
def process_bit_challenge(state: BeaconState, challenge: CustodyBitChallenge) -> None:
    attestation = challenge.attestation
    epoch = attestation.data.target.epoch
    shard = attestation.data.crosslink.shard

    # Verify challenge signature
    challenger = state.validators[challenge.challenger_index]
    domain = get_domain(state, DOMAIN_CUSTODY_BIT_CHALLENGE, get_current_epoch(state))
    # TODO incorrect hash-tree-root, but this changes with phase 1 PR #1483
    assert bls.Verify(challenger.pubkey, compute_signing_root(challenge, domain), challenge.signature)
    # Verify challenger is slashable
    assert is_slashable_validator(challenger, get_current_epoch(state))
    # Verify attestation
    assert is_valid_indexed_attestation(state, get_indexed_attestation(state, attestation))
    # Verify attestation is eligible for challenging
    responder = state.validators[challenge.responder_index]
    assert get_current_epoch(state) <= get_randao_epoch_for_custody_period(
        get_custody_period_for_validator(state, challenge.responder_index, epoch),
        challenge.responder_index
    ) + 2 * EPOCHS_PER_CUSTODY_PERIOD + responder.max_reveal_lateness

    # Verify the responder participated in the attestation
    # TODO: custody_slashing.data is not chunked like shard blocks yet, result is lots of padding.

    # TODO: can do a single combined merkle proof of data being attested.
    # Verify the shard transition is indeed attested by the attestation
    shard_transition = custody_slashing.shard_transition
    assert hash_tree_root(shard_transition) == attestation.data.shard_transition_root
    # Verify that the provided data matches the shard-transition
    assert hash_tree_root(custody_slashing.data) == shard_transition.shard_data_roots[custody_slashing.data_index]

    # Verify existence and participation of claimed malefactor
    attesters = get_attesting_indices(state, attestation.data, attestation.aggregation_bits)
    assert challenge.responder_index in attesters
    # Verify challenger is not already challenging
    for record in state.custody_bit_challenge_records:
        assert record.challenger_index != challenge.challenger_index
    # Verify the responder custody key
    assert custody_slashing.malefactor_index in attesters

    # Verify the malefactor custody key
    epoch_to_sign = get_randao_epoch_for_custody_period(
        get_custody_period_for_validator(state, challenge.responder_index, epoch),
        challenge.responder_index,
        get_custody_period_for_validator(custody_slashing.malefactor_index, attestation.data.target.epoch),
        custody_slashing.malefactor_index,
    )
    domain = get_domain(state, DOMAIN_RANDAO, epoch_to_sign)
    assert bls.Verify(responder.pubkey, compute_signing_root(epoch_to_sign, domain), challenge.responder_key)
    # Verify the chunk count
    chunk_count = get_custody_chunk_count(attestation.data.crosslink)
    assert chunk_count == len(challenge.chunk_bits)
    # Verify custody bit is incorrect
    committee = get_beacon_committee(state, epoch, shard)
    custody_bit = attestation.custody_bits[committee.index(challenge.responder_index)]
    assert custody_bit != get_chunk_bits_root(challenge.chunk_bits)
    # Add new bit challenge record
    new_record = CustodyBitChallengeRecord(
        challenge_index=state.custody_challenge_index,
        challenger_index=challenge.challenger_index,
        responder_index=challenge.responder_index,
        inclusion_epoch=get_current_epoch(state),
        data_root=attestation.data.crosslink.data_root,
        chunk_count=chunk_count,
        chunk_bits_merkle_root=hash_tree_root(challenge.chunk_bits),
        responder_key=challenge.responder_key,
    )
    replace_empty_or_append(state.custody_bit_challenge_records, new_record)
    state.custody_challenge_index += 1
    # Postpone responder withdrawability
    responder.withdrawable_epoch = FAR_FUTURE_EPOCH
    signing_root = compute_signing_root(epoch_to_sign, domain)
    assert bls.Verify(malefactor.pubkey, signing_root, custody_slashing.malefactor_secret)

    # Get the custody bit
    custody_bits = attestation.custody_bits_blocks[custody_slashing.data_index]
    committee = get_beacon_committee(state, attestation.data.slot, attestation.data.index)
    claimed_custody_bit = custody_bits[committee.index(custody_slashing.malefactor_index)]

    # Compute the custody bit
    computed_custody_bit = compute_custody_bit(custody_slashing.malefactor_secret, custody_slashing.data)

    # Verify the claim
    if claimed_custody_bit != computed_custody_bit:
        # Slash the malefactor, reward the other committee members
        slash_validator(state, custody_slashing.malefactor_index)
        others_count = len(committee) - 1
        whistleblower_reward = Gwei(malefactor.effective_balance // WHISTLEBLOWER_REWARD_QUOTIENT // others_count)
        for attester_index in attesters:
            if attester_index != custody_slashing.malefactor_index:
                increase_balance(state, attester_index, whistleblower_reward)
        # No special whistleblower reward: it is expected to be an attester. Others are free to slash too however.
    else:
        # The claim was false, the custody bit was correct. Slash the whistleblower that induced this work.
        slash_validator(state, custody_slashing.whistleblower_index)
```

#### Custody responses

Verify that `len(block.body.custody_responses) <= MAX_CUSTODY_RESPONSES`.

For each `response` in `block.body.custody_responses`, run the following function:

```python
def process_custody_response(state: BeaconState, response: CustodyResponse) -> None:
    chunk_challenge = next((record for record in state.custody_chunk_challenge_records
                            if record.challenge_index == response.challenge_index), None)
    if chunk_challenge is not None:
        return process_chunk_challenge_response(state, response, chunk_challenge)

    bit_challenge = next((record for record in state.custody_bit_challenge_records
                          if record.challenge_index == response.challenge_index), None)
    if bit_challenge is not None:
        return process_bit_challenge_response(state, response, bit_challenge)

    assert False
```

```python
def process_chunk_challenge_response(state: BeaconState,
                                     response: CustodyResponse,
                                     challenge: CustodyChunkChallengeRecord) -> None:
    # Verify chunk index
    assert response.chunk_index == challenge.chunk_index
    # Verify bit challenge data is null
    assert response.chunk_bits_branch == [] and response.chunk_bits_leaf == Bytes32()
    # Verify minimum delay
    assert get_current_epoch(state) >= challenge.inclusion_epoch + MAX_SEED_LOOKAHEAD
    # Verify the chunk matches the crosslink data root
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(response.chunk),
        branch=response.data_branch,
        depth=challenge.depth,
        index=response.chunk_index,
        root=challenge.data_root,
    )
    # Clear the challenge
    records = state.custody_chunk_challenge_records
    records[records.index(challenge)] = CustodyChunkChallengeRecord()
    # Reward the proposer
    proposer_index = get_beacon_proposer_index(state)
    increase_balance(state, proposer_index, Gwei(get_base_reward(state, proposer_index) // MINOR_REWARD_QUOTIENT))
```

```python
def process_bit_challenge_response(state: BeaconState,
                                   response: CustodyResponse,
                                   challenge: CustodyBitChallengeRecord) -> None:
    # Verify chunk index
    assert response.chunk_index < challenge.chunk_count
    # Verify responder has not been slashed
    responder = state.validators[challenge.responder_index]
    assert not responder.slashed
    # Verify the chunk matches the crosslink data root
    assert is_valid_merkle_branch(
        leaf=hash_tree_root(response.chunk),
        branch=response.data_branch,
        depth=ceillog2(challenge.chunk_count),
        index=response.chunk_index,
        root=challenge.data_root,
    )
    # Verify the chunk bit leaf matches the challenge data
    assert is_valid_merkle_branch_with_mixin(
        leaf=hash_tree_root(response.chunk_bits_leaf),
        branch=response.chunk_bits_branch,
        depth=ceillog2(MAX_CUSTODY_CHUNKS // 256),
        index=response.chunk_index // 256,
        root=challenge.chunk_bits_merkle_root,
        mixin=challenge.chunk_count,
    )
    # Verify the chunk bit does not match the challenge chunk bit
    assert (get_custody_chunk_bit(challenge.responder_key, response.chunk)
            != response.chunk_bits_leaf[response.chunk_index % 256])
    # Clear the challenge
    records = state.custody_bit_challenge_records
    records[records.index(challenge)] = CustodyBitChallengeRecord()
    # Slash challenger
    slash_validator(state, challenge.challenger_index, challenge.responder_index)
```

## Per-epoch processing

### Handling of custody-related deadlines
### Handling of reveal deadlines

Run `process_reveal_deadlines(state)` immediately after `process_registry_updates(state)`:
Run `process_reveal_deadlines(state)` after `process_registry_updates(state)`:

```python
# begin insert @process_reveal_deadlines
    process_reveal_deadlines(state)
# end insert @process_reveal_deadlines
def process_reveal_deadlines(state: BeaconState) -> None:
    epoch = get_current_epoch(state)
    for index, validator in enumerate(state.validators):
        deadline = validator.next_custody_secret_to_reveal + (CUSTODY_RESPONSE_DEADLINE // EPOCHS_PER_CUSTODY_PERIOD)
        if get_custody_period_for_validator(state, ValidatorIndex(index)) > deadline:
        if get_custody_period_for_validator(ValidatorIndex(index), epoch) > validator.next_custody_secret_to_reveal:
            slash_validator(state, ValidatorIndex(index))
```
|
||||
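
To make the updated deadline check concrete: a validator becomes slashable as soon as the custody period it should currently be revealing has passed the next secret it has yet to reveal. A toy check with illustrative numbers (not spec values):

```python
# Illustrative values only
next_custody_secret_to_reveal = 5  # validator.next_custody_secret_to_reveal
current_custody_period = 7         # get_custody_period_for_validator(index, epoch)

# The reveal for period 5 is overdue, so the validator is slashable
assert current_custody_period > next_custody_secret_to_reveal
```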
|
||||
Run `process_challenge_deadlines(state)` immediately after `process_reveal_deadlines(state)`:
|
||||
### Final updates
|
||||
|
||||
After `process_final_updates(state)`, additional updates are made for the custody game:
|
||||
|
||||
```python
|
||||
# begin insert @process_challenge_deadlines
|
||||
process_challenge_deadlines(state)
|
||||
# end insert @process_challenge_deadlines
|
||||
def process_challenge_deadlines(state: BeaconState) -> None:
|
||||
for custody_chunk_challenge in state.custody_chunk_challenge_records:
|
||||
if get_current_epoch(state) > custody_chunk_challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
|
||||
slash_validator(state, custody_chunk_challenge.responder_index, custody_chunk_challenge.challenger_index)
|
||||
records = state.custody_chunk_challenge_records
|
||||
records[records.index(custody_chunk_challenge)] = CustodyChunkChallengeRecord()
|
||||
|
||||
for custody_bit_challenge in state.custody_bit_challenge_records:
|
||||
if get_current_epoch(state) > custody_bit_challenge.inclusion_epoch + CUSTODY_RESPONSE_DEADLINE:
|
||||
slash_validator(state, custody_bit_challenge.responder_index, custody_bit_challenge.challenger_index)
|
||||
records = state.custody_bit_challenge_records
|
||||
records[records.index(custody_bit_challenge)] = CustodyBitChallengeRecord()
|
||||
```
|
||||
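
Note that both loops clear a record by overwriting it in place with a default-valued record rather than deleting it, so the list length (and hence the SSZ shape) is unchanged. A small self-contained illustration of the pattern:

```python
from dataclasses import dataclass


@dataclass
class ChallengeRecord:
    challenge_index: int = 0  # all-default fields mark an empty slot


records = [ChallengeRecord(7), ChallengeRecord(9)]
expired = records[1]
# Clear in place, as the spec functions above do; the list keeps its length
records[records.index(expired)] = ChallengeRecord()
assert records == [ChallengeRecord(7), ChallengeRecord()]
```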
|
||||
Append this to `process_final_updates(state)`:
|
||||
|
||||
```python
|
||||
# begin insert @after_process_final_updates
|
||||
after_process_final_updates(state)
|
||||
# end insert @after_process_final_updates
|
||||
def after_process_final_updates(state: BeaconState) -> None:
|
||||
current_epoch = get_current_epoch(state)
|
||||
def process_custody_final_updates(state: BeaconState) -> None:
|
||||
# Clean up exposed RANDAO key reveals
|
||||
state.exposed_derived_secrets[current_epoch % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
|
||||
# Reset withdrawable epochs if challenge records are empty
|
||||
records = state.custody_chunk_challenge_records + state.custody_bit_challenge_records
|
||||
validator_indices_in_records = set(
|
||||
[record.challenger_index for record in records] + [record.responder_index for record in records]
|
||||
)
|
||||
for index, validator in enumerate(state.validators):
|
||||
if index not in validator_indices_in_records:
|
||||
if validator.exit_epoch != FAR_FUTURE_EPOCH and validator.withdrawable_epoch == FAR_FUTURE_EPOCH:
|
||||
validator.withdrawable_epoch = Epoch(validator.exit_epoch + MIN_VALIDATOR_WITHDRAWABILITY_DELAY)
|
||||
state.exposed_derived_secrets[get_current_epoch(state) % EARLY_DERIVED_SECRET_PENALTY_MAX_FUTURE_EPOCHS] = []
|
||||
```
|
||||
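
The withdrawability reset above hinges on collecting every validator index that still appears in an open challenge record. A self-contained illustration of that set construction:

```python
from dataclasses import dataclass


@dataclass
class Record:
    challenger_index: int
    responder_index: int


records = [Record(challenger_index=1, responder_index=4),
           Record(challenger_index=2, responder_index=4)]
indices_in_records = set(
    [r.challenger_index for r in records] + [r.responder_index for r in records]
)
# Validator 4 appears in two records but only once in the set
assert indices_in_records == {1, 2, 4}
```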
|
|
|
@ -0,0 +1,52 @@
|
|||
# Ethereum 2.0 Phase 1 -- Beacon Chain Fork Choice
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Fork choice](#fork-choice)
|
||||
- [Handlers](#handlers)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document is the beacon chain fork choice spec for part of Ethereum 2.0 Phase 1.
|
||||
|
||||
## Fork choice
|
||||
|
||||
Due to the changes in the structure of `IndexedAttestation` in Phase 1, `on_attestation` must be re-specified to handle this. The bulk of `on_attestation` has been moved out into a few helpers to reduce code duplication where possible.
|
||||
|
||||
The rest of the fork choice remains stable.
|
||||
|
||||
### Handlers
|
||||
|
||||
```python
|
||||
def on_attestation(store: Store, attestation: Attestation) -> None:
|
||||
"""
|
||||
Run ``on_attestation`` upon receiving a new ``attestation`` from either within a block or directly on the wire.
|
||||
|
||||
An ``attestation`` that is asserted as invalid may be valid at a later time;
|
||||
consider scheduling it for later processing in such a case.
|
||||
"""
|
||||
validate_on_attestation(store, attestation)
|
||||
store_target_checkpoint_state(store, attestation.data.target)
|
||||
|
||||
# Get state at the `target` to fully validate attestation
|
||||
target_state = store.checkpoint_states[attestation.data.target]
|
||||
indexed_attestation = get_indexed_attestation(target_state, attestation)
|
||||
assert is_valid_indexed_attestation(target_state, indexed_attestation)
|
||||
|
||||
# Update latest messages for attesting indices
|
||||
attesting_indices = [
|
||||
index for i, index in enumerate(indexed_attestation.committee)
|
||||
if attestation.aggregation_bits[i]
|
||||
]
|
||||
update_latest_messages(store, attesting_indices, attestation)
|
||||
```
|
|
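
The list comprehension above filters the committee by its aggregation bits. A self-contained illustration with made-up indices:

```python
committee = [10, 42, 7, 99]      # indexed_attestation.committee (illustrative)
aggregation_bits = [1, 0, 1, 0]  # attestation.aggregation_bits (illustrative)

attesting_indices = [
    index for i, index in enumerate(committee)
    if aggregation_bits[i]
]
assert attesting_indices == [10, 7]
```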
@ -0,0 +1,70 @@
|
|||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs](#ethereum-20-phase-1----shard-transition-and-fraud-proofs)
|
||||
- [Table of contents](#table-of-contents)
|
||||
- [Introduction](#introduction)
|
||||
- [Fraud proofs](#fraud-proofs)
|
||||
- [Shard state transition function](#shard-state-transition-function)
|
||||
- [Honest committee member behavior](#honest-committee-member-behavior)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
# Ethereum 2.0 Phase 1 -- Shard Transition and Fraud Proofs
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
TODO
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the shard transition function and fraud proofs as part of Phase 1 of Ethereum 2.0.
|
||||
|
||||
## Fraud proofs
|
||||
|
||||
TODO. The intent is to have a single universal fraud proof type, which contains the following parts:
|
||||
|
||||
1. An on-time attestation on some `shard` signing a `ShardTransition`
|
||||
2. An index `i` of a particular position to focus on
|
||||
3. The `ShardTransition` itself
|
||||
4. The full body of the block
|
||||
5. A Merkle proof to the `shard_states` in the parent block the attestation is referencing
|
||||
|
||||
The proof verifies that one of the two conditions is false:
|
||||
|
||||
1. `custody_bits[i][j] != generate_custody_bit(subkey, block_contents)` for any `j`
|
||||
2. `execute_state_transition(shard, slot, transition.shard_states[i-1].data, hash_tree_root(parent), get_shard_proposer_index(state, shard, slot), block_contents) != transition.shard_states[i].data` (if `i=0` then instead use `parent.shard_states[shard][-1].data`)
|
||||
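
The container itself is still TODO. Purely as an illustration, a hypothetical layout mirroring the five parts above; all names and types here are assumptions, not part of the spec:

```python
from dataclasses import dataclass
from typing import Any, Sequence

Root = bytes  # 32-byte hash, standing in for the spec's Root type


@dataclass
class FraudProof:
    attestation: Any                    # (1) on-time attestation on `shard` signing a ShardTransition
    focus_index: int                    # (2) index `i` of the position to focus on
    transition: Any                     # (3) the ShardTransition itself
    block_contents: bytes               # (4) full body of the block
    shard_states_proof: Sequence[Root]  # (5) Merkle proof to `shard_states` in the parent block
```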
|
||||
## Shard state transition function
|
||||
|
||||
```python
|
||||
def shard_state_transition(shard: Shard,
|
||||
slot: Slot,
|
||||
pre_state: Root,
|
||||
previous_beacon_root: Root,
|
||||
proposer_pubkey: BLSPubkey,
|
||||
block_data: ByteList[MAX_SHARD_BLOCK_SIZE]) -> Root:
|
||||
# We will add something more substantive in phase 2
|
||||
return hash(pre_state + hash_tree_root(previous_beacon_root) + hash_tree_root(block_data))
|
||||
```
|
||||
|
||||
## Honest committee member behavior
|
||||
|
||||
Suppose you are a committee member on shard `shard` at slot `current_slot`. Let `state` be the head beacon state you are building on, and let `QUARTER_PERIOD = SECONDS_PER_SLOT // 4`. `2 * QUARTER_PERIOD` seconds into slot `current_slot`, run the following procedure:
|
||||
|
||||
* Initialize `proposals = []`, `shard_states = []`, `shard_state = state.shard_states[shard][-1]`, `start_slot = shard_state.slot`.
|
||||
* For `slot in get_offset_slots(state, start_slot)`, do the following:
|
||||
* Look for all valid proposals for `slot`; that is, any byte string `proposal` for which `shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposal)` completes without throwing an exception. Let `choices` be the set of non-empty valid proposals you discover.
|
||||
* If `len(choices) == 0`, do `proposals.append(make_empty_proposal(shard_state, slot))`
|
||||
* If `len(choices) == 1`, do `proposals.append(choices[0])`
|
||||
* If `len(choices) > 1`, let `winning_proposal` be the proposal with the largest number of total attestations from slots in `state.shard_next_slots[shard]....slot-1` supporting it or any of its descendants, breaking ties by choosing the first proposal locally seen. Do `proposals.append(winning_proposal)`.
|
||||
* If `proposals[-1]` is NOT an empty proposal, set `shard_state = shard_state_transition(shard, slot, shard_state, get_block_root_at_slot(state, state.slot - 1), get_shard_proposer_index(state, shard, slot), proposals[-1])` and do `shard_states.append(shard_state)`. If it is an empty proposal, leave `shard_state` unchanged.
|
||||
|
||||
Make an attestation using `shard_data_roots = [hash_tree_root(proposal) for proposal in proposals]` and `shard_state_roots = shard_states`.
|
|
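
A minimal self-contained sketch of just the three-way choice in the loop above; `support` stands in for the attestation-counting rule and `make_empty` for `make_empty_proposal`, both assumptions for illustration:

```python
from typing import Callable, Sequence


def choose_proposal(choices: Sequence[bytes],
                    support: Callable[[bytes], int],
                    make_empty: Callable[[], bytes]) -> bytes:
    if len(choices) == 0:
        return make_empty()
    if len(choices) == 1:
        return choices[0]
    # max() returns the first maximal element, matching the
    # "first proposal locally seen" tie-breaking rule
    return max(choices, key=support)
```

With `choices` ordered by local arrival time, ties between equally supported proposals resolve to the earliest one seen, as the rule requires.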
@ -49,7 +49,7 @@ We define the following Python custom types for type hinting and readability:
|
|||
### `LightClientUpdate`
|
||||
|
||||
```python
|
||||
class LightClientUpdate(container):
|
||||
class LightClientUpdate(Container):
|
||||
# Shard block root (and authenticating signature data)
|
||||
shard_block_root: Root
|
||||
fork_version: Version
|
||||
|
|
|
@ -0,0 +1,121 @@
|
|||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
**Table of Contents** *generated with [DocToc](https://github.com/thlorenz/doctoc)*
|
||||
|
||||
- [Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1](#ethereum-20-phase-1----from-phase-0-to-phase-1)
|
||||
- [Table of contents](#table-of-contents)
|
||||
- [Introduction](#introduction)
|
||||
- [Configuration](#configuration)
|
||||
- [Fork to Phase 1](#fork-to-phase-1)
|
||||
- [Fork trigger](#fork-trigger)
|
||||
- [Upgrading the state](#upgrading-the-state)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
|
||||
# Ethereum 2.0 Phase 1 -- From Phase 0 to Phase 1
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
|
||||
TODO
|
||||
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the process of moving from Phase 0 to Phase 1 of Ethereum 2.0.
|
||||
|
||||
## Configuration
|
||||
|
||||
Warning: this configuration is not definitive.
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `PHASE_1_FORK_VERSION` | `Version('0x01000000')` |
|
||||
| `INITIAL_ACTIVE_SHARDS` | `2**6` (= 64) |
|
||||
| `INITIAL_GASPRICE` | `Gwei(10)` |
|
||||
|
||||
## Fork to Phase 1
|
||||
|
||||
### Fork trigger
|
||||
|
||||
TBD. Social consensus, along with state conditions such as epoch boundary, finality, deposits, active validator count, etc., may be part of the decision process to trigger the fork.
|
||||
|
||||
### Upgrading the state
|
||||
|
||||
After `process_slots` of Phase 0 finishes, but before the first Phase 1 block is processed, an irregular state change is made to upgrade to Phase 1.
|
||||
|
||||
```python
|
||||
def upgrade_to_phase1(pre: phase0.BeaconState) -> BeaconState:
|
||||
epoch = get_current_epoch(pre)
|
||||
post = BeaconState(
|
||||
genesis_time=pre.genesis_time,
|
||||
slot=pre.slot,
|
||||
fork=Fork(
|
||||
previous_version=pre.fork.current_version,
|
||||
current_version=PHASE_1_FORK_VERSION,
|
||||
epoch=epoch,
|
||||
),
|
||||
# History
|
||||
latest_block_header=pre.latest_block_header,
|
||||
block_roots=pre.block_roots,
|
||||
state_roots=pre.state_roots,
|
||||
historical_roots=pre.historical_roots,
|
||||
# Eth1
|
||||
eth1_data=pre.eth1_data,
|
||||
eth1_data_votes=pre.eth1_data_votes,
|
||||
eth1_deposit_index=pre.eth1_deposit_index,
|
||||
# Registry
|
||||
validators=List[Validator, VALIDATOR_REGISTRY_LIMIT](
|
||||
Validator(
|
||||
pubkey=phase0_validator.pubkey,
|
||||
withdrawal_credentials=phase0_validator.withdrawal_credentials,
|
||||
effective_balance=phase0_validator.effective_balance,
|
||||
slashed=phase0_validator.slashed,
|
||||
activation_eligibility_epoch=phase0_validator.activation_eligibility_epoch,
|
||||
activation_epoch=phase0_validator.activation_epoch,
|
||||
exit_epoch=phase0_validator.exit_epoch,
|
||||
withdrawable_epoch=phase0_validator.withdrawable_epoch,
|
||||
next_custody_secret_to_reveal=get_custody_period_for_validator(ValidatorIndex(i), epoch),
|
||||
max_reveal_lateness=0, # TODO custody refactor. Outdated?
|
||||
) for i, phase0_validator in enumerate(pre.validators)
|
||||
),
|
||||
balances=pre.balances,
|
||||
# Randomness
|
||||
randao_mixes=pre.randao_mixes,
|
||||
# Slashings
|
||||
slashings=pre.slashings,
|
||||
# Attestations
|
||||
# previous_epoch_attestations is cleared on upgrade.
|
||||
previous_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
|
||||
# empty in pre state, since the upgrade is performed just after an epoch boundary.
|
||||
current_epoch_attestations=List[PendingAttestation, MAX_ATTESTATIONS * SLOTS_PER_EPOCH](),
|
||||
# Finality
|
||||
justification_bits=pre.justification_bits,
|
||||
previous_justified_checkpoint=pre.previous_justified_checkpoint,
|
||||
current_justified_checkpoint=pre.current_justified_checkpoint,
|
||||
finalized_checkpoint=pre.finalized_checkpoint,
|
||||
# Phase 1
|
||||
shard_states=List[ShardState, MAX_SHARDS](
|
||||
ShardState(
|
||||
slot=pre.slot,
|
||||
gasprice=INITIAL_GASPRICE,
|
||||
data=Root(),
|
||||
latest_block_root=Root(),
|
||||
) for i in range(INITIAL_ACTIVE_SHARDS)
|
||||
),
|
||||
online_countdown=[ONLINE_PERIOD] * len(pre.validators), # all online
|
||||
current_light_committee=CompactCommittee(), # computed after state creation
|
||||
next_light_committee=CompactCommittee(),
|
||||
# Custody game
|
||||
custody_challenge_index=0,
|
||||
# exposed_derived_secrets will fully default to zeroes
|
||||
)
|
||||
next_epoch = Epoch(epoch + 1)
|
||||
post.current_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, epoch))
|
||||
post.next_light_committee = committee_to_compact_committee(post, get_light_client_committee(post, next_epoch))
|
||||
return post
|
||||
```
|
|
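
How a client splices this into its slot processing is not specified here. A hedged sketch, assuming a `PHASE_1_FORK_EPOCH` constant (the actual trigger is TBD above) and the Phase 0 `process_slots`:

```python
def process_slots_across_fork(state: phase0.BeaconState, slot: Slot) -> BeaconState:
    # Run Phase 0 slot processing up to the target slot...
    process_slots(state, slot)
    # ...then, before the first Phase 1 block, apply the irregular state change.
    # PHASE_1_FORK_EPOCH is an assumed constant; the real trigger is TBD.
    if get_current_epoch(state) == PHASE_1_FORK_EPOCH:
        state = upgrade_to_phase1(state)
    return state
```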
@ -1,444 +0,0 @@
|
|||
# Ethereum 2.0 Phase 1 -- Shard Data Chains
|
||||
|
||||
**Notice**: This document is a work-in-progress for researchers and implementers.
|
||||
|
||||
## Table of contents
|
||||
|
||||
<!-- TOC -->
|
||||
<!-- START doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->
|
||||
|
||||
|
||||
- [Introduction](#introduction)
|
||||
- [Custom types](#custom-types)
|
||||
- [Configuration](#configuration)
|
||||
- [Misc](#misc)
|
||||
- [Initial values](#initial-values)
|
||||
- [Time parameters](#time-parameters)
|
||||
- [State list lengths](#state-list-lengths)
|
||||
- [Rewards and penalties](#rewards-and-penalties)
|
||||
- [Signature domain types](#signature-domain-types)
|
||||
- [Containers](#containers)
|
||||
- [`Crosslink`](#crosslink)
|
||||
- [`ShardBlock`](#shardblock)
|
||||
- [`ShardBlockHeader`](#shardblockheader)
|
||||
- [`ShardState`](#shardstate)
|
||||
- [`ShardAttestationData`](#shardattestationdata)
|
||||
- [Helper functions](#helper-functions)
|
||||
- [Misc](#misc-1)
|
||||
- [`compute_epoch_of_shard_slot`](#compute_epoch_of_shard_slot)
|
||||
- [`compute_shard_period_start_epoch`](#compute_shard_period_start_epoch)
|
||||
- [Beacon state accessors](#beacon-state-accessors)
|
||||
- [`get_period_committee`](#get_period_committee)
|
||||
- [`get_shard_committee`](#get_shard_committee)
|
||||
- [`get_shard_proposer_index`](#get_shard_proposer_index)
|
||||
- [Shard state mutators](#shard-state-mutators)
|
||||
- [`process_delta`](#process_delta)
|
||||
- [Genesis](#genesis)
|
||||
- [`get_genesis_shard_state`](#get_genesis_shard_state)
|
||||
- [`get_genesis_shard_block`](#get_genesis_shard_block)
|
||||
- [Shard state transition function](#shard-state-transition-function)
|
||||
- [Period processing](#period-processing)
|
||||
- [Block processing](#block-processing)
|
||||
- [Block header](#block-header)
|
||||
- [Attestations](#attestations)
|
||||
- [Block body](#block-body)
|
||||
- [Shard fork choice rule](#shard-fork-choice-rule)
|
||||
|
||||
<!-- END doctoc generated TOC please keep comment here to allow auto update -->
|
||||
<!-- /TOC -->
|
||||
|
||||
## Introduction
|
||||
|
||||
This document describes the shard transition function (data layer only) and the shard fork choice rule as part of Phase 1 of Ethereum 2.0.
|
||||
|
||||
## Custom types
|
||||
|
||||
| Name | SSZ equivalent | Description |
|
||||
| - | - | - |
|
||||
| `Shard` | `uint64` | a shard number |
|
||||
| `ShardSlot` | `uint64` | a shard slot number |
|
||||
|
||||
## Configuration
|
||||
|
||||
### Misc
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `SHARD_COUNT` | `2**10` (= 1,024) |
|
||||
| `MIN_BLOCK_BODY_PRICE` | `2**0` (= 1) |
|
||||
| `MAX_PERIOD_COMMITTEE_SIZE` | `2**7` (= 128) |
|
||||
| `SHARD_HEADER_SIZE` | `2**10` (= 1024) |
|
||||
| `SHARD_BLOCK_SIZE_TARGET` | `2**14` (= 16,384) |
|
||||
| `MAX_SHARD_BLOCK_SIZE` | `2**16` (= 65,536) |
|
||||
|
||||
### Initial values
|
||||
|
||||
| Name | Value | Unit |
|
||||
| - | - | - |
|
||||
| `SHARD_GENESIS_EPOCH` | **TBD** | Epoch |
|
||||
|
||||
### Time parameters
|
||||
|
||||
| Name | Value | Unit | Duration |
|
||||
| - | - | :-: | :-: |
|
||||
| `SHARD_SLOTS_PER_EPOCH` | `2**7` (= 128) | shard slots | 6.4 minutes |
|
||||
| `EPOCHS_PER_SHARD_PERIOD` | `2**8` (= 256) | epochs | ~27 hours |
|
||||
|
||||
### State list lengths
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `HISTORY_ACCUMULATOR_DEPTH` | `2**6` (= 64) |
|
||||
|
||||
### Rewards and penalties
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `BLOCK_BODY_PRICE_QUOTIENT` | `2**3` (= 8) |
|
||||
|
||||
### Signature domain types
|
||||
|
||||
| Name | Value |
|
||||
| - | - |
|
||||
| `DOMAIN_SHARD_PROPOSER` | `DomainType('0x80000000')` |
|
||||
| `DOMAIN_SHARD_ATTESTER` | `DomainType('0x81000000')` |
|
||||
|
||||
## Containers
|
||||
|
||||
### `Crosslink`
|
||||
|
||||
```python
|
||||
# Crosslink is a placeholder to appease the build script until phase 1 is reworked
|
||||
class Crosslink(Container):
|
||||
shard: Shard
|
||||
```
|
||||
|
||||
### `ShardBlock`
|
||||
|
||||
```python
|
||||
class ShardBlock(Container):
|
||||
shard: Shard
|
||||
slot: ShardSlot
|
||||
beacon_block_root: Root
|
||||
parent_root: Root
|
||||
state_root: Root
|
||||
body: List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]
|
||||
block_size_sum: uint64
|
||||
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
|
||||
attestations: BLSSignature
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `ShardBlockHeader`
|
||||
|
||||
```python
|
||||
class ShardBlockHeader(Container):
|
||||
shard: Shard
|
||||
slot: ShardSlot
|
||||
beacon_block_root: Root
|
||||
parent_root: Root
|
||||
state_root: Root
|
||||
body_root: Root
|
||||
block_size_sum: uint64
|
||||
aggregation_bits: Bitvector[2 * MAX_PERIOD_COMMITTEE_SIZE]
|
||||
attestations: BLSSignature
|
||||
signature: BLSSignature
|
||||
```
|
||||
|
||||
### `ShardState`
|
||||
|
||||
```python
|
||||
class ShardState(Container):
|
||||
shard: Shard
|
||||
slot: ShardSlot
|
||||
history_accumulator: Vector[Bytes32, HISTORY_ACCUMULATOR_DEPTH]
|
||||
latest_block_header: ShardBlockHeader
|
||||
block_size_sum: uint64
|
||||
# Fees and rewards
|
||||
block_body_price: Gwei
|
||||
older_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
|
||||
older_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
|
||||
newer_committee_positive_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
|
||||
newer_committee_negative_deltas: Vector[Gwei, MAX_PERIOD_COMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
### `ShardAttestationData`
|
||||
|
||||
```python
|
||||
class ShardAttestationData(Container):
|
||||
slot: ShardSlot
|
||||
parent_root: Root
|
||||
```
|
||||
|
||||
## Helper functions
|
||||
|
||||
### Misc
|
||||
|
||||
#### `compute_epoch_of_shard_slot`
|
||||
|
||||
```python
|
||||
def compute_epoch_of_shard_slot(slot: ShardSlot) -> Epoch:
|
||||
return Epoch(slot // SHARD_SLOTS_PER_EPOCH)
|
||||
```
|
||||
|
||||
#### `compute_shard_period_start_epoch`
|
||||
|
||||
```python
|
||||
def compute_shard_period_start_epoch(epoch: Epoch, lookback: uint64) -> Epoch:
|
||||
return Epoch(epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD)
|
||||
```
|
||||
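
A quick self-contained check of the arithmetic, with plain `int`s standing in for `Epoch` and `EPOCHS_PER_SHARD_PERIOD = 256` from the configuration above:

```python
EPOCHS_PER_SHARD_PERIOD = 256


def compute_shard_period_start_epoch(epoch: int, lookback: int) -> int:
    return epoch - (epoch % EPOCHS_PER_SHARD_PERIOD) - lookback * EPOCHS_PER_SHARD_PERIOD


# Epoch 1000 lies in the period that starts at epoch 768, so:
assert compute_shard_period_start_epoch(1000, lookback=1) == 512  # previous period
assert compute_shard_period_start_epoch(1000, lookback=2) == 256  # two periods back
```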
|
||||
### Beacon state accessors
|
||||
|
||||
#### `get_period_committee`
|
||||
|
||||
```python
|
||||
def get_period_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
active_validator_indices = get_active_validator_indices(beacon_state, epoch)
|
||||
seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_ATTESTER)
|
||||
return compute_committee(active_validator_indices, seed, shard, SHARD_COUNT)[:MAX_PERIOD_COMMITTEE_SIZE]
|
||||
```
|
||||
|
||||
#### `get_shard_committee`
|
||||
|
||||
```python
|
||||
def get_shard_committee(beacon_state: BeaconState, shard: Shard, epoch: Epoch) -> Sequence[ValidatorIndex]:
|
||||
older_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 2))
|
||||
newer_committee = get_period_committee(beacon_state, shard, compute_shard_period_start_epoch(epoch, 1))
|
||||
# Every epoch, cycle out validators from the older committee and cycle in validators from the newer committee
|
||||
older_subcommittee = [i for i in older_committee if i % EPOCHS_PER_SHARD_PERIOD > epoch % EPOCHS_PER_SHARD_PERIOD]
|
||||
newer_subcommittee = [i for i in newer_committee if i % EPOCHS_PER_SHARD_PERIOD <= epoch % EPOCHS_PER_SHARD_PERIOD]
|
||||
return older_subcommittee + newer_subcommittee
|
||||
```
|
||||
|
||||
#### `get_shard_proposer_index`
|
||||
|
||||
```python
|
||||
def get_shard_proposer_index(beacon_state: BeaconState, shard: Shard, slot: ShardSlot) -> ValidatorIndex:
|
||||
epoch = get_current_epoch(beacon_state)
|
||||
shard_committee = get_shard_committee(beacon_state, shard, epoch)
|
||||
active_indices = [i for i in shard_committee if is_active_validator(beacon_state.validators[i], epoch)]
|
||||
assert any(active_indices)
|
||||
|
||||
epoch_seed = get_seed(beacon_state, epoch, DOMAIN_SHARD_PROPOSER)
|
||||
seed = hash(epoch_seed + int_to_bytes(slot, length=8) + int_to_bytes(shard, length=8))
|
||||
return compute_proposer_index(beacon_state, active_indices, seed)
|
||||
```
|
||||
|
||||
### Shard state mutators
|
||||
|
||||
#### `process_delta`
|
||||
|
||||
```python
|
||||
def process_delta(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
index: ValidatorIndex,
|
||||
delta: Gwei,
|
||||
positive: bool=True) -> None:
|
||||
epoch = compute_epoch_of_shard_slot(shard_state.slot)
|
||||
older_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 2))
|
||||
newer_committee = get_period_committee(beacon_state, shard_state.shard, compute_shard_period_start_epoch(epoch, 1))
|
||||
if index in older_committee:
|
||||
if positive:
|
||||
shard_state.older_committee_positive_deltas[older_committee.index(index)] += delta
|
||||
else:
|
||||
shard_state.older_committee_negative_deltas[older_committee.index(index)] += delta
|
||||
elif index in newer_committee:
|
||||
if positive:
|
||||
shard_state.newer_committee_positive_deltas[newer_committee.index(index)] += delta
|
||||
else:
|
||||
shard_state.newer_committee_negative_deltas[newer_committee.index(index)] += delta
|
||||
```
|
||||
|
||||
## Genesis
|
||||
|
||||
### `get_genesis_shard_state`
|
||||
|
||||
```python
|
||||
def get_genesis_shard_state(shard: Shard) -> ShardState:
|
||||
return ShardState(
|
||||
shard=shard,
|
||||
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
|
||||
latest_block_header=ShardBlockHeader(
|
||||
shard=shard,
|
||||
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
|
||||
body_root=hash_tree_root(List[byte, MAX_SHARD_BLOCK_SIZE - SHARD_HEADER_SIZE]()),
|
||||
),
|
||||
block_body_price=MIN_BLOCK_BODY_PRICE,
|
||||
)
|
||||
```
|
||||
|
||||
### `get_genesis_shard_block`
|
||||
|
||||
```python
|
||||
def get_genesis_shard_block(shard: Shard) -> ShardBlock:
|
||||
return ShardBlock(
|
||||
shard=shard,
|
||||
slot=ShardSlot(SHARD_GENESIS_EPOCH * SHARD_SLOTS_PER_EPOCH),
|
||||
state_root=hash_tree_root(get_genesis_shard_state(shard)),
|
||||
)
|
||||
```
|
||||
|
||||
## Shard state transition function
|
||||
|
||||
```python
|
||||
def shard_state_transition(beacon_state: BeaconState,
|
||||
shard_state: ShardState,
|
||||
block: ShardBlock,
|
||||
validate_state_root: bool=False) -> ShardState:
|
||||
# Process slots (including those with no blocks) since block
|
||||
process_shard_slots(shard_state, block.slot)
|
||||
# Process block
|
||||
process_shard_block(beacon_state, shard_state, block)
|
||||
# Validate state root (`validate_state_root == True` in production)
|
||||
if validate_state_root:
|
||||
assert block.state_root == hash_tree_root(shard_state)
|
||||
# Return post-state
|
||||
return shard_state
|
||||
```
|
||||
|
||||
```python
|
||||
def process_shard_slots(shard_state: ShardState, slot: ShardSlot) -> None:
|
||||
assert shard_state.slot <= slot
|
||||
while shard_state.slot < slot:
|
||||
process_shard_slot(shard_state)
|
||||
# Process shard period on the start slot of the next shard period
|
||||
if (shard_state.slot + 1) % (SHARD_SLOTS_PER_EPOCH * EPOCHS_PER_SHARD_PERIOD) == 0:
|
||||
process_shard_period(shard_state)
|
||||
shard_state.slot += ShardSlot(1)
|
||||
```
|
||||
|
||||
```python
|
||||
def process_shard_slot(shard_state: ShardState) -> None:
|
||||
# Cache state root
|
||||
previous_state_root = hash_tree_root(shard_state)
|
||||
if shard_state.latest_block_header.state_root == Bytes32():
|
||||
shard_state.latest_block_header.state_root = previous_state_root
|
||||
# Cache state root in history accumulator
|
||||
depth = 0
|
||||
while shard_state.slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_DEPTH:
|
||||
shard_state.history_accumulator[depth] = previous_state_root
|
||||
depth += 1
|
||||
```
|
||||
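
The accumulator loop writes one entry per power of two dividing the slot, so recent roots stay dense while older roots thin out. A self-contained illustration of which depths are touched at a given slot:

```python
HISTORY_ACCUMULATOR_DEPTH = 64


def updated_depths(slot: int) -> list:
    # Mirrors the while-loop above: depth d is written iff 2**d divides slot
    depths, depth = [], 0
    while slot % 2**depth == 0 and depth < HISTORY_ACCUMULATOR_DEPTH:
        depths.append(depth)
        depth += 1
    return depths


assert updated_depths(12) == [0, 1, 2]    # 12 is divisible by 1, 2, 4 but not 8
assert updated_depths(8) == [0, 1, 2, 3]
```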
|
||||
### Period processing
|
||||
|
||||
```python
|
||||
def process_shard_period(shard_state: ShardState) -> None:
|
||||
# Rotate committee deltas
|
||||
shard_state.older_committee_positive_deltas = shard_state.newer_committee_positive_deltas
|
||||
shard_state.older_committee_negative_deltas = shard_state.newer_committee_negative_deltas
|
||||
shard_state.newer_committee_positive_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)]
|
||||
shard_state.newer_committee_negative_deltas = [Gwei(0) for _ in range(MAX_PERIOD_COMMITTEE_SIZE)]
|
||||
```
|
||||
|
||||
### Block processing
|
||||
|
||||
```python
|
||||
def process_shard_block(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
|
||||
process_shard_block_header(beacon_state, shard_state, block)
|
||||
process_shard_attestations(beacon_state, shard_state, block)
|
||||
process_shard_block_body(beacon_state, shard_state, block)
|
||||
```
|
||||
|
||||
#### Block header
|
||||
|
||||
```python
|
||||
def process_shard_block_header(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
|
||||
# Verify the shard number
|
||||
assert block.shard == shard_state.shard
|
||||
# Verify the slot number
|
||||
assert block.slot == shard_state.slot
|
||||
# Verify the beacon chain root
|
||||
epoch = compute_epoch_of_shard_slot(shard_state.slot)
|
||||
assert epoch * SLOTS_PER_EPOCH == beacon_state.slot
|
||||
beacon_block_header = BeaconBlockHeader(
|
||||
slot=beacon_state.latest_block_header.slot,
|
||||
parent_root=beacon_state.latest_block_header.parent_root,
|
||||
state_root=beacon_state.latest_block_header.state_root,
|
||||
body_root=beacon_state.latest_block_header.body_root,
|
||||
)
|
||||
if beacon_block_header.state_root == Bytes32():
|
||||
beacon_block_header.state_root = hash_tree_root(beacon_state)
|
||||
assert block.beacon_block_root == hash_tree_root(beacon_block_header)
|
||||
# Verify the parent root
|
||||
assert block.parent_root == hash_tree_root(shard_state.latest_block_header)
|
||||
# Save current block as the new latest block
|
||||
shard_state.latest_block_header = ShardBlockHeader(
|
||||
shard=block.shard,
|
||||
slot=block.slot,
|
||||
beacon_block_root=block.beacon_block_root,
|
||||
parent_root=block.parent_root,
|
||||
# `state_root` is zeroed and overwritten in the next `process_shard_slot` call
|
||||
body_root=hash_tree_root(block.body),
|
||||
block_size_sum=block.block_size_sum,
|
||||
aggregation_bits=block.aggregation_bits,
|
||||
attestations=block.attestations,
|
||||
# `signature` is zeroed
|
||||
)
|
||||
# Verify the sum of the block sizes since genesis
|
||||
shard_state.block_size_sum += SHARD_HEADER_SIZE + len(block.body)
|
||||
assert block.block_size_sum == shard_state.block_size_sum
|
||||
# Verify proposer is not slashed
|
||||
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
|
||||
proposer = beacon_state.validators[proposer_index]
|
||||
assert not proposer.slashed
|
||||
# Verify proposer signature
|
||||
domain = get_domain(beacon_state, DOMAIN_SHARD_PROPOSER, compute_epoch_of_shard_slot(block.slot))
|
||||
assert bls.Verify(proposer.pubkey, compute_signing_root(block, domain), block.signature)
|
||||
```
|
||||
|
||||
#### Attestations
|
||||
|
||||
```python
|
||||
def process_shard_attestations(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
|
||||
pubkeys = []
|
||||
attestation_count = 0
|
||||
shard_committee = get_shard_committee(beacon_state, shard_state.shard, block.slot)
|
||||
for i, validator_index in enumerate(shard_committee):
|
||||
if block.aggregation_bits[i]:
|
||||
pubkeys.append(beacon_state.validators[validator_index].pubkey)
|
||||
process_delta(beacon_state, shard_state, validator_index, get_base_reward(beacon_state, validator_index))
|
||||
attestation_count += 1
|
||||
# Verify there are no extraneous bits set beyond the shard committee
|
||||
for i in range(len(shard_committee), 2 * MAX_PERIOD_COMMITTEE_SIZE):
|
||||
assert block.aggregation_bits[i] == 0b0
|
||||
# Verify attester aggregate signature
|
||||
domain = get_domain(beacon_state, DOMAIN_SHARD_ATTESTER, compute_epoch_of_shard_slot(block.slot))
|
||||
shard_attestation_data = ShardAttestationData(slot=shard_state.slot, parent_root=block.parent_root)
|
||||
signing_root = compute_signing_root(shard_attestation_data, domain)
|
||||
assert bls.FastAggregateVerify(pubkeys, signing_root, block.attestations)
|
||||
# Proposer micro-reward
|
||||
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
|
||||
reward = attestation_count * get_base_reward(beacon_state, proposer_index) // PROPOSER_REWARD_QUOTIENT
|
||||
process_delta(beacon_state, shard_state, proposer_index, Gwei(reward))
|
||||
```
|
||||
|
||||
#### Block body
|
||||
|
||||
```python
|
||||
def process_shard_block_body(beacon_state: BeaconState, shard_state: ShardState, block: ShardBlock) -> None:
|
||||
# Verify block body size is a multiple of the header size
|
||||
assert len(block.body) % SHARD_HEADER_SIZE == 0
|
||||
# Apply proposer block body fee
|
||||
block_body_fee = shard_state.block_body_price * len(block.body) // MAX_SHARD_BLOCK_SIZE
|
||||
proposer_index = get_shard_proposer_index(beacon_state, shard_state.shard, block.slot)
|
||||
process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee), positive=False) # Burn
|
||||
process_delta(beacon_state, shard_state, proposer_index, Gwei(block_body_fee // PROPOSER_REWARD_QUOTIENT)) # Reward
|
||||
# Calculate new block body price
|
||||
block_size = SHARD_HEADER_SIZE + len(block.body)
|
||||
QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT
|
||||
if block_size > SHARD_BLOCK_SIZE_TARGET:
|
||||
price_delta = Gwei(shard_state.block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT)
|
||||
# The maximum block body price caps the amount burnt on fees within a shard period
|
||||
MAX_BLOCK_BODY_PRICE = MAX_EFFECTIVE_BALANCE // EPOCHS_PER_SHARD_PERIOD // SHARD_SLOTS_PER_EPOCH
|
||||
shard_state.block_body_price = Gwei(min(MAX_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta))
|
||||
else:
|
||||
price_delta = Gwei(shard_state.block_body_price * (SHARD_BLOCK_SIZE_TARGET - block_size) // QUOTIENT)
|
||||
shard_state.block_body_price = Gwei(max(MIN_BLOCK_BODY_PRICE, shard_state.block_body_price + price_delta))
|
||||
```
|
||||
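
A worked example of the price update, using the constants above and an assumed current price (illustrative only):

```python
MAX_SHARD_BLOCK_SIZE = 2**16
SHARD_BLOCK_SIZE_TARGET = 2**14
BLOCK_BODY_PRICE_QUOTIENT = 2**3
QUOTIENT = MAX_SHARD_BLOCK_SIZE * BLOCK_BODY_PRICE_QUOTIENT  # 524,288

block_body_price = 1000  # assumed current price, in Gwei
block_size = 2**15       # a block twice the target size

price_delta = block_body_price * (block_size - SHARD_BLOCK_SIZE_TARGET) // QUOTIENT
assert price_delta == 31  # the price rises ~3% for a block twice the target
```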
|
||||
## Shard fork choice rule
|
||||
|
||||
The fork choice rule for any shard is LMD GHOST using the shard attestations of the shard committee and the beacon chain attestations of the crosslink committee currently assigned to that shard, but instead of being rooted in the genesis block it is rooted in the block referenced in the most recent accepted crosslink (i.e. `beacon_state.crosslinks[shard].shard_block_root`). Only blocks whose `beacon_block_root` is the block in the main beacon chain at the specified `slot` should be considered. (If the beacon chain skips a slot, then the block at that slot is considered to be the block in the beacon chain at the highest slot lower than that slot.)
|
|
@ -1,19 +0,0 @@
|
|||
# Eth2 config helpers
|
||||
|
||||
`preset_loader`: A util to load config-presets with.
|
||||
See [Configs documentation](../../../configs/README.md).
|
||||
|
||||
Usage:
|
||||
|
||||
```python
|
||||
configs_path = 'configs/'
|
||||
|
||||
...
|
||||
|
||||
import preset_loader
|
||||
from eth2spec.phase0 import spec
|
||||
my_presets = preset_loader.load_presets(configs_path, 'mainnet')
|
||||
spec.apply_constants_preset(my_presets)
|
||||
```
|
||||
|
||||
WARNING: this overwrites globals, make sure to prevent accidental collisions with other usage of the same imported specs package.
|
|
@ -1,25 +0,0 @@
|
|||
from typing import Dict, Any
|
||||
|
||||
from ruamel.yaml import (
|
||||
YAML,
|
||||
)
|
||||
from pathlib import Path
|
||||
from os.path import join
|
||||
|
||||
|
||||
def load_presets(configs_dir, presets_name) -> Dict[str, Any]:
|
||||
"""
|
||||
Loads the given preset
|
||||
:param presets_name: The name of the presets. (lowercase snake_case)
|
||||
:return: Dictionary, mapping of constant-name -> constant-value
|
||||
"""
|
||||
path = Path(join(configs_dir, presets_name+'.yaml'))
|
||||
yaml = YAML(typ='base')
|
||||
loaded = yaml.load(path)
|
||||
out = dict()
|
||||
for k, v in loaded.items():
|
||||
if v.startswith("0x"):
|
||||
out[k] = bytes.fromhex(v[2:])
|
||||
else:
|
||||
out[k] = int(v)
|
||||
return out
|
|
@ -1 +0,0 @@
|
|||
ruamel.yaml==0.16.5
|
|
@ -1,9 +0,0 @@
|
|||
from distutils.core import setup
|
||||
|
||||
setup(
|
||||
name='config_helpers',
|
||||
packages=['preset_loader'],
|
||||
install_requires=[
|
||||
"ruamel.yaml==0.16.5"
|
||||
]
|
||||
)
|
|
@ -7,22 +7,31 @@ With this executable spec,
|
|||
test-generators can easily create test-vectors for client implementations,
|
||||
and the spec itself can be verified to be consistent and coherent through sanity tests implemented with pytest.
|
||||
|
||||
|
||||
## Building
|
||||
|
||||
All the dynamic parts of the spec can be built at once with `make pyspec`.
|
||||
To build the pyspec: `python setup.py build`
|
||||
(or `pip install .`, but beware that ignored files will still be copied over to a temporary dir, due to pip issue 2195).
|
||||
This outputs the build files to the `./build/lib/eth2spec/...` dir, and can't be used for local test running. Instead, use the dev-install as described below.
|
||||
|
||||
Alternatively, you can build a sub-set of the pyspec: `make phase0`.
|
||||
## Dev Install
|
||||
|
||||
Or, to build a single file, specify the path, e.g. `make test_libs/pyspec/eth2spec/phase0/spec.py`.
|
||||
All the dynamic parts of the spec are automatically built with `python setup.py pyspecdev`.
|
||||
Unlike the regular install, this outputs spec files to their original source location, instead of build output only.
|
||||
|
||||
Alternatively, you can build a sub-set of the pyspec with the distutil command:
|
||||
```bash
|
||||
python setup.py pyspec --spec-fork=phase0 --md-doc-paths="specs/phase0/beacon-chain.md specs/phase0/fork-choice.md" --out-dir=my_spec_dir
|
||||
```
|
||||
|
||||
## Py-tests
|
||||
|
||||
After building, you can install the dependencies for running the `pyspec` tests with `make install_test`.
|
||||
After installing, you can install the optional dependencies for testing and linting.
|
||||
With makefile: `make install_test`.
|
||||
Or manually: run `pip install .[testing]` and `pip install .[linting]`.
|
||||
|
||||
These tests are not intended for client-consumption.
|
||||
These tests are sanity tests, to verify if the spec itself is consistent.
|
||||
These tests exercise the spec itself, verifying consistency and providing feedback on modifications of the spec.
|
||||
However, most of the tests can be run in generator-mode, to output test vectors for client-consumption.
|
||||
|
||||
### How to run tests
|
||||
|
||||
|
@ -32,23 +41,19 @@ Run `make test` from the root of the specs repository (after running `make insta
|
|||
|
||||
#### Manual
|
||||
|
||||
From within the `pyspec` folder:
|
||||
From the repository root:
|
||||
|
||||
Install dependencies:
|
||||
Install venv and install:
|
||||
```bash
|
||||
python3 -m venv venv
|
||||
. venv/bin/activate
|
||||
pip3 install -r requirements-testing.txt
|
||||
python setup.py pyspecdev
|
||||
```
|
||||
*Note*: Make sure to run `make -B pyspec` from the root of the specs repository,
|
||||
to build the parts of the pyspec module derived from the markdown specs.
|
||||
The `-B` flag may be helpful to force-overwrite the `pyspec` output after you made a change to the markdown source files.
|
||||
|
||||
Run the tests:
|
||||
Run the test command from the `tests/core/pyspec` directory:
|
||||
```
|
||||
pytest --config=minimal eth2spec
|
||||
```
|
||||
Note the package name; it is used to locate the tests.
|
||||
|
||||
### How to view code coverage report
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
0.11.0
|
|
@ -0,0 +1,4 @@
|
|||
# See setup.py about usage of VERSION.txt
|
||||
import os
|
||||
with open(os.path.join(os.path.dirname(__file__), 'VERSION.txt')) as f:
|
||||
__version__ = f.read().strip()
|
|
@ -0,0 +1,20 @@
|
|||
# Eth2 config util
|
||||
|
||||
For configuration, see [Configs documentation](../../../../../configs/README.md).
|
||||
|
||||
## Usage:
|
||||
|
||||
```python
|
||||
configs_path = 'configs/'
|
||||
|
||||
...
|
||||
|
||||
from eth2spec.config import config_util
|
||||
from eth2spec.phase0 import spec
|
||||
from importlib import reload
|
||||
my_presets = config_util.prepare_config(configs_path, 'mainnet')
|
||||
# reload spec to make loaded config effective
|
||||
reload(spec)
|
||||
```
|
||||
|
||||
WARNING: this overwrites globals, make sure to prevent accidental collisions with other usage of the same imported specs package.
|
|
@ -0,0 +1,44 @@
|
|||
from ruamel.yaml import YAML
|
||||
from pathlib import Path
|
||||
from os.path import join
|
||||
from typing import Dict, Any
|
||||
|
||||
config: Dict[str, Any] = {}
|
||||
|
||||
|
||||
# Access to overwrite spec constants based on configuration
|
||||
# This is called by the spec module after declaring its globals, and applies the loaded presets.
|
||||
def apply_constants_config(spec_globals: Dict[str, Any]) -> None:
|
||||
global config
|
||||
for k, v in config.items():
|
||||
if k.startswith('DOMAIN_'):
|
||||
spec_globals[k] = spec_globals['DomainType'](v) # domain types are defined as bytes in the configs
|
||||
else:
|
||||
spec_globals[k] = v
|
||||
|
||||
|
||||
# Loads presets from a file and prepares the global config setting. This does not apply the config.
|
||||
# To apply the config, reload the spec module (it will re-initialize with the config taken from here).
|
||||
def prepare_config(configs_path, config_name):
|
||||
global config
|
||||
config = load_config_file(configs_path, config_name)
|
||||
|
||||
|
||||
def load_config_file(configs_dir, presets_name) -> Dict[str, Any]:
|
||||
"""
|
||||
Loads the given preset
|
||||
:param presets_name: The name of the presets. (lowercase snake_case)
|
||||
:return: Dictionary, mapping of constant-name -> constant-value
|
||||
"""
|
||||
path = Path(join(configs_dir, presets_name + '.yaml'))
|
||||
yaml = YAML(typ='base')
|
||||
loaded = yaml.load(path)
|
||||
out = dict()
|
||||
for k, v in loaded.items():
|
||||
if isinstance(v, list):
|
||||
out[k] = v
|
||||
elif isinstance(v, str) and v.startswith("0x"):
|
||||
out[k] = bytes.fromhex(v[2:])
|
||||
else:
|
||||
out[k] = int(v)
|
||||
return out
|
|
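
An illustration of the value coercion in the loop above, with illustrative keys rather than a real file:

```python
raw = {"MAX_COMMITTEES_PER_SLOT": "64", "DOMAIN_BEACON_PROPOSER": "0x00000000"}
out = {}
for k, v in raw.items():
    # hex strings become bytes, everything else is parsed as an integer
    out[k] = bytes.fromhex(v[2:]) if v.startswith("0x") else int(v)

assert out["MAX_COMMITTEES_PER_SLOT"] == 64
assert out["DOMAIN_BEACON_PROPOSER"] == b"\x00\x00\x00\x00"
```

(The real `load_config_file` also passes lists through unchanged, per its `isinstance(v, list)` branch.)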
@ -1,21 +1,23 @@
|
|||
from typing import Any
|
||||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
SSZType, SSZValue, uint, Container, ByteList, List, boolean,
|
||||
Vector, ByteVector
|
||||
uint, Container, List, boolean,
|
||||
Vector, ByteVector, ByteList
|
||||
)
|
||||
|
||||
|
||||
def decode(data: Any, typ: SSZType) -> SSZValue:
|
||||
def decode(data: Any, typ):
|
||||
if issubclass(typ, (uint, boolean)):
|
||||
return typ(data)
|
||||
elif issubclass(typ, (List, Vector)):
|
||||
return typ(decode(element, typ.elem_type) for element in data)
|
||||
elif issubclass(typ, (ByteList, ByteVector)):
|
||||
return typ(decode(element, typ.element_cls()) for element in data)
|
||||
elif issubclass(typ, ByteVector):
|
||||
return typ(bytes.fromhex(data[2:]))
|
||||
elif issubclass(typ, ByteList):
|
||||
return typ(bytes.fromhex(data[2:]))
|
||||
elif issubclass(typ, Container):
|
||||
temp = {}
|
||||
for field_name, field_type in typ.get_fields().items():
|
||||
for field_name, field_type in typ.fields().items():
|
||||
temp[field_name] = decode(data[field_name], field_type)
|
||||
if field_name + "_hash_tree_root" in data:
|
||||
assert (data[field_name + "_hash_tree_root"][2:] ==
|
||||
|
|
|
@ -1,27 +1,30 @@
|
|||
from eth2spec.utils.ssz.ssz_impl import hash_tree_root, serialize
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
uint, boolean,
|
||||
Bitlist, Bitvector, Container
|
||||
Bitlist, Bitvector, Container, Vector, List
|
||||
)
|
||||
|
||||
|
||||
def encode(value, include_hash_tree_roots=False):
|
||||
if isinstance(value, uint):
|
||||
# Larger uints are boxed and the class declares their byte length
|
||||
if value.type().byte_len > 8:
|
||||
if value.__class__.type_byte_length() > 8:
|
||||
return str(int(value))
|
||||
return int(value)
|
||||
elif isinstance(value, boolean):
|
||||
return value == 1
|
||||
elif isinstance(value, (Bitlist, Bitvector)):
|
||||
return '0x' + serialize(value).hex()
|
||||
elif isinstance(value, list): # normal python lists, ssz-List, Vector
|
||||
elif isinstance(value, list): # normal python lists
|
||||
return [encode(element, include_hash_tree_roots) for element in value]
|
||||
elif isinstance(value, bytes): # both bytes and ByteVector
|
||||
elif isinstance(value, (List, Vector)):
|
||||
return [encode(element, include_hash_tree_roots) for element in value]
|
||||
elif isinstance(value, bytes): # bytes, ByteList, ByteVector
|
||||
return '0x' + value.hex()
|
||||
elif isinstance(value, Container):
|
||||
ret = {}
|
||||
for field_value, field_name in zip(value, value.get_fields().keys()):
|
||||
for field_name in value.fields().keys():
|
||||
field_value = getattr(value, field_name)
|
||||
ret[field_name] = encode(field_value, include_hash_tree_roots)
|
||||
if include_hash_tree_roots:
|
||||
ret[field_name + "_hash_tree_root"] = '0x' + hash_tree_root(field_value).hex()
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
from random import Random
|
||||
from enum import Enum
|
||||
|
||||
from typing import Type
|
||||
|
||||
from eth2spec.utils.ssz.ssz_typing import (
|
||||
SSZType, SSZValue, BasicValue, BasicType, uint, Container, ByteList, List, boolean,
|
||||
Vector, ByteVector, Bitlist, Bitvector
|
||||
View, BasicView, uint, Container, List, boolean,
|
||||
Vector, ByteVector, ByteList, Bitlist, Bitvector
|
||||
)
|
||||
|
||||
# in bytes
|
||||
|
@ -34,11 +36,11 @@ class RandomizationMode(Enum):
|
|||
|
||||
|
||||
def get_random_ssz_object(rng: Random,
|
||||
typ: SSZType,
|
||||
typ: Type[View],
|
||||
max_bytes_length: int,
|
||||
max_list_length: int,
|
||||
mode: RandomizationMode,
|
||||
chaos: bool) -> SSZValue:
|
||||
chaos: bool) -> View:
|
||||
"""
|
||||
Create an object for a given type, filled with random data.
|
||||
:param rng: The random number generator to use.
|
||||
|
@ -56,26 +58,26 @@ def get_random_ssz_object(rng: Random,
|
|||
if mode == RandomizationMode.mode_nil_count:
|
||||
return typ(b'')
|
||||
elif mode == RandomizationMode.mode_max_count:
|
||||
return typ(get_random_bytes_list(rng, min(max_bytes_length, typ.length)))
|
||||
return typ(get_random_bytes_list(rng, min(max_bytes_length, typ.limit())))
|
||||
elif mode == RandomizationMode.mode_one_count:
|
||||
return typ(get_random_bytes_list(rng, min(1, typ.length)))
|
||||
return typ(get_random_bytes_list(rng, min(1, typ.limit())))
|
||||
elif mode == RandomizationMode.mode_zero:
|
||||
return typ(b'\x00' * min(1, typ.length))
|
||||
return typ(b'\x00' * min(1, typ.limit()))
|
||||
elif mode == RandomizationMode.mode_max:
|
||||
return typ(b'\xff' * min(1, typ.length))
|
||||
return typ(b'\xff' * min(1, typ.limit()))
|
||||
else:
|
||||
return typ(get_random_bytes_list(rng, rng.randint(0, min(max_bytes_length, typ.length))))
|
||||
elif issubclass(typ, ByteVector):
|
||||
return typ(get_random_bytes_list(rng, rng.randint(0, min(max_bytes_length, typ.limit()))))
|
||||
if issubclass(typ, ByteVector):
|
||||
# Sanity, don't generate absurdly big random values
|
||||
# If a client is aiming to performance-test, they should create a benchmark suite.
|
||||
assert typ.length <= max_bytes_length
|
||||
assert typ.type_byte_length() <= max_bytes_length
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return typ(b'\x00' * typ.length)
|
||||
return typ(b'\x00' * typ.type_byte_length())
|
||||
elif mode == RandomizationMode.mode_max:
|
||||
return typ(b'\xff' * typ.length)
|
||||
return typ(b'\xff' * typ.type_byte_length())
|
||||
else:
|
||||
return typ(get_random_bytes_list(rng, typ.length))
|
||||
elif issubclass(typ, BasicValue):
|
||||
return typ(get_random_bytes_list(rng, typ.type_byte_length()))
|
||||
elif issubclass(typ, (boolean, uint)):
|
||||
# Basic types
|
||||
if mode == RandomizationMode.mode_zero:
|
||||
return get_min_basic_value(typ)
|
||||
|
@ -83,13 +85,14 @@ def get_random_ssz_object(rng: Random,
|
|||
return get_max_basic_value(typ)
|
||||
else:
|
||||
return get_random_basic_value(rng, typ)
|
||||
elif issubclass(typ, Vector) or issubclass(typ, Bitvector):
|
||||
elif issubclass(typ, (Vector, Bitvector)):
|
||||
elem_type = typ.element_cls() if issubclass(typ, Vector) else boolean
|
||||
return typ(
|
||||
get_random_ssz_object(rng, typ.elem_type, max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(typ.length)
|
||||
get_random_ssz_object(rng, elem_type, max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(typ.vector_length())
|
||||
)
|
||||
elif issubclass(typ, List) or issubclass(typ, Bitlist):
|
||||
length = rng.randint(0, min(typ.length, max_list_length))
|
||||
length = rng.randint(0, min(typ.limit(), max_list_length))
|
||||
if mode == RandomizationMode.mode_one_count:
|
||||
length = 1
|
||||
elif mode == RandomizationMode.mode_max_count:
|
||||
|
@ -97,19 +100,21 @@ def get_random_ssz_object(rng: Random,
|
|||
elif mode == RandomizationMode.mode_nil_count:
|
||||
length = 0
|
||||
|
||||
if typ.length < length: # SSZ imposes a hard limit on lists, we can't put in more than that
|
||||
length = typ.length
|
||||
if typ.limit() < length: # SSZ imposes a hard limit on lists, we can't put in more than that
|
||||
length = typ.limit()
|
||||
|
||||
elem_type = typ.element_cls() if issubclass(typ, List) else boolean
|
||||
return typ(
|
||||
get_random_ssz_object(rng, typ.elem_type, max_bytes_length, max_list_length, mode, chaos)
|
||||
get_random_ssz_object(rng, elem_type, max_bytes_length, max_list_length, mode, chaos)
|
||||
for _ in range(length)
|
||||
)
|
||||
elif issubclass(typ, Container):
|
||||
fields = typ.fields()
|
||||
# Container
|
||||
return typ(**{
|
||||
field_name:
|
||||
get_random_ssz_object(rng, field_type, max_bytes_length, max_list_length, mode, chaos)
|
||||
for field_name, field_type in typ.get_fields().items()
|
||||
for field_name, field_type in fields.items()
|
||||
})
|
||||
else:
|
||||
raise Exception(f"Type not recognized: typ={typ}")
|
||||
|
@ -119,31 +124,31 @@ def get_random_bytes_list(rng: Random, length: int) -> bytes:
|
|||
return bytes(rng.getrandbits(8) for _ in range(length))
|
||||
|
||||
|
||||
def get_random_basic_value(rng: Random, typ: BasicType) -> BasicValue:
|
||||
def get_random_basic_value(rng: Random, typ) -> BasicView:
|
||||
if issubclass(typ, boolean):
|
||||
return typ(rng.choice((True, False)))
|
||||
elif issubclass(typ, uint):
|
||||
assert typ.byte_len in UINT_BYTE_SIZES
|
||||
return typ(rng.randint(0, 256 ** typ.byte_len - 1))
|
||||
assert typ.type_byte_length() in UINT_BYTE_SIZES
|
||||
return typ(rng.randint(0, 256 ** typ.type_byte_length() - 1))
|
||||
else:
|
||||
raise ValueError(f"Not a basic type: typ={typ}")
|
||||
|
||||
|
||||
def get_min_basic_value(typ: BasicType) -> BasicValue:
|
||||
def get_min_basic_value(typ) -> BasicView:
|
||||
if issubclass(typ, boolean):
|
||||
return typ(False)
|
||||
elif issubclass(typ, uint):
|
||||
assert typ.byte_len in UINT_BYTE_SIZES
|
||||
assert typ.type_byte_length() in UINT_BYTE_SIZES
|
||||
return typ(0)
|
||||
else:
|
||||
raise ValueError(f"Not a basic type: typ={typ}")
|
||||
|
||||
|
||||
def get_max_basic_value(typ: BasicType) -> BasicValue:
|
||||
def get_max_basic_value(typ) -> BasicView:
|
||||
if issubclass(typ, boolean):
|
||||
return typ(True)
|
||||
elif issubclass(typ, uint):
|
||||
assert typ.byte_len in UINT_BYTE_SIZES
|
||||
return typ(256 ** typ.byte_len - 1)
|
||||
assert typ.type_byte_length() in UINT_BYTE_SIZES
|
||||
return typ(256 ** typ.type_byte_length() - 1)
|
||||
else:
|
||||
raise ValueError(f"Not a basic type: typ={typ}")
|
||||
|
|
|
@ -1,87 +0,0 @@
|
|||
from eth2spec.utils.ssz import ssz_typing as spec_ssz
|
||||
import ssz
|
||||
|
||||
|
||||
def translate_typ(typ) -> ssz.BaseSedes:
|
||||
"""
|
||||
Translates a spec type to a Py-SSZ type description (sedes).
|
||||
:param typ: The spec type, a class.
|
||||
:return: The Py-SSZ equivalent.
|
||||
"""
|
||||
if issubclass(typ, spec_ssz.Container):
|
||||
return ssz.Container(
|
||||
[translate_typ(field_typ) for field_name, field_typ in typ.get_fields().items()])
|
||||
elif issubclass(typ, spec_ssz.ByteVector):
|
||||
return ssz.ByteVector(typ.length)
|
||||
elif issubclass(typ, spec_ssz.ByteList):
|
||||
return ssz.ByteList()
|
||||
elif issubclass(typ, spec_ssz.Vector):
|
||||
return ssz.Vector(translate_typ(typ.elem_type), typ.length)
|
||||
elif issubclass(typ, spec_ssz.List):
|
||||
return ssz.List(translate_typ(typ.elem_type), typ.length)
|
||||
elif issubclass(typ, spec_ssz.Bitlist):
|
||||
return ssz.Bitlist(typ.length)
|
||||
elif issubclass(typ, spec_ssz.Bitvector):
|
||||
return ssz.Bitvector(typ.length)
|
||||
elif issubclass(typ, spec_ssz.boolean):
|
||||
return ssz.boolean
|
||||
elif issubclass(typ, spec_ssz.uint):
|
||||
if typ.byte_len == 1:
|
||||
return ssz.uint8
|
||||
elif typ.byte_len == 2:
|
||||
return ssz.uint16
|
||||
elif typ.byte_len == 4:
|
||||
return ssz.uint32
|
||||
elif typ.byte_len == 8:
|
||||
return ssz.uint64
|
||||
elif typ.byte_len == 16:
|
||||
return ssz.uint128
|
||||
elif typ.byte_len == 32:
|
||||
return ssz.uint256
|
||||
else:
|
||||
raise TypeError("invalid uint size")
|
||||
else:
|
||||
raise TypeError("Type not supported: {}".format(typ))
|
||||
|
||||
|
||||
def translate_value(value, typ):
|
||||
"""
|
||||
Translate a value output from Py-SSZ deserialization into the given spec type.
|
||||
:param value: The PySSZ value
|
||||
:param typ: The type from the spec to translate into
|
||||
:return: the translated value
|
||||
"""
|
||||
if issubclass(typ, spec_ssz.uint):
|
||||
if typ.byte_len == 1:
|
||||
return spec_ssz.uint8(value)
|
||||
elif typ.byte_len == 2:
|
||||
return spec_ssz.uint16(value)
|
||||
elif typ.byte_len == 4:
|
||||
return spec_ssz.uint32(value)
|
||||
elif typ.byte_len == 8:
|
||||
return spec_ssz.uint64(value)
|
||||
elif typ.byte_len == 16:
|
||||
return spec_ssz.uint128(value)
|
||||
elif typ.byte_len == 32:
|
||||
return spec_ssz.uint256(value)
|
||||
else:
|
||||
raise TypeError("invalid uint size")
|
||||
elif issubclass(typ, spec_ssz.List):
|
||||
return [translate_value(elem, typ.elem_type) for elem in value]
|
||||
elif issubclass(typ, spec_ssz.boolean):
|
||||
return value
|
||||
elif issubclass(typ, spec_ssz.Vector):
|
||||
return typ(*(translate_value(elem, typ.elem_type) for elem in value))
|
||||
elif issubclass(typ, spec_ssz.Bitlist):
|
||||
return typ(value)
|
||||
elif issubclass(typ, spec_ssz.Bitvector):
|
||||
return typ(value)
|
||||
elif issubclass(typ, spec_ssz.ByteVector):
|
||||
return typ(value)
|
||||
elif issubclass(typ, spec_ssz.ByteList):
|
||||
return value
|
||||
if issubclass(typ, spec_ssz.Container):
|
||||
return typ(**{f_name: translate_value(f_val, f_typ) for (f_val, (f_name, f_typ))
|
||||
in zip(value, typ.get_fields().items())})
|
||||
else:
|
||||
raise TypeError("Type not supported: {}".format(typ))
|
|
@ -1,35 +0,0 @@
from eth2spec.fuzzing.decoder import translate_typ, translate_value
from eth2spec.phase0 import spec
from eth2spec.utils.ssz import ssz_impl as spec_ssz_impl
from random import Random
from eth2spec.debug import random_value


def test_decoder():
    rng = Random(123)

    # check these types only, Block covers a lot of operation types already.
    for typ in [spec.Attestation, spec.BeaconState, spec.BeaconBlock]:
        # create a random pyspec value
        original = random_value.get_random_ssz_object(rng, typ, 100, 10,
                                                      mode=random_value.RandomizationMode.mode_random,
                                                      chaos=True)
        # serialize it, using pyspec
        pyspec_data = spec_ssz_impl.serialize(original)
        # get the py-ssz type for it
        block_sedes = translate_typ(typ)
        # try decoding using the py-ssz type
        raw_value = block_sedes.deserialize(pyspec_data)

        # serialize it using py-ssz
        pyssz_data = block_sedes.serialize(raw_value)
        # now check if the serialized forms are equal. If so, we have confirmed that decoding and encoding work.
        assert pyspec_data == pyssz_data

        # now translate the py-ssz value into a pyspec value
        block = translate_value(raw_value, typ)

        # and see if the hash-tree-root of the original matches the hash-tree-root of the decoded & translated value.
        original_hash_tree_root = spec_ssz_impl.hash_tree_root(original)
        assert original_hash_tree_root == spec_ssz_impl.hash_tree_root(block)
        assert original_hash_tree_root == block_sedes.get_hash_tree_root(raw_value)

@ -1,5 +1,6 @@
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.config import config_util
from eth2spec.test.context import reload_specs


# We import pytest only when it's present, i.e. when we are running tests.
# The test-cases themselves can be generated without installing pytest.

@ -33,7 +34,6 @@ def pytest_addoption(parser):
@fixture(autouse=True)
def config(request):
    config_name = request.config.getoption("--config")
    from preset_loader import loader
    presets = loader.load_presets('../../../configs/', config_name)
    spec_phase0.apply_constants_preset(presets)
    spec_phase1.apply_constants_preset(presets)
    config_util.prepare_config('../../../configs/', config_name)
    # now that the presets are loaded, reload the specs to apply them
    reload_specs()

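# Usage sketch (not part of the change): with the autouse fixture above, the
# preset is chosen on the command line, e.g. `pytest --config=minimal`, where
# "minimal" is assumed to name a preset directory under ../../../configs/.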
@ -1,29 +1,64 @@
from eth2spec.phase0 import spec as spec_phase0
# from eth2spec.phase1 import spec as spec_phase1
from eth2spec.phase1 import spec as spec_phase1
from eth2spec.utils import bls

from .helpers.genesis import create_genesis_state

from .utils import vector_test, with_meta_tags

from typing import Any, Callable, Sequence
from typing import Any, Callable, Sequence, TypedDict, Protocol

from importlib import reload


def reload_specs():
    reload(spec_phase0)
    reload(spec_phase1)


# Some of the Spec module functionality is exposed here to deal with phase-specific changes.

# TODO: currently phases are defined as Python modules.
# It would be better if they were well-defined interfaces, for stronger typing.
class Spec(Protocol):
    version: str


class Phase0(Spec):
    ...


class Phase1(Spec):
    def upgrade_to_phase1(self, state: spec_phase0.BeaconState) -> spec_phase1.BeaconState:
        ...


# add transfer, bridge, etc. as the spec evolves
class SpecForks(TypedDict, total=False):
    phase0: Phase0
    phase1: Phase1


def with_custom_state(balances_fn: Callable[[Any], Sequence[int]],
                      threshold_fn: Callable[[Any], int]):
    def deco(fn):
        def entry(*args, **kw):
        def entry(*args, spec: Spec, phases: SpecForks, **kw):
            try:
                spec = kw['spec']
                p0 = phases["phase0"]
                balances = balances_fn(p0)
                activation_threshold = threshold_fn(p0)

                balances = balances_fn(spec)
                activation_threshold = threshold_fn(spec)
                state = create_genesis_state(spec=p0, validator_balances=balances,
                                             activation_threshold=activation_threshold)
                if spec.fork == 'phase1':
                    # TODO: instead of upgrading a test phase0 genesis state we can also write a phase1 state helper.
                    # Decide based on performance/consistency results later.
                    state = phases["phase1"].upgrade_to_phase1(state)

                kw['state'] = create_genesis_state(spec=spec, validator_balances=balances,
                                                   activation_threshold=activation_threshold)
                kw['state'] = state
            except KeyError:
                raise TypeError('Spec decorator must come within state decorator to inject spec into state.')
            return fn(*args, **kw)
            return fn(*args, spec=spec, phases=phases, **kw)
        return entry
    return deco

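# Illustrative usage sketch (not part of the change): the decorator chain
# below mirrors how these helpers are combined in the tests further down.
#
#   @with_phases(['phase0'])
#   @spec_test
#   @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
#   @single_phase
#   def test_with_low_balances(spec, state):
#       ...  # `state` is a genesis state built from the custom balances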
@ -69,6 +104,19 @@ def misc_balances(spec):
    return [spec.MAX_EFFECTIVE_BALANCE] * num_validators + [spec.MIN_DEPOSIT_AMOUNT] * num_misc_validators


def single_phase(fn):
    """
    Decorator that filters out the phases data.
    Most state tests focus only on the behavior of a single phase (the "spec").
    This decorator is applied as part of spec_state_test(fn).
    """
    def entry(*args, **kw):
        if 'phases' in kw:
            kw.pop('phases')
        return fn(*args, **kw)
    return entry


# BLS is turned off by default *for performance purposes during TESTING*.
# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
# - Some tests are marked as BLS-requiring, and ignore this setting.

@ -88,9 +136,9 @@ def spec_test(fn):
    return vector_test()(bls_switch(fn))


# shorthand for decorating @spectest() @with_state
# shorthand for decorating @spectest() @with_state @single_phase
def spec_state_test(fn):
    return spec_test(with_state(fn))
    return spec_test(with_state(single_phase(fn)))


def expect_assertion_error(fn):

@ -169,15 +217,12 @@ def with_all_phases_except(exclusion_phases):
    return decorator


def with_phases(phases):
def with_phases(phases, other_phases=None):
    """
    Decorator factory that returns a decorator that runs a test for the appropriate phases
    Decorator factory that returns a decorator that runs a test for the appropriate phases.
    Additional phases that do not initially run, but are made available through the test, are optional.
    """
    def decorator(fn):
        def run_with_spec_version(spec, *args, **kw):
            kw['spec'] = spec
            return fn(*args, **kw)

        def wrapper(*args, **kw):
            run_phases = phases

@ -188,12 +233,25 @@ def with_phases(phases):
                    return
                run_phases = [phase]

            available_phases = set(run_phases)
            if other_phases is not None:
                available_phases |= set(other_phases)

            # TODO: test state is dependent on phase0 but is immediately transitioned to phase1.
            # A new state-creation helper for phase 1 may be in place, and then phase1+ tests can run without phase0
            available_phases.add('phase0')

            phase_dir = {}
            if 'phase0' in available_phases:
                phase_dir['phase0'] = spec_phase0
            if 'phase1' in available_phases:
                phase_dir['phase1'] = spec_phase1

            # The return value is ignored whenever multiple phases are run.
            if 'phase0' in run_phases:
                ret = run_with_spec_version(spec_phase0, *args, **kw)
                ret = fn(spec=spec_phase0, phases=phase_dir, *args, **kw)
            if 'phase1' in run_phases:
                # temporarily disable phase 1 tests
                return
                # ret = run_with_spec_version(spec_phase1, *args, **kw)
                ret = fn(spec=spec_phase1, phases=phase_dir, *args, **kw)
            return ret
        return wrapper
    return decorator

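# Illustrative sketch (not part of the change): a phase0-only test that still
# wants the phase1 module available can request it via `other_phases`:
#
#   @with_phases(['phase0'], other_phases=['phase1'])
#   @spec_test
#   def test_crossing_forks(spec, phases):
#       ...  # runs with the phase0 spec; phases['phase1'] is also populated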
@ -30,22 +30,29 @@ def add_attestation_to_store(spec, store, attestation):
    spec.on_attestation(store, attestation)


def get_anchor_root(spec, state):
    anchor_block_header = state.latest_block_header.copy()
    if anchor_block_header.state_root == spec.Bytes32():
        anchor_block_header.state_root = spec.hash_tree_root(state)
    return spec.hash_tree_root(anchor_block_header)


@with_all_phases
@spec_state_test
def test_genesis(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
    assert spec.get_head(store) == spec.hash_tree_root(genesis_block)
    store = spec.get_forkchoice_store(state)
    anchor_root = get_anchor_root(spec, state)
    assert spec.get_head(store) == anchor_root


@with_all_phases
@spec_state_test
def test_chain_no_attestations(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
    assert spec.get_head(store) == spec.hash_tree_root(genesis_block)
    store = spec.get_forkchoice_store(state)
    anchor_root = get_anchor_root(spec, state)
    assert spec.get_head(store) == anchor_root

    # On receiving a block of `GENESIS_SLOT + 1` slot
    block_1 = build_empty_block_for_next_slot(spec, state)

@ -66,9 +73,9 @@ def test_split_tie_breaker_no_attestations(spec, state):
    genesis_state = state.copy()

    # Initialization
    store = spec.get_genesis_store(state)
    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
    assert spec.get_head(store) == spec.hash_tree_root(genesis_block)
    store = spec.get_forkchoice_store(state)
    anchor_root = get_anchor_root(spec, state)
    assert spec.get_head(store) == anchor_root

    # block at slot 1
    block_1_state = genesis_state.copy()

@ -94,9 +101,9 @@ def test_shorter_chain_but_heavier_weight(spec, state):
    genesis_state = state.copy()

    # Initialization
    store = spec.get_genesis_store(state)
    genesis_block = spec.BeaconBlock(state_root=state.hash_tree_root())
    assert spec.get_head(store) == spec.hash_tree_root(genesis_block)
    store = spec.get_forkchoice_store(state)
    anchor_root = get_anchor_root(spec, state)
    assert spec.get_head(store) == anchor_root

    # build longer tree
    long_state = genesis_state.copy()

@ -122,15 +129,14 @@ def test_shorter_chain_but_heavier_weight(spec, state):
@spec_state_test
def test_filtered_block_tree(spec, state):
    # Initialization
    genesis_state_root = state.hash_tree_root()
    store = spec.get_genesis_store(state)
    genesis_block = spec.BeaconBlock(state_root=genesis_state_root)
    store = spec.get_forkchoice_store(state)
    anchor_root = get_anchor_root(spec, state)

    # transition state past initial couple of epochs
    next_epoch(spec, state)
    next_epoch(spec, state)

    assert spec.get_head(store) == spec.hash_tree_root(genesis_block)
    assert spec.get_head(store) == anchor_root

    # fill in attestations for entire epoch, justifying the recent epoch
    prev_state, signed_blocks, state = next_epoch_with_attestations(spec, state, True, False)

@ -15,8 +15,17 @@ def run_on_attestation(spec, state, store, attestation, valid=True):

    indexed_attestation = spec.get_indexed_attestation(state, attestation)
    spec.on_attestation(store, attestation)

    if spec.fork == 'phase0':
        sample_index = indexed_attestation.attesting_indices[0]
    else:
        attesting_indices = [
            index for i, index in enumerate(indexed_attestation.committee)
            if attestation.aggregation_bits[i]
        ]
        sample_index = attesting_indices[0]
    assert (
        store.latest_messages[indexed_attestation.attesting_indices[0]] ==
        store.latest_messages[sample_index] ==
        spec.LatestMessage(
            epoch=attestation.data.target.epoch,
            root=attestation.data.beacon_block_root,

@ -27,7 +36,7 @@ def run_on_attestation(spec, state, store, attestation, valid=True):
@with_all_phases
@spec_state_test
def test_on_attestation_current_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * 2)

    block = build_empty_block_for_next_slot(spec, state)

@ -46,7 +55,7 @@ def test_on_attestation_current_epoch(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_previous_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)

    block = build_empty_block_for_next_slot(spec, state)

@ -65,7 +74,7 @@ def test_on_attestation_previous_epoch(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_past_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)

    # move time forward 2 epochs
    time = store.time + 2 * spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH

@ -87,7 +96,7 @@ def test_on_attestation_past_epoch(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_mismatched_target_and_slot(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    spec.on_tick(store, store.time + spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH)

    block = build_empty_block_for_next_slot(spec, state)

@ -110,7 +119,7 @@ def test_on_attestation_mismatched_target_and_slot(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_target_not_in_store(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
    spec.on_tick(store, time)

@ -131,7 +140,7 @@ def test_on_attestation_target_not_in_store(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_beacon_block_not_in_store(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH
    spec.on_tick(store, time)

@ -159,7 +168,7 @@ def test_on_attestation_beacon_block_not_in_store(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_future_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 3 * spec.SECONDS_PER_SLOT
    spec.on_tick(store, time)

@ -179,7 +188,7 @@ def test_on_attestation_future_epoch(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_future_block(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = spec.SECONDS_PER_SLOT * 5
    spec.on_tick(store, time)

@ -199,7 +208,7 @@ def test_on_attestation_future_block(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_same_slot(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 1 * spec.SECONDS_PER_SLOT
    spec.on_tick(store, time)

@ -215,7 +224,7 @@ def test_on_attestation_same_slot(spec, state):
@with_all_phases
@spec_state_test
def test_on_attestation_invalid_attestation(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 3 * spec.SECONDS_PER_SLOT
    spec.on_tick(store, time)

@ -36,7 +36,7 @@ def apply_next_epoch_with_attestations(spec, state, store):
@spec_state_test
def test_basic(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)
    assert store.time == time

@ -60,7 +60,7 @@ def test_basic(spec, state):
@spec_state_test
def test_on_block_checkpoints(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -86,7 +86,7 @@ def test_on_block_checkpoints(spec, state):
@spec_state_test
def test_on_block_future_block(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)

    # do not tick time

@ -100,7 +100,7 @@ def test_on_block_future_block(spec, state):
@spec_state_test
def test_on_block_bad_parent_root(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -120,7 +120,7 @@ def test_on_block_bad_parent_root(spec, state):
@spec_state_test
def test_on_block_before_finalized(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -139,7 +139,7 @@ def test_on_block_before_finalized(spec, state):
@spec_state_test
def test_on_block_finalized_skip_slots(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -159,7 +159,7 @@ def test_on_block_finalized_skip_slots(spec, state):
@spec_state_test
def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)

    store.finalized_checkpoint = spec.Checkpoint(
        epoch=store.finalized_checkpoint.epoch + 2,

@ -181,7 +181,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state):
@spec_state_test
def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -212,7 +212,7 @@ def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state):
@spec_state_test
def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -262,7 +262,7 @@ def test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state):
@spec_state_test
def test_on_block_outside_safe_slots_but_finality(spec, state):
    # Initialization
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    time = 100
    spec.on_tick(store, time)

@ -19,14 +19,14 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False):
@with_all_phases
@spec_state_test
def test_basic(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    run_on_tick(spec, store, store.time + 1)


@with_all_phases
@spec_state_test
def test_update_justified_single(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH

    store.best_justified_checkpoint = spec.Checkpoint(

@ -40,7 +40,7 @@ def test_update_justified_single(spec, state):
@with_all_phases
@spec_state_test
def test_no_update_same_slot_at_epoch_boundary(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH

    store.best_justified_checkpoint = spec.Checkpoint(

@ -57,7 +57,7 @@ def test_no_update_same_slot_at_epoch_boundary(spec, state):
@with_all_phases
@spec_state_test
def test_no_update_not_epoch_boundary(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)

    store.best_justified_checkpoint = spec.Checkpoint(
        epoch=store.justified_checkpoint.epoch + 1,

@ -70,7 +70,7 @@ def test_no_update_not_epoch_boundary(spec, state):
@with_all_phases
@spec_state_test
def test_no_update_new_justified_equal_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH

    store.best_justified_checkpoint = spec.Checkpoint(

@ -89,7 +89,7 @@ def test_no_update_new_justified_equal_epoch(spec, state):
@with_all_phases
@spec_state_test
def test_no_update_new_justified_later_epoch(spec, state):
    store = spec.get_genesis_store(state)
    store = spec.get_forkchoice_store(state)
    seconds_per_epoch = spec.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH

    store.best_justified_checkpoint = spec.Checkpoint(

@ -1,4 +1,4 @@
from eth2spec.test.context import spec_test, with_phases
from eth2spec.test.context import spec_test, with_phases, single_phase
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
)

@ -6,6 +6,7 @@ from eth2spec.test.helpers.deposits import (
@with_phases(['phase0'])
@spec_test
@single_phase
def test_initialize_beacon_state_from_eth1(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    deposits, deposit_root, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)

@ -33,6 +34,7 @@ def test_initialize_beacon_state_from_eth1(spec):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_initialize_beacon_state_some_small_balances(spec):
    main_deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT
    main_deposits, _, deposit_data_list = prepare_genesis_deposits(spec, main_deposit_count,

@ -1,4 +1,4 @@
from eth2spec.test.context import spec_test, with_phases
from eth2spec.test.context import spec_test, with_phases, single_phase
from eth2spec.test.helpers.deposits import (
    prepare_genesis_deposits,
)

@ -27,7 +27,7 @@ def run_is_valid_genesis_state(spec, state, valid=True):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_is_valid_genesis_state_true(spec):
    state = create_valid_beacon_state(spec)

@ -35,6 +36,7 @@ def test_is_valid_genesis_state_true(spec):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_is_valid_genesis_state_false_invalid_timestamp(spec):
    state = create_valid_beacon_state(spec)
    state.genesis_time = spec.MIN_GENESIS_TIME - 1

@ -44,6 +46,7 @@ def test_is_valid_genesis_state_false_invalid_timestamp(spec):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_is_valid_genesis_state_true_more_balance(spec):
    state = create_valid_beacon_state(spec)
    state.validators[0].effective_balance = spec.MAX_EFFECTIVE_BALANCE + 1

@ -63,6 +66,7 @@ def test_is_valid_genesis_state_true_more_balance(spec):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_is_valid_genesis_state_true_one_more_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT + 1
    deposits, _, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)

@ -76,6 +80,7 @@ def test_is_valid_genesis_state_true_one_more_validator(spec):
@with_phases(['phase0'])
@spec_test
@single_phase
def test_is_valid_genesis_state_false_not_enough_validator(spec):
    deposit_count = spec.MIN_GENESIS_ACTIVE_VALIDATOR_COUNT - 1
    deposits, _, _ = prepare_genesis_deposits(spec, deposit_count, spec.MAX_EFFECTIVE_BALANCE, signed=True)

@ -77,12 +77,22 @@ def sign_aggregate_attestation(spec, state, attestation_data, participants: List
            privkey
        )
    )
    # TODO: we should try signing custody bits if spec.fork == 'phase1'
    return bls.Aggregate(signatures)


def sign_indexed_attestation(spec, state, indexed_attestation):
    participants = indexed_attestation.attesting_indices
    indexed_attestation.signature = sign_aggregate_attestation(spec, state, indexed_attestation.data, participants)
    if spec.fork == 'phase0':
        participants = indexed_attestation.attesting_indices
        data = indexed_attestation.data
        indexed_attestation.signature = sign_aggregate_attestation(spec, state, data, participants)
    else:
        participants = spec.get_indices_from_committee(
            indexed_attestation.committee,
            indexed_attestation.attestation.aggregation_bits,
        )
        data = indexed_attestation.attestation.data
        indexed_attestation.attestation.signature = sign_aggregate_attestation(spec, state, data, participants)


def sign_attestation(spec, state, attestation):

@ -1,12 +1,10 @@
from copy import deepcopy

from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation


def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
    attestation_1 = get_valid_attestation(spec, state, signed=signed_1)

    attestation_2 = deepcopy(attestation_1)
    attestation_2 = attestation_1.copy()
    attestation_2.data.target.root = b'\x01' * 32

    if signed_2:

@ -16,3 +14,40 @@ def get_valid_attester_slashing(spec, state, signed_1=False, signed_2=False):
        attestation_1=spec.get_indexed_attestation(state, attestation_1),
        attestation_2=spec.get_indexed_attestation(state, attestation_2),
    )


def get_indexed_attestation_participants(spec, indexed_att):
    """
    Wrapper around an indexed attestation to return the list of participant indices, regardless of spec phase.
    """
    if spec.fork == "phase1":
        return list(spec.get_indices_from_committee(
            indexed_att.committee,
            indexed_att.attestation.aggregation_bits,
        ))
    else:
        return list(indexed_att.attesting_indices)


def set_indexed_attestation_participants(spec, indexed_att, participants):
    """
    Wrapper around an indexed attestation to set the participant indices, regardless of spec phase.
    """
    if spec.fork == "phase1":
        indexed_att.attestation.aggregation_bits = [bool(i in participants) for i in indexed_att.committee]
    else:
        indexed_att.attesting_indices = participants


def get_attestation_1_data(spec, att_slashing):
    if spec.fork == "phase1":
        return att_slashing.attestation_1.attestation.data
    else:
        return att_slashing.attestation_1.data


def get_attestation_2_data(spec, att_slashing):
    if spec.fork == "phase1":
        return att_slashing.attestation_2.attestation.data
    else:
        return att_slashing.attestation_2.data

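# Illustrative usage (not part of the change): these wrappers let slashing
# tests stay phase-agnostic, e.g.
#
#   participants = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
#   set_indexed_attestation_participants(spec, attester_slashing.attestation_1, participants[1:])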
@ -1,5 +1,3 @@
from copy import deepcopy

from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls

@ -16,7 +14,7 @@ def get_proposer_index_maybe(spec, state, slot, proposer_index=None):
            print("warning: block slot far away, and no proposer index manually given."
                  " Signing block is slow due to transition for proposer index calculation.")
            # use stub state to get proposer index of future slot
            stub_state = deepcopy(state)
            stub_state = state.copy()
            spec.process_slots(stub_state, slot)
            proposer_index = spec.get_beacon_proposer_index(stub_state)
    return proposer_index

@ -67,14 +65,26 @@ def apply_empty_block(spec, state):


def build_empty_block(spec, state, slot=None):
    """
    Build an empty block for ``slot``, built upon the latest block header seen by ``state``.
    Slot must be greater than or equal to the current slot in ``state``.
    """
    if slot is None:
        slot = state.slot
    if slot < state.slot:
        raise Exception("build_empty_block cannot build blocks for past slots")
    if slot > state.slot:
        # transition forward in copied state to grab relevant data from state
        state = state.copy()
        spec.process_slots(state, slot)

    empty_block = spec.BeaconBlock()
    empty_block.slot = slot
    empty_block.proposer_index = spec.get_beacon_proposer_index(state)
    empty_block.body.eth1_data.deposit_count = state.eth1_deposit_index
    previous_block_header = deepcopy(state.latest_block_header)
    previous_block_header = state.latest_block_header.copy()
    if previous_block_header.state_root == spec.Root():
        previous_block_header.state_root = state.hash_tree_root()
        previous_block_header.state_root = hash_tree_root(state)
    empty_block.parent_root = hash_tree_root(previous_block_header)
    apply_randao_reveal(spec, state, empty_block)
    return empty_block

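# Note (a sketch, not part of the change): build_empty_block_for_next_slot(spec, state),
# used throughout these tests, is the shorthand that delegates to
# build_empty_block(spec, state, state.slot + 1).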
@ -1,9 +1,10 @@
from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.hash_function import hash
from eth2spec.utils.ssz.ssz_typing import Bitlist, ByteVector, Bitvector
from eth2spec.utils.ssz.ssz_impl import chunkify, pack, hash_tree_root
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.utils.merkle_minimal import get_merkle_tree, get_merkle_proof
from remerkleable.core import pack_bits_to_chunks
from remerkleable.tree import subtree_fill_to_contents, get_depth

BYTES_PER_CHUNK = 32

@ -21,7 +22,7 @@ def get_valid_early_derived_secret_reveal(spec, state, epoch=None):
    signing_root = spec.compute_signing_root(spec.Epoch(epoch), domain)
    reveal = bls.Sign(privkeys[revealed_index], signing_root)
    # Generate the mask (any random 32 bytes that don't reveal the masker's secret will do)
    mask = hash(reveal)
    mask = spec.hash(reveal)
    # Generate masker's signature on the mask
    signing_root = spec.compute_signing_root(mask, domain)
    masker_signature = bls.Sign(privkeys[masker_index], signing_root)

@ -120,10 +121,11 @@ def get_valid_custody_response(spec, state, bit_challenge, custody_data, challen
    data_branch = get_merkle_proof(data_tree, chunk_index)

    bitlist_chunk_index = chunk_index // BYTES_PER_CHUNK
    bitlist_chunks = chunkify(pack(bit_challenge.chunk_bits))
    bitlist_tree = get_merkle_tree(bitlist_chunks, pad_to=spec.MAX_CUSTODY_CHUNKS // 256)
    bitlist_chunk_branch = get_merkle_proof(bitlist_tree, chunk_index // 256) + \
        [len(bit_challenge.chunk_bits).to_bytes(32, "little")]
    print(bitlist_chunk_index)
    bitlist_chunk_nodes = pack_bits_to_chunks(bit_challenge.chunk_bits)
    bitlist_tree = subtree_fill_to_contents(bitlist_chunk_nodes, get_depth(spec.MAX_CUSTODY_CHUNKS))
    print(bitlist_tree)
    bitlist_chunk_branch = None  # TODO: extract proof from merkle tree

    bitlist_chunk_index = chunk_index // 256

@ -146,4 +148,4 @@ def get_custody_test_vector(bytelength):


def get_custody_merkle_root(data):
    return get_merkle_tree(chunkify(data))[-1][0]
    return None  # get_merkle_tree(chunkify(data))[-1][0]

@ -1,4 +1,3 @@
import copy
from eth2spec.test.helpers.keys import pubkeys


@ -35,7 +34,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):

    # We "hack" in the initial validators,
    # as it is much faster than creating and processing genesis deposits for every single test case.
    state.balances = copy.deepcopy(validator_balances)
    state.balances = validator_balances
    state.validators = [build_mock_validator(spec, i, state.balances[i]) for i in range(len(validator_balances))]

    # Process genesis activations

@ -44,4 +43,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold):
        validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
        validator.activation_epoch = spec.GENESIS_EPOCH

    # Set genesis validators root for domain separation and chain versioning
    state.genesis_validators_root = spec.hash_tree_root(state.validators)

    return state

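# Illustrative sketch (not part of the change): genesis_validators_root feeds
# into domain computation, binding signatures to this particular chain.
# Roughly, with the v0.11-era spec helpers:
#
#   domain = spec.compute_domain(spec.DOMAIN_BEACON_PROPOSER,
#                                genesis_validators_root=state.genesis_validators_root)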
@ -1,5 +1,3 @@
from copy import deepcopy

from eth2spec.test.helpers.keys import privkeys
from eth2spec.utils import bls
from eth2spec.utils.bls import only_with_bls

@ -32,12 +30,12 @@ def build_empty_shard_block(spec,
    if slot is None:
        slot = shard_state.slot

    previous_beacon_header = deepcopy(beacon_state.latest_block_header)
    previous_beacon_header = beacon_state.latest_block_header.copy()
    if previous_beacon_header.state_root == spec.Bytes32():
        previous_beacon_header.state_root = beacon_state.hash_tree_root()
    beacon_block_root = hash_tree_root(previous_beacon_header)

    previous_block_header = deepcopy(shard_state.latest_block_header)
    previous_block_header = shard_state.latest_block_header.copy()
    if previous_block_header.state_root == spec.Bytes32():
        previous_block_header.state_root = shard_state.hash_tree_root()
    parent_root = hash_tree_root(previous_block_header)

@ -1,5 +1,3 @@
from copy import deepcopy

from eth2spec.test.helpers.block_header import sign_block_header
from eth2spec.test.helpers.keys import pubkey_to_privkey


@ -12,11 +10,12 @@ def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False):

    header_1 = spec.BeaconBlockHeader(
        slot=slot,
        proposer_index=validator_index,
        parent_root=b'\x33' * 32,
        state_root=b'\x44' * 32,
        block_body_root=b'\x55' * 32,
        body_root=b'\x55' * 32,
    )
    header_2 = deepcopy(header_1)
    header_2 = header_1.copy()
    header_2.parent_root = b'\x99' * 32

    if signed_1:

@ -29,7 +28,6 @@ def get_valid_proposer_slashing(spec, state, signed_1=False, signed_2=False):
    signed_header_2 = spec.SignedBeaconBlockHeader(message=header_2)

    return spec.ProposerSlashing(
        proposer_index=validator_index,
        signed_header_1=signed_header_1,
        signed_header_2=signed_header_2,
    )

@ -1,5 +1,3 @@
from copy import deepcopy

from eth2spec.test.context import expect_assertion_error
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.block import sign_block, build_empty_block_for_next_slot, transition_unsigned_block

@ -61,7 +59,7 @@ def next_epoch_with_attestations(spec,
                                 fill_prev_epoch):
    assert state.slot % spec.SLOTS_PER_EPOCH == 0

    post_state = deepcopy(state)
    post_state = state.copy()
    signed_blocks = []
    for _ in range(spec.SLOTS_PER_EPOCH):
        block = build_empty_block_for_next_slot(spec, post_state)

@ -1,152 +0,0 @@
import re
from eth_utils import (
    to_tuple,
)

from eth2spec.test.context import (
    expect_assertion_error,
    spec_state_test,
    with_all_phases_except,
)
from eth2spec.utils.ssz.ssz_typing import (
    Bytes32,
    Container,
    List,
    uint64,
)


class Foo(Container):
    x: uint64
    y: List[Bytes32, 2]

# Tree
#             root
#            /    \
#           x    y_root
#                /    \
#     y_data_root    len(y)
#         /  \
#        /\  /\
#
# Generalized indices
#              1
#            /   \
#        2 (x)   3 (y_root)
#                /   \
#               6     7
#             /   \
#           12     13


@to_tuple
def ssz_object_to_path(start, end):
    is_len = False
    len_findall = re.findall(r"(?<=len\().*(?=\))", end)
    if len_findall:
        is_len = True
        end = len_findall[0]

    route = ''
    if end.startswith(start):
        route = end[len(start):]

    segments = route.split('.')
    for word in segments:
        index_match = re.match(r"(\w+)\[(\d+)]", word)
        if index_match:
            yield from index_match.groups()
        elif len(word):
            yield word
    if is_len:
        yield '__len__'


to_path_test_cases = [
    ('foo', 'foo.x', ('x',)),
    ('foo', 'foo.x[100].y', ('x', '100', 'y')),
    ('foo', 'foo.x[100].y[1].z[2]', ('x', '100', 'y', '1', 'z', '2')),
    ('foo', 'len(foo.x[100].y[1].z[2])', ('x', '100', 'y', '1', 'z', '2', '__len__')),
]


def test_to_path():
    for test_case in to_path_test_cases:
        start, end, expected = test_case
        assert ssz_object_to_path(start, end) == expected


generalized_index_cases = [
    (Foo, ('x',), 2),
    (Foo, ('y',), 3),
    (Foo, ('y', 0), 12),
    (Foo, ('y', 1), 13),
    (Foo, ('y', '__len__'), None),
]


@with_all_phases_except(['phase0'])
@spec_state_test
def test_get_generalized_index(spec, state):
    for typ, path, generalized_index in generalized_index_cases:
        if generalized_index is not None:
            assert spec.get_generalized_index(
                typ=typ,
                path=path,
            ) == generalized_index
        else:
            expect_assertion_error(lambda: spec.get_generalized_index(typ=typ, path=path))

        yield 'typ', typ
        yield 'path', path
        yield 'generalized_index', generalized_index


@with_all_phases_except(['phase0'])
@spec_state_test
def test_verify_merkle_proof(spec, state):
    h = spec.hash
    a = b'\x11' * 32
    b = b'\x22' * 32
    c = b'\x33' * 32
    d = b'\x44' * 32
    root = h(h(a + b) + h(c + d))
    leaf = a
    generalized_index = 4
    proof = [b, h(c + d)]

    is_valid = spec.verify_merkle_proof(
        leaf=leaf,
        proof=proof,
        index=generalized_index,
        root=root,
    )
    assert is_valid

    yield 'proof', proof
    yield 'is_valid', is_valid


@with_all_phases_except(['phase0'])
@spec_state_test
def test_verify_merkle_multiproof(spec, state):
    h = spec.hash
    a = b'\x11' * 32
    b = b'\x22' * 32
    c = b'\x33' * 32
    d = b'\x44' * 32
    root = h(h(a + b) + h(c + d))
    leaves = [a, d]
    generalized_indices = [4, 7]
    proof = [c, b]  # helper_indices = [6, 5]

    is_valid = spec.verify_merkle_multiproof(
        leaves=leaves,
        proof=proof,
        indices=generalized_indices,
        root=root,
    )
    assert is_valid

    yield 'proof', proof
    yield 'is_valid', is_valid

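# A worked sketch of the proof above (not part of the change): in the
# generalized-index tree, node i has children 2i and 2i+1. For leaf `a` at
# index 4, the proof is the sibling at index 5 (= b) and the sibling of the
# parent (index 2) at index 3 (= h(c + d)); folding upward reproduces
# root = h(h(a + b) + h(c + d)), which is exactly what test_verify_merkle_proof checks.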
@ -6,7 +6,7 @@ from eth2spec.test.context import (
    spec_test,
    low_balances,
    with_custom_state,
)
    single_phase)
from eth2spec.test.helpers.attestations import (
    get_valid_attestation,
    sign_aggregate_attestation,

@ -66,6 +66,7 @@ def test_success(spec, state):
@with_all_phases
@spec_test
@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.EJECTION_BALANCE)
@single_phase
def test_success_multi_proposer_index_iterations(spec, state):
    state.slot += spec.SLOTS_PER_EPOCH * 2
    attestation = get_valid_attestation(spec, state, signed=True)

@ -1,6 +1,7 @@
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases
from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls, with_all_phases, with_phases
from eth2spec.test.helpers.attestations import sign_indexed_attestation
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, \
    get_indexed_attestation_participants, get_attestation_2_data, get_attestation_1_data
from eth2spec.test.helpers.block import apply_empty_block
from eth2spec.test.helpers.state import (
    get_balance,

@ -25,7 +26,7 @@ def run_attester_slashing_processing(spec, state, attester_slashing, valid=True)
        yield 'post', None
        return

    slashed_indices = attester_slashing.attestation_1.attesting_indices
    slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)

    proposer_index = spec.get_beacon_proposer_index(state)
    pre_proposer_balance = get_balance(state, proposer_index)

@ -92,12 +93,12 @@ def test_success_surround(spec, state):

    state.current_justified_checkpoint.epoch += 1
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)
    attestation_1 = attester_slashing.attestation_1
    attestation_2 = attester_slashing.attestation_2
    att_1_data = get_attestation_1_data(spec, attester_slashing)
    att_2_data = get_attestation_2_data(spec, attester_slashing)

    # set attestation 1 to surround attestation 2
    attestation_1.data.source.epoch = attestation_2.data.source.epoch - 1
    attestation_1.data.target.epoch = attestation_2.data.target.epoch + 1
    att_1_data.source.epoch = att_2_data.source.epoch - 1
    att_1_data.target.epoch = att_2_data.target.epoch + 1

    sign_indexed_attestation(spec, state, attester_slashing.attestation_1)

@ -109,7 +110,7 @@ def test_success_surround(spec, state):
@always_bls
def test_success_already_exited_recent(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
    slashed_indices = attester_slashing.attestation_1.attesting_indices
    slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
    for index in slashed_indices:
        spec.initiate_validator_exit(state, index)

@ -121,7 +122,7 @@ def test_success_already_exited_recent(spec, state):
@always_bls
def test_success_already_exited_long_ago(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
    slashed_indices = attester_slashing.attestation_1.attesting_indices
    slashed_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
    for index in slashed_indices:
        spec.initiate_validator_exit(state, index)
        state.validators[index].withdrawable_epoch = spec.get_current_epoch(state) + 2

@ -158,7 +159,12 @@ def test_invalid_sig_1_and_2(spec, state):
def test_same_data(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

    attester_slashing.attestation_1.data = attester_slashing.attestation_2.data
    indexed_att_1 = attester_slashing.attestation_1
    att_2_data = get_attestation_2_data(spec, attester_slashing)
    if spec.fork == 'phase1':
        indexed_att_1.attestation.data = att_2_data
    else:
        indexed_att_1.data = att_2_data
    sign_indexed_attestation(spec, state, attester_slashing.attestation_1)

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)

@ -169,7 +175,9 @@ def test_same_data(spec, state):
def test_no_double_or_surround(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

    attester_slashing.attestation_1.data.target.epoch += 1
    att_1_data = get_attestation_1_data(spec, attester_slashing)
    att_1_data.target.epoch += 1

    sign_indexed_attestation(spec, state, attester_slashing.attestation_1)

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)

@ -181,20 +189,23 @@ def test_participants_already_slashed(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

    # set all indices to slashed
    validator_indices = attester_slashing.attestation_1.attesting_indices
    validator_indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
    for index in validator_indices:
        state.validators[index].slashed = True

    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
# Some of the following tests are phase0 only: phase 1 lists participants with bitfields instead of an index list.


@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att1_bad_extra_index(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)

    indices = attester_slashing.attestation_1.attesting_indices
    indices = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)
    options = list(set(range(len(state.validators))) - set(indices))
    indices.append(options[len(options) // 2])  # add random index, not previously in attestation.
    attester_slashing.attestation_1.attesting_indices = sorted(indices)

@ -204,7 +215,7 @@ def test_att1_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att1_bad_replaced_index(spec, state):

@ -220,7 +231,7 @@ def test_att1_bad_replaced_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att2_bad_extra_index(spec, state):

@ -236,7 +247,7 @@ def test_att2_bad_extra_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att2_bad_replaced_index(spec, state):

@ -252,13 +263,13 @@ def test_att2_bad_replaced_index(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att1_duplicate_index_normal_signed(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

    indices = attester_slashing.attestation_1.attesting_indices
    indices = list(attester_slashing.attestation_1.attesting_indices)
    indices.pop(1)  # remove an index, make room for the additional duplicate index.
    attester_slashing.attestation_1.attesting_indices = sorted(indices)

@ -272,13 +283,13 @@ def test_att1_duplicate_index_normal_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att2_duplicate_index_normal_signed(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

    indices = attester_slashing.attestation_2.attesting_indices
    indices = list(attester_slashing.attestation_2.attesting_indices)
    indices.pop(2)  # remove an index, make room for the additional duplicate index.
    attester_slashing.attestation_2.attesting_indices = sorted(indices)

@ -292,13 +303,13 @@ def test_att2_duplicate_index_normal_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att1_duplicate_index_double_signed(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

    indices = attester_slashing.attestation_1.attesting_indices
    indices = list(attester_slashing.attestation_1.attesting_indices)
    indices.pop(1)  # remove an index, make room for the additional duplicate index.
    indices.append(indices[2])  # add one of the indices a second time
    attester_slashing.attestation_1.attesting_indices = sorted(indices)

@ -307,13 +318,13 @@ def test_att1_duplicate_index_double_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
@always_bls
def test_att2_duplicate_index_double_signed(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

    indices = attester_slashing.attestation_2.attesting_indices
    indices = list(attester_slashing.attestation_2.attesting_indices)
    indices.pop(1)  # remove an index, make room for the additional duplicate index.
    indices.append(indices[2])  # add one of the indices a second time
    attester_slashing.attestation_2.attesting_indices = sorted(indices)

@ -322,7 +333,7 @@ def test_att2_duplicate_index_double_signed(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
def test_unsorted_att_1(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=False, signed_2=True)

@ -335,7 +346,7 @@ def test_unsorted_att_1(spec, state):
    yield from run_attester_slashing_processing(spec, state, attester_slashing, False)


@with_all_phases
@with_phases(['phase0'])
@spec_state_test
def test_unsorted_att_2(spec, state):
    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=False)

@ -47,6 +47,18 @@ def test_invalid_slot_block_header(spec, state):
    yield from run_block_header_processing(spec, state, block, valid=False)


@with_all_phases
@spec_state_test
def test_invalid_proposer_index(spec, state):
    block = build_empty_block_for_next_slot(spec, state)

    active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
    active_indices = [i for i in active_indices if i != block.proposer_index]
    block.proposer_index = active_indices[0]  # invalid proposer index

    yield from run_block_header_processing(spec, state, block, valid=False)


@with_all_phases
@spec_state_test
def test_invalid_parent_root(spec, state):

@ -22,22 +22,20 @@ def run_proposer_slashing_processing(spec, state, proposer_slashing, valid=True)
        yield 'post', None
        return

    pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index)
    proposer_index = proposer_slashing.signed_header_1.message.proposer_index
    pre_proposer_balance = get_balance(state, proposer_index)

    spec.process_proposer_slashing(state, proposer_slashing)
    yield 'post', state

    # check if slashed
    slashed_validator = state.validators[proposer_slashing.proposer_index]
    slashed_validator = state.validators[proposer_index]
    assert slashed_validator.slashed
    assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH

    # lost whistleblower reward
    assert (
        get_balance(state, proposer_slashing.proposer_index) <
        pre_proposer_balance
    )
    assert get_balance(state, proposer_index) < pre_proposer_balance


@with_all_phases

@ -77,7 +75,24 @@ def test_invalid_sig_1_and_2(spec, state):
def test_invalid_proposer_index(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
    # Index just too high (by 1)
    proposer_slashing.proposer_index = len(state.validators)
    proposer_slashing.signed_header_1.message.proposer_index = len(state.validators)
    proposer_slashing.signed_header_2.message.proposer_index = len(state.validators)

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)


@with_all_phases
@spec_state_test
def test_invalid_different_proposer_indices(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
    # set different index and sign
    header_1 = proposer_slashing.signed_header_1.message
    header_2 = proposer_slashing.signed_header_2.message
    active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
    active_indices = [i for i in active_indices if i != header_1.proposer_index]

    header_2.proposer_index = active_indices[0]
    proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[header_2.proposer_index])

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -89,9 +104,9 @@ def test_epochs_are_different(spec, state):

    # set slots to be in different epochs
    header_2 = proposer_slashing.signed_header_2.message
    proposer_index = header_2.proposer_index
    header_2.slot += spec.SLOTS_PER_EPOCH
    proposer_slashing.signed_header_2 = sign_block_header(
        spec, state, header_2, privkeys[proposer_slashing.proposer_index])
    proposer_slashing.signed_header_2 = sign_block_header(spec, state, header_2, privkeys[proposer_index])

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -113,7 +128,8 @@ def test_proposer_is_not_activated(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

    # set proposer to be not active yet
    state.validators[proposer_slashing.proposer_index].activation_epoch = spec.get_current_epoch(state) + 1
    proposer_index = proposer_slashing.signed_header_1.message.proposer_index
    state.validators[proposer_index].activation_epoch = spec.get_current_epoch(state) + 1

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -124,7 +140,8 @@ def test_proposer_is_slashed(spec, state):
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)

    # set proposer to slashed
    state.validators[proposer_slashing.proposer_index].slashed = True
    proposer_index = proposer_slashing.signed_header_1.message.proposer_index
    state.validators[proposer_index].slashed = True

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -138,7 +155,7 @@ def test_proposer_is_withdrawn(spec, state):
    state.slot += spec.SLOTS_PER_EPOCH
    # set proposer withdrawable_epoch in past
    current_epoch = spec.get_current_epoch(state)
    proposer_index = proposer_slashing.proposer_index
    proposer_index = proposer_slashing.signed_header_1.message.proposer_index
    state.validators[proposer_index].withdrawable_epoch = current_epoch - 1

    yield from run_proposer_slashing_processing(spec, state, proposer_slashing, False)

@ -11,7 +11,7 @@ def run_process_final_updates(spec, state):
|
|||
@with_all_phases
|
||||
@spec_state_test
|
||||
def test_eth1_vote_no_reset(spec, state):
|
||||
assert spec.SLOTS_PER_ETH1_VOTING_PERIOD > spec.SLOTS_PER_EPOCH
|
||||
assert spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 1
|
||||
# skip ahead to the end of the epoch
|
||||
state.slot = spec.SLOTS_PER_EPOCH - 1
|
||||
for i in range(state.slot + 1): # add a vote for each skipped slot.
|
||||
|
@@ -29,7 +29,7 @@ def test_eth1_vote_no_reset(spec, state):
@spec_state_test
def test_eth1_vote_reset(spec, state):
    # skip ahead to the end of the voting period
    state.slot = spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1
    state.slot = (spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH) - 1
    for i in range(state.slot + 1):  # add a vote for each skipped slot.
        state.eth1_data_votes.append(
            spec.Eth1Data(deposit_root=b'\xaa' * 32,

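Both eth1-vote hunks follow from one configuration change: the voting period is now configured in epochs (`EPOCHS_PER_ETH1_VOTING_PERIOD`) rather than slots, so tests derive the slot count themselves. A one-line sketch of the conversion relied on throughout (a hypothetical helper; the tests inline the expression):

    voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH
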
@@ -51,19 +51,25 @@ def test_effective_balance_hysteresis(spec, state):
    max = spec.MAX_EFFECTIVE_BALANCE
    min = spec.EJECTION_BALANCE
    inc = spec.EFFECTIVE_BALANCE_INCREMENT
    half_inc = inc // 2
    div = spec.HYSTERESIS_QUOTIENT
    hys_inc = inc // div
    down = spec.HYSTERESIS_DOWNWARD_MULTIPLIER
    up = spec.HYSTERESIS_UPWARD_MULTIPLIER
    cases = [
        (max, max, max, "as-is"),
        (max, max - 1, max - inc, "round down, step lower"),
        (max, max - 1, max, "round up"),
        (max, max + 1, max, "round down"),
        (max, max - down * hys_inc, max, "lower balance, but not low enough"),
        (max, max - down * hys_inc - 1, max - inc, "lower balance, step down"),
        (max, max + (up * hys_inc) + 1, max, "already at max, as is"),
        (max, max - inc, max - inc, "exactly 1 step lower"),
        (max, max - inc - 1, max - (2 * inc), "just 1 over 1 step lower"),
        (max, max - inc - 1, max - (2 * inc), "past 1 step lower, double step"),
        (max, max - inc + 1, max - inc, "close to 1 step lower"),
        (min, min + (half_inc * 3), min, "bigger balance, but not high enough"),
        (min, min + (half_inc * 3) + 1, min + inc, "bigger balance, high enough, but small step"),
        (min, min + (half_inc * 4) - 1, min + inc, "bigger balance, high enough, close to double step"),
        (min, min + (half_inc * 4), min + (2 * inc), "exact two step balance increment"),
        (min, min + (half_inc * 4) + 1, min + (2 * inc), "over two steps, round down"),
        (min, min + (hys_inc * up), min, "bigger balance, but not high enough"),
        (min, min + (hys_inc * up) + 1, min + inc, "bigger balance, high enough, but small step"),
        (min, min + (hys_inc * div * 2) - 1, min + inc, "bigger balance, high enough, close to double step"),
        (min, min + (hys_inc * div * 2), min + (2 * inc), "exact two step balance increment"),
        (min, min + (hys_inc * div * 2) + 1, min + (2 * inc), "over two steps, round down"),
    ]
    current_epoch = spec.get_current_epoch(state)
    for i, (pre_eff, bal, _, _) in enumerate(cases):

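The rewritten cases target the quotient-based hysteresis rule that replaced the old half-increment rule. For orientation, a sketch of the per-validator update these cases exercise, written from the constants above (a reading of the phase 0 final-updates logic, not a verbatim spec excerpt):

    def apply_hysteresis(spec, validator, balance):
        # Thresholds are fractions of an increment, scaled by the multipliers.
        hysteresis_increment = spec.EFFECTIVE_BALANCE_INCREMENT // spec.HYSTERESIS_QUOTIENT
        downward_threshold = hysteresis_increment * spec.HYSTERESIS_DOWNWARD_MULTIPLIER
        upward_threshold = hysteresis_increment * spec.HYSTERESIS_UPWARD_MULTIPLIER
        if (balance + downward_threshold < validator.effective_balance
                or validator.effective_balance + upward_threshold < balance):
            # Snap to an increment boundary, capped at the maximum.
            validator.effective_balance = min(
                balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT,
                spec.MAX_EFFECTIVE_BALANCE,
            )
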
@@ -1,7 +1,10 @@
from copy import deepcopy

from eth2spec.test.context import spec_state_test, with_all_phases, spec_test, \
    misc_balances, with_custom_state, default_activation_threshold
from eth2spec.test.context import (
    spec_state_test, with_all_phases, spec_test,
    misc_balances, with_custom_state, default_activation_threshold,
    single_phase,
)
from eth2spec.test.helpers.state import (
    next_epoch,
    next_slot,

@@ -10,6 +13,7 @@ from eth2spec.test.helpers.attestations import (
    add_attestations_to_state,
    get_valid_attestation,
)
from eth2spec.test.helpers.attester_slashings import get_indexed_attestation_participants
from eth2spec.test.phase_0.epoch_processing.run_epoch_process_base import run_epoch_processing_with

@@ -93,9 +97,33 @@ def test_full_attestations(spec, state):
        assert state.balances[index] < pre_state.balances[index]


@with_all_phases
@spec_state_test
def test_full_attestations_random_incorrect_fields(spec, state):
    attestations = prepare_state_with_full_attestations(spec, state)
    for i, attestation in enumerate(state.previous_epoch_attestations):
        if i % 3 == 0:
            # Mess up some head votes
            attestation.data.beacon_block_root = b'\x56' * 32
        if i % 3 == 1:
            # Mess up some target votes
            attestation.data.target.root = b'\x23' * 32
        if i % 3 == 2:
            # Keep some votes 100% correct
            pass

    yield from run_process_rewards_and_penalties(spec, state)

    attesting_indices = spec.get_unslashed_attesting_indices(state, attestations)
    assert len(attesting_indices) > 0
    # No balance checks; rewards are non-trivial, based on group rewards
    # Mainly for consensus tests


@with_all_phases
@spec_test
@with_custom_state(balances_fn=misc_balances, threshold_fn=default_activation_threshold)
@single_phase
def test_full_attestations_misc_balances(spec, state):
    attestations = prepare_state_with_full_attestations(spec, state)

@@ -141,7 +169,7 @@ def test_duplicate_attestation(spec, state):
    attestation = get_valid_attestation(spec, state, signed=True)

    indexed_attestation = spec.get_indexed_attestation(state, attestation)
    participants = indexed_attestation.attesting_indices
    participants = get_indexed_attestation_participants(spec, indexed_attestation)

    assert len(participants) > 0

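`get_indexed_attestation_participants` is a new test helper that hides how participants are encoded in an `IndexedAttestation` across phases. A hedged sketch of what such a helper plausibly looks like (the real one lives in `eth2spec.test.helpers.attester_slashings`):

    def get_indexed_attestation_participants(spec, indexed_att):
        # Phase 0 sketch: participants are just the attesting indices;
        # later phases may store them differently, hence the indirection.
        return list(indexed_att.attesting_indices)
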
@@ -1,350 +0,0 @@
from eth2spec.test.helpers.custody import (
    get_valid_bit_challenge,
    get_valid_custody_response,
    get_custody_test_vector,
    get_custody_merkle_root
)
from eth2spec.test.helpers.attestations import (
    get_valid_attestation,
)
from eth2spec.utils.ssz.ssz_impl import hash_tree_root
from eth2spec.test.helpers.state import next_epoch, get_balance
from eth2spec.test.helpers.block import apply_empty_block
from eth2spec.test.context import (
    with_all_phases_except,
    spec_state_test,
    expect_assertion_error,
)
from eth2spec.test.phase_0.block_processing.test_process_attestation import run_attestation_processing


def run_bit_challenge_processing(spec, state, custody_bit_challenge, valid=True):
    """
    Run ``process_bit_challenge``, yielding:
      - pre-state ('pre')
      - CustodyBitChallenge ('custody_bit_challenge')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_bit_challenge', custody_bit_challenge

    if not valid:
        expect_assertion_error(lambda: spec.process_bit_challenge(state, custody_bit_challenge))
        yield 'post', None
        return

    spec.process_bit_challenge(state, custody_bit_challenge)

    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].chunk_bits_merkle_root == \
        hash_tree_root(custody_bit_challenge.chunk_bits)
    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].challenger_index == \
        custody_bit_challenge.challenger_index
    assert state.custody_bit_challenge_records[state.custody_challenge_index - 1].responder_index == \
        custody_bit_challenge.responder_index

    yield 'post', state


def run_custody_response_processing(spec, state, custody_response, valid=True):
    """
    Run ``process_bit_challenge_response``, yielding:
      - pre-state ('pre')
      - CustodyResponse ('custody_response')
      - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_response', custody_response

    if not valid:
        expect_assertion_error(lambda: spec.process_custody_response(state, custody_response))
        yield 'post', None
        return

    # TODO: Add capability to also process chunk challenges, not only bit challenges
    challenge = state.custody_bit_challenge_records[custody_response.challenge_index]
    pre_slashed_balance = get_balance(state, challenge.challenger_index)

    spec.process_custody_response(state, custody_response)

    slashed_validator = state.validators[challenge.challenger_index]

    assert slashed_validator.slashed
    assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
    assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH

    assert get_balance(state, challenge.challenger_index) < pre_slashed_balance
    yield 'post', state


@with_all_phases_except(['phase0'])
@spec_state_test
def test_challenge_appended(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_multiple_epochs_custody(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 3
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_many_epochs_custody(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 100
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * (spec.EPOCHS_PER_CUSTODY_PERIOD - 1)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_off_chain_attestation(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_invalid_custody_bit_challenge(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation, invalid_custody_bit=True)

    yield from run_bit_challenge_processing(spec, state, challenge, valid=False)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_max_reveal_lateness_1(spec, state):
    next_epoch(spec, state)
    apply_empty_block(spec, state)

    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    next_epoch(spec, state)
    apply_empty_block(spec, state)

    _, _, _ = run_attestation_processing(spec, state, attestation)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    responder_index = challenge.responder_index
    target_epoch = attestation.data.target.epoch

    state.validators[responder_index].max_reveal_lateness = 3

    latest_reveal_epoch = spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index, target_epoch),
        responder_index
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness

    while spec.get_current_epoch(state) < latest_reveal_epoch - 2:
        next_epoch(spec, state)
        apply_empty_block(spec, state)

    yield from run_bit_challenge_processing(spec, state, challenge)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_max_reveal_lateness_2(spec, state):
    next_epoch(spec, state)
    apply_empty_block(spec, state)

    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    next_epoch(spec, state)
    apply_empty_block(spec, state)

    _, _, _ = run_attestation_processing(spec, state, attestation)

    challenge = get_valid_bit_challenge(spec, state, attestation)

    responder_index = challenge.responder_index

    state.validators[responder_index].max_reveal_lateness = 3

    for i in range(spec.get_randao_epoch_for_custody_period(
        spec.get_custody_period_for_validator(state, responder_index),
        responder_index
    ) + 2 * spec.EPOCHS_PER_CUSTODY_PERIOD + state.validators[responder_index].max_reveal_lateness - 1):
        next_epoch(spec, state)
        apply_empty_block(spec, state)

    yield from run_bit_challenge_processing(spec, state, challenge, False)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response_multiple_epochs(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 3
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)


@with_all_phases_except(['phase0'])
@spec_state_test
def test_custody_response_many_epochs(spec, state):
    state.slot = spec.SLOTS_PER_EPOCH * 100
    attestation = get_valid_attestation(spec, state, signed=True)

    test_vector = get_custody_test_vector(
        spec.get_custody_chunk_count(attestation.data.crosslink) * spec.BYTES_PER_CUSTODY_CHUNK)
    shard_root = get_custody_merkle_root(test_vector)
    attestation.data.crosslink.data_root = shard_root
    attestation.custody_bits[0] = 0

    state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY

    _, _, _ = run_attestation_processing(spec, state, attestation)

    state.slot += spec.SLOTS_PER_EPOCH * spec.EPOCHS_PER_CUSTODY_PERIOD

    challenge = get_valid_bit_challenge(spec, state, attestation)

    _, _, _ = run_bit_challenge_processing(spec, state, challenge)

    bit_challenge_index = state.custody_challenge_index - 1

    custody_response = get_valid_custody_response(spec, state, challenge, test_vector, bit_challenge_index)

    yield from run_custody_response_processing(spec, state, custody_response)

@@ -55,8 +55,8 @@ def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=Tru


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_success(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

@@ -65,8 +65,8 @@ def test_success(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_reveal_too_early(spec, state):
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

@@ -74,8 +74,8 @@ def test_reveal_too_early(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_wrong_period(spec, state):
    custody_key_reveal = get_valid_custody_key_reveal(spec, state, period=5)

@@ -83,8 +83,8 @@ def test_wrong_period(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_late_reveal(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

@@ -93,8 +93,8 @@ def test_late_reveal(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_double_reveal(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 2
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

@@ -105,8 +105,8 @@ def test_double_reveal(spec, state):


@with_all_phases_except(['phase0'])
@always_bls
@spec_state_test
@always_bls
def test_max_decrement(spec, state):
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    custody_key_reveal = get_valid_custody_key_reveal(spec, state)

@@ -1,177 +0,0 @@
from copy import deepcopy

from eth2spec.test.helpers.phase1.shard_block import (
    build_empty_shard_block,
    sign_shard_block,
)
from eth2spec.test.helpers.phase1.shard_state import (
    configure_shard_state,
    shard_state_transition_and_sign_block,
)
from eth2spec.test.context import (
    always_bls,
    expect_assertion_error,
    spec_state_test,
    with_all_phases_except,
)


@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_process_empty_shard_block(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    block = build_empty_shard_block(
        spec,
        beacon_state,
        shard_state,
        slot=shard_state.slot + 1,
        signed=True,
        full_attestation=False,
    )

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state

    shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block)

    yield 'blocks', [block]
    yield 'post', shard_state


@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_process_full_attestation_shard_block(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    block = build_empty_shard_block(
        spec,
        beacon_state,
        shard_state,
        slot=shard_state.slot + 1,
        signed=True,
        full_attestation=True,
    )

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state

    shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block)

    yield 'blocks', [block]
    yield 'post', shard_state


@with_all_phases_except(['phase0'])
@spec_state_test
def test_prev_slot_block_transition(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    # Go to clean slot
    spec.process_shard_slots(shard_state, shard_state.slot + 1)
    # Make a block for it
    block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot, signed=True)
    # Transition to next slot, above block will now be invalid on top of new state.
    spec.process_shard_slots(shard_state, shard_state.slot + 1)

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state
    expect_assertion_error(
        lambda: spec.shard_state_transition(beacon_state, shard_state, block)
    )
    yield 'blocks', [block]
    yield 'post', None


@with_all_phases_except(['phase0'])
@spec_state_test
def test_same_slot_block_transition(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    # Same slot on top of pre-state, but move out of slot 0 first.
    spec.process_shard_slots(shard_state, shard_state.slot + 1)
    block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot, signed=True)

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state

    shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block)

    yield 'blocks', [block]
    yield 'post', shard_state


@with_all_phases_except(['phase0'])
@spec_state_test
def test_invalid_state_root(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    spec.process_shard_slots(shard_state, shard_state.slot + 1)
    block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot)
    block.state_root = b'\x36' * 32
    sign_shard_block(spec, beacon_state, shard_state, block)

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state
    expect_assertion_error(
        lambda: spec.shard_state_transition(beacon_state, shard_state, block, validate_state_root=True)
    )
    yield 'blocks', [block]
    yield 'post', None


@with_all_phases_except(['phase0'])
@spec_state_test
def test_skipped_slots(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    block = build_empty_shard_block(spec, beacon_state, shard_state, slot=shard_state.slot + 3, signed=True)

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state

    shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block)

    yield 'blocks', [block]
    yield 'post', shard_state

    assert shard_state.slot == block.slot
    latest_block_header = deepcopy(shard_state.latest_block_header)
    latest_block_header.state_root = shard_state.hash_tree_root()
    assert latest_block_header.hash_tree_root() == block.hash_tree_root()


@with_all_phases_except(['phase0'])
@spec_state_test
def test_empty_shard_period_transition(spec, state):
    beacon_state, shard_state = configure_shard_state(spec, state)

    # modify some of the deltas to ensure the period transition works properly
    stub_delta = 10
    shard_state.newer_committee_positive_deltas[0] = stub_delta
    shard_state.newer_committee_negative_deltas[0] = stub_delta

    slot = shard_state.slot + spec.SHARD_SLOTS_PER_EPOCH * spec.EPOCHS_PER_SHARD_PERIOD
    beacon_state.slot = spec.compute_epoch_of_shard_slot(slot) * spec.SLOTS_PER_EPOCH - 4
    spec.process_slots(beacon_state, spec.compute_epoch_of_shard_slot(slot) * spec.SLOTS_PER_EPOCH)

    # all validators get slashed for not revealing keys
    # undo this to allow for a block proposal
    for index in range(len(beacon_state.validators)):
        beacon_state.validators[index].slashed = False
    block = build_empty_shard_block(spec, beacon_state, shard_state, slot=slot, signed=True)

    yield 'pre', shard_state
    yield 'beacon_state', beacon_state

    shard_state_transition_and_sign_block(spec, beacon_state, shard_state, block)

    yield 'blocks', [block]
    yield 'post', shard_state

    assert shard_state.older_committee_positive_deltas[0] == stub_delta
    assert shard_state.older_committee_negative_deltas[0] == stub_delta
    assert shard_state.newer_committee_positive_deltas[0] == 0
    assert shard_state.newer_committee_negative_deltas[0] == 0

@@ -6,7 +6,7 @@ from eth2spec.test.helpers.state import get_balance, state_transition_and_sign_b
from eth2spec.test.helpers.block import build_empty_block_for_next_slot, build_empty_block, sign_block, \
    transition_unsigned_block
from eth2spec.test.helpers.keys import privkeys, pubkeys
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing, get_indexed_attestation_participants
from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
from eth2spec.test.helpers.attestations import get_valid_attestation
from eth2spec.test.helpers.deposits import prepare_state_and_deposit

@@ -119,6 +119,49 @@ def test_invalid_block_sig(spec, state):
    yield 'post', None


@with_all_phases
@spec_state_test
@always_bls
def test_invalid_proposer_index_sig_from_expected_proposer(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)
    expect_proposer_index = block.proposer_index

    # Set invalid proposer index but correct signature wrt expected proposer
    active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
    active_indices = [i for i in active_indices if i != block.proposer_index]
    block.proposer_index = active_indices[0]  # invalid proposer index

    invalid_signed_block = sign_block(spec, state, block, expect_proposer_index)

    expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block))

    yield 'blocks', [invalid_signed_block]
    yield 'post', None


@with_all_phases
@spec_state_test
@always_bls
def test_invalid_proposer_index_sig_from_proposer_index(spec, state):
    yield 'pre', state

    block = build_empty_block_for_next_slot(spec, state)

    # Set invalid proposer index but correct signature wrt proposer_index
    active_indices = spec.get_active_validator_indices(state, spec.get_current_epoch(state))
    active_indices = [i for i in active_indices if i != block.proposer_index]
    block.proposer_index = active_indices[0]  # invalid proposer index

    invalid_signed_block = sign_block(spec, state, block, block.proposer_index)

    expect_assertion_error(lambda: spec.state_transition(state, invalid_signed_block))

    yield 'blocks', [invalid_signed_block]
    yield 'post', None


@with_all_phases
@spec_state_test
def test_skipped_slots(spec, state):

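Both new cases rely on `proposer_index` now being a field on `BeaconBlock` itself, which the state transition validates against the block signature. A sketch of the block shape the tests assume (field names per the phase 0 spec of this era; not a verbatim excerpt):

    class BeaconBlock(Container):
        slot: Slot
        proposer_index: ValidatorIndex  # newly carried by the block itself
        parent_root: Root
        state_root: Root
        body: BeaconBlockBody
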
@@ -187,7 +230,7 @@ def test_proposer_slashing(spec, state):
    # copy for later balance lookups.
    pre_state = deepcopy(state)
    proposer_slashing = get_valid_proposer_slashing(spec, state, signed_1=True, signed_2=True)
    validator_index = proposer_slashing.proposer_index
    validator_index = proposer_slashing.signed_header_1.message.proposer_index

    assert not state.validators[validator_index].slashed

@@ -220,7 +263,7 @@ def test_attester_slashing(spec, state):
    pre_state = deepcopy(state)

    attester_slashing = get_valid_attester_slashing(spec, state, signed_1=True, signed_2=True)
    validator_index = attester_slashing.attestation_1.attesting_indices[0]
    validator_index = get_indexed_attestation_participants(spec, attester_slashing.attestation_1)[0]

    assert not state.validators[validator_index].slashed

@@ -486,10 +529,12 @@ def test_historical_batch(spec, state):
@spec_state_test
def test_eth1_data_votes_consensus(spec, state):
    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
    if spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 2:
        return

    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
    voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH

    offset_block = build_empty_block(spec, state, slot=voting_period_slots - 1)
    state_transition_and_sign_block(spec, state, offset_block)
    yield 'pre', state

@@ -499,14 +544,14 @@ def test_eth1_data_votes_consensus(spec, state):

    blocks = []

    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
    for i in range(0, voting_period_slots):
        block = build_empty_block_for_next_slot(spec, state)
        # wait for over 50% for A, then start voting B
        block.body.eth1_data.block_hash = b if i * 2 > spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
        block.body.eth1_data.block_hash = b if i * 2 > voting_period_slots else a
        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(signed_block)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
    assert len(state.eth1_data_votes) == voting_period_slots
    assert state.eth1_data.block_hash == a

    # transition to next eth1 voting period

@@ -519,7 +564,7 @@ def test_eth1_data_votes_consensus(spec, state):
    yield 'post', state

    assert state.eth1_data.block_hash == a
    assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
    assert state.slot % voting_period_slots == 0
    assert len(state.eth1_data_votes) == 1
    assert state.eth1_data_votes[0].block_hash == c

@@ -528,12 +573,14 @@ def test_eth1_data_votes_consensus(spec, state):
@spec_state_test
def test_eth1_data_votes_no_consensus(spec, state):
    # Don't run when it will take very, very long to simulate. Minimal configuration suffices.
    if spec.SLOTS_PER_ETH1_VOTING_PERIOD > 16:
    if spec.EPOCHS_PER_ETH1_VOTING_PERIOD > 2:
        return

    voting_period_slots = spec.EPOCHS_PER_ETH1_VOTING_PERIOD * spec.SLOTS_PER_EPOCH

    pre_eth1_hash = state.eth1_data.block_hash

    offset_block = build_empty_block(spec, state, slot=spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1)
    offset_block = build_empty_block(spec, state, slot=voting_period_slots - 1)
    state_transition_and_sign_block(spec, state, offset_block)
    yield 'pre', state

@@ -542,14 +589,14 @@ def test_eth1_data_votes_no_consensus(spec, state):

    blocks = []

    for i in range(0, spec.SLOTS_PER_ETH1_VOTING_PERIOD):
    for i in range(0, voting_period_slots):
        block = build_empty_block_for_next_slot(spec, state)
        # wait for precisely 50% for A, then start voting B for other 50%
        block.body.eth1_data.block_hash = b if i * 2 >= spec.SLOTS_PER_ETH1_VOTING_PERIOD else a
        block.body.eth1_data.block_hash = b if i * 2 >= voting_period_slots else a
        signed_block = state_transition_and_sign_block(spec, state, block)
        blocks.append(signed_block)

    assert len(state.eth1_data_votes) == spec.SLOTS_PER_ETH1_VOTING_PERIOD
    assert len(state.eth1_data_votes) == voting_period_slots
    assert state.eth1_data.block_hash == pre_eth1_hash

    yield 'blocks', blocks

@@ -1,6 +1,6 @@
from typing import Dict, Any
from eth2spec.debug.encode import encode
from eth2spec.utils.ssz.ssz_typing import SSZValue
from eth2spec.utils.ssz.ssz_typing import View
from eth2spec.utils.ssz.ssz_impl import serialize

@@ -38,15 +38,15 @@ def vector_test(description: str = None):
            (key, value) = data
            if value is None:
                continue
            if isinstance(value, SSZValue):
            if isinstance(value, View):
                yield key, 'data', encode(value)
                yield key, 'ssz', serialize(value)
            elif isinstance(value, bytes):
                yield key, 'data', encode(value)
                yield key, 'ssz', value
            elif isinstance(value, list) and all([isinstance(el, (SSZValue, bytes)) for el in value]):
            elif isinstance(value, list) and all([isinstance(el, (View, bytes)) for el in value]):
                for i, el in enumerate(value):
                    if isinstance(el, SSZValue):
                    if isinstance(el, View):
                        yield f'{key}_{i}', 'data', encode(el)
                        yield f'{key}_{i}', 'ssz', serialize(el)
                    elif isinstance(el, bytes):

@@ -1,157 +1,10 @@
from ..merkle_minimal import merkleize_chunks
from ..hash_function import hash
from .ssz_typing import (
    SSZValue, SSZType, BasicValue, BasicType, Series, Elements, Bits, boolean, Container, List, ByteList,
    Bitlist, Bitvector, uint,
)

# SSZ Serialization
# -----------------------------

BYTES_PER_LENGTH_OFFSET = 4
from remerkleable.core import View
from remerkleable.byte_arrays import Bytes32


def serialize_basic(value: SSZValue):
    if isinstance(value, uint):
        return value.to_bytes(value.type().byte_len, 'little')
    elif isinstance(value, boolean):
        if value:
            return b'\x01'
        else:
            return b'\x00'
    else:
        raise Exception(f"Type not supported: {type(value)}")
def serialize(obj: View) -> bytes:
    return obj.encode_bytes()


def deserialize_basic(value, typ: BasicType):
    if issubclass(typ, uint):
        return typ(int.from_bytes(value, 'little'))
    elif issubclass(typ, boolean):
        assert value in (b'\x00', b'\x01')
        return typ(value == b'\x01')
    else:
        raise Exception(f"Type not supported: {typ}")


def is_zero(obj: SSZValue):
    return type(obj).default() == obj


def serialize(obj: SSZValue):
    if isinstance(obj, BasicValue):
        return serialize_basic(obj)
    elif isinstance(obj, Bitvector):
        return obj.as_bytes()
    elif isinstance(obj, Bitlist):
        as_bytearray = list(obj.as_bytes())
        if len(obj) % 8 == 0:
            as_bytearray.append(1)
        else:
            as_bytearray[len(obj) // 8] |= 1 << (len(obj) % 8)
        return bytes(as_bytearray)
    elif isinstance(obj, Series):
        return encode_series(obj)
    else:
        raise Exception(f"Type not supported: {type(obj)}")


def encode_series(values: Series):
    if isinstance(values, bytes):  # ByteList and ByteVector are already like serialized output
        return values

    # Recursively serialize
    parts = [(v.type().is_fixed_size(), serialize(v)) for v in values]

    # Compute and check lengths
    fixed_lengths = [len(serialized) if constant_size else BYTES_PER_LENGTH_OFFSET
                     for (constant_size, serialized) in parts]
    variable_lengths = [len(serialized) if not constant_size else 0
                        for (constant_size, serialized) in parts]

    # Check if integer is not out of bounds (Python)
    assert sum(fixed_lengths + variable_lengths) < 2 ** (BYTES_PER_LENGTH_OFFSET * 8)

    # Interleave offsets of variable-size parts with fixed-size parts.
    # Avoid quadratic complexity in calculation of offsets.
    offset = sum(fixed_lengths)
    variable_parts = []
    fixed_parts = []
    for (constant_size, serialized) in parts:
        if constant_size:
            fixed_parts.append(serialized)
        else:
            fixed_parts.append(offset.to_bytes(BYTES_PER_LENGTH_OFFSET, 'little'))
            variable_parts.append(serialized)
            offset += len(serialized)

    # Return the concatenation of the fixed-size parts (offsets interleaved) with the variable-size parts
    return b''.join(fixed_parts + variable_parts)


# SSZ Hash-tree-root
# -----------------------------


def pack(values: Series):
    if isinstance(values, bytes):  # ByteList and ByteVector are already packed
        return values
    elif isinstance(values, Bits):
        # packs the bits in bytes, left-aligned.
        # Exclusive length delimiting bits for bitlists.
        return values.as_bytes()
    return b''.join([serialize_basic(value) for value in values])


def chunkify(bytez):
    # pad `bytez` to nearest 32-byte multiple
    bytez += b'\x00' * (-len(bytez) % 32)
    return [bytez[i:i + 32] for i in range(0, len(bytez), 32)]


def mix_in_length(root, length):
    return hash(root + length.to_bytes(32, 'little'))


def is_bottom_layer_kind(typ: SSZType):
    return (
        isinstance(typ, BasicType) or
        (issubclass(typ, Elements) and isinstance(typ.elem_type, BasicType))
    )


def item_length(typ: SSZType) -> int:
    if issubclass(typ, BasicValue):
        return typ.byte_len
    else:
        return 32


def chunk_count(typ: SSZType) -> int:
    # note that for lists, .length *on the type* describes the list limit.
    if isinstance(typ, BasicType):
        return 1
    elif issubclass(typ, Bits):
        return (typ.length + 255) // 256
    elif issubclass(typ, Elements):
        return (typ.length * item_length(typ.elem_type) + 31) // 32
    elif issubclass(typ, Container):
        return len(typ.get_fields())
    else:
        raise Exception(f"Type not supported: {typ}")


def hash_tree_root(obj: SSZValue):
    if isinstance(obj, Series):
        if is_bottom_layer_kind(obj.type()):
            leaves = chunkify(pack(obj))
        else:
            leaves = [hash_tree_root(value) for value in obj]
    elif isinstance(obj, BasicValue):
        leaves = chunkify(serialize_basic(obj))
    else:
        raise Exception(f"Type not supported: {type(obj)}")

    if isinstance(obj, (List, ByteList, Bitlist)):
        return mix_in_length(merkleize_chunks(leaves, limit=chunk_count(obj.type())), len(obj))
    else:
        return merkleize_chunks(leaves)
def hash_tree_root(obj: View) -> Bytes32:
    return Bytes32(obj.get_backing().merkle_root())

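The hand-rolled serialization and merkleization above collapse to one-liners because remerkleable backs every value with a typed Merkle tree. A small usage sketch under that assumption (`Checkpoint` here is just an illustrative container, defined inline):

    from remerkleable.basic import uint64
    from remerkleable.byte_arrays import Bytes32
    from remerkleable.complex import Container

    class Checkpoint(Container):
        epoch: uint64
        root: Bytes32

    cp = Checkpoint(epoch=uint64(3), root=Bytes32(b'\x11' * 32))
    assert serialize(cp) == cp.encode_bytes()  # the thin wrapper above
    assert hash_tree_root(cp) == Bytes32(cp.get_backing().merkle_root())
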
@ -1,516 +1,8 @@
|
|||
from typing import Dict, Iterator, Iterable
|
||||
import copy
|
||||
from types import GeneratorType
|
||||
|
||||
|
||||
class DefaultingTypeMeta(type):
|
||||
def default(cls):
|
||||
raise Exception("Not implemented")
|
||||
|
||||
|
||||
class SSZType(DefaultingTypeMeta):
|
||||
|
||||
def is_fixed_size(cls):
|
||||
raise Exception("Not implemented")
|
||||
|
||||
|
||||
class SSZValue(object, metaclass=SSZType):
|
||||
|
||||
def type(self):
|
||||
return self.__class__
|
||||
|
||||
|
||||
class BasicType(SSZType):
|
||||
byte_len = 0
|
||||
|
||||
def is_fixed_size(cls):
|
||||
return True
|
||||
|
||||
|
||||
class BasicValue(int, SSZValue, metaclass=BasicType):
|
||||
pass
|
||||
|
||||
|
||||
class boolean(BasicValue): # can't subclass bool.
|
||||
byte_len = 1
|
||||
|
||||
def __new__(cls, value: int): # int value, but can be any subclass of int (bool, Bit, Bool, etc...)
|
||||
if value < 0 or value > 1:
|
||||
raise ValueError(f"value {value} out of bounds for bit")
|
||||
return super().__new__(cls, value)
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls(0)
|
||||
|
||||
def __bool__(self):
|
||||
return self > 0
|
||||
|
||||
|
||||
# Alias for Bool
|
||||
class bit(boolean):
|
||||
pass
|
||||
|
||||
|
||||
class uint(BasicValue, metaclass=BasicType):
|
||||
|
||||
def __new__(cls, value: int):
|
||||
if value < 0:
|
||||
raise ValueError("unsigned types must not be negative")
|
||||
if cls.byte_len and value.bit_length() > (cls.byte_len << 3):
|
||||
raise ValueError("value out of bounds for uint{}".format(cls.byte_len * 8))
|
||||
return super().__new__(cls, value)
|
||||
|
||||
def __add__(self, other):
|
||||
return self.__class__(super().__add__(coerce_type_maybe(other, self.__class__, strict=True)))
|
||||
|
||||
def __sub__(self, other):
|
||||
return self.__class__(super().__sub__(coerce_type_maybe(other, self.__class__, strict=True)))
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls(0)
|
||||
|
||||
|
||||
class uint8(uint):
|
||||
byte_len = 1
|
||||
|
||||
|
||||
# Alias for uint8
|
||||
class byte(uint8):
|
||||
pass
|
||||
|
||||
|
||||
class uint16(uint):
|
||||
byte_len = 2
|
||||
|
||||
|
||||
class uint32(uint):
|
||||
byte_len = 4
|
||||
|
||||
|
||||
class uint64(uint):
|
||||
byte_len = 8
|
||||
|
||||
|
||||
class uint128(uint):
|
||||
byte_len = 16
|
||||
|
||||
|
||||
class uint256(uint):
|
||||
byte_len = 32
|
||||
|
||||
|
||||
def coerce_type_maybe(v, typ: SSZType, strict: bool = False):
|
||||
v_typ = type(v)
|
||||
# shortcut if it's already the type we are looking for
|
||||
if v_typ == typ:
|
||||
return v
|
||||
elif isinstance(v, int):
|
||||
if isinstance(v, uint): # do not coerce from one uintX to another uintY
|
||||
if issubclass(typ, uint) and v.type().byte_len == typ.byte_len:
|
||||
return typ(v)
|
||||
# revert to default behavior below if-else. (ValueError/bare)
|
||||
else:
|
||||
return typ(v)
|
||||
elif isinstance(v, (list, tuple)):
|
||||
return typ(*v)
|
||||
elif isinstance(v, (bytes, ByteVector, ByteList)):
|
||||
return typ(v)
|
||||
elif isinstance(v, GeneratorType):
|
||||
return typ(v)
|
||||
|
||||
# just return as-is, Value-checkers will take care of it not being coerced, if we are not strict.
|
||||
if strict and not isinstance(v, typ):
|
||||
raise ValueError("Type coercion of {} to {} failed".format(v, typ))
|
||||
return v
|
||||
|
||||
|
||||
class Series(SSZValue):
|
||||
|
||||
def __iter__(self) -> Iterator[SSZValue]:
|
||||
raise Exception("Not implemented")
|
||||
|
||||
|
||||
# Note: importing ssz functionality locally, to avoid import loop
|
||||
|
||||
class Container(Series, metaclass=SSZType):
|
||||
|
||||
def __init__(self, **kwargs):
|
||||
cls = self.__class__
|
||||
for f, t in cls.get_fields().items():
|
||||
if f not in kwargs:
|
||||
setattr(self, f, t.default())
|
||||
else:
|
||||
value = coerce_type_maybe(kwargs[f], t)
|
||||
if not isinstance(value, t):
|
||||
raise ValueError(f"Bad input for class {self.__class__}:"
|
||||
f" field: {f} type: {t} value: {value} value type: {type(value)}")
|
||||
setattr(self, f, value)
|
||||
|
||||
def serialize(self):
|
||||
from .ssz_impl import serialize
|
||||
return serialize(self)
|
||||
|
||||
def hash_tree_root(self):
|
||||
from .ssz_impl import hash_tree_root
|
||||
return hash_tree_root(self)
|
||||
|
||||
def __setattr__(self, name, value):
|
||||
if name not in self.__class__.__annotations__:
|
||||
raise AttributeError("Cannot change non-existing SSZ-container attribute")
|
||||
field_typ = self.__class__.__annotations__[name]
|
||||
value = coerce_type_maybe(value, field_typ)
|
||||
if not isinstance(value, field_typ):
|
||||
raise ValueError(f"Cannot set field of {self.__class__}:"
|
||||
f" field: {name} type: {field_typ} value: {value} value type: {type(value)}")
|
||||
super().__setattr__(name, value)
|
||||
|
||||
def __repr__(self):
|
||||
return repr({field: (getattr(self, field) if hasattr(self, field) else 'unset')
|
||||
for field in self.get_fields().keys()})
|
||||
|
||||
def __str__(self):
|
||||
output = [f'{self.__class__.__name__}']
|
||||
for field in self.get_fields().keys():
|
||||
output.append(f' {field}: {getattr(self, field)}')
|
||||
return "\n".join(output)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.hash_tree_root() == other.hash_tree_root()
|
||||
|
||||
def __hash__(self):
|
||||
return hash(self.hash_tree_root())
|
||||
|
||||
def copy(self):
|
||||
return copy.deepcopy(self)
|
||||
|
||||
@classmethod
|
||||
def get_fields(cls) -> Dict[str, SSZType]:
|
||||
if not hasattr(cls, '__annotations__'): # no container fields
|
||||
return {}
|
||||
return dict(cls.__annotations__)
|
||||
|
||||
@classmethod
|
||||
def get_field_names(cls) -> Iterable[SSZType]:
|
||||
if not hasattr(cls, '__annotations__'): # no container fields
|
||||
return ()
|
||||
return list(cls.__annotations__.keys())
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls(**{f: t.default() for f, t in cls.get_fields().items()})
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return all(t.is_fixed_size() for t in cls.get_fields().values())
|
||||
|
||||
def __iter__(self) -> Iterator[SSZValue]:
|
||||
return iter([getattr(self, field) for field in self.get_fields().keys()])
|
||||
|
||||
|
||||
class ParamsBase(Series):
|
||||
_has_params = False
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if not cls._has_params:
|
||||
raise Exception("cannot init bare type without params")
|
||||
return super().__new__(cls, **kwargs)
|
||||
|
||||
|
||||
class ParamsMeta(SSZType):
|
||||
|
||||
def __new__(cls, class_name, parents, attrs):
|
||||
out = type.__new__(cls, class_name, parents, attrs)
|
||||
if hasattr(out, "_has_params") and getattr(out, "_has_params"):
|
||||
for k, v in attrs.items():
|
||||
setattr(out, k, v)
|
||||
return out
|
||||
|
||||
def __getitem__(self, params):
|
||||
o = self.__class__(self.__name__, (self,), self.attr_from_params(params))
|
||||
return o
|
||||
|
||||
def __str__(self):
|
||||
return f"{self.__name__}~{self.__class__.__name__}"
|
||||
|
||||
def __repr__(self):
|
||||
return f"{self.__name__}~{self.__class__.__name__}"
|
||||
|
||||
def attr_from_params(self, p):
|
||||
# single key params are valid too. Wrap them in a tuple.
|
||||
params = p if isinstance(p, tuple) else (p,)
|
||||
res = {'_has_params': True}
|
||||
i = 0
|
||||
for (name, typ) in self.__annotations__.items():
|
||||
if hasattr(self.__class__, name):
|
||||
res[name] = getattr(self.__class__, name)
|
||||
else:
|
||||
if i >= len(params):
|
||||
i += 1
|
||||
continue
|
||||
param = params[i]
|
||||
if not isinstance(param, typ):
|
||||
raise TypeError(
|
||||
"cannot create parametrized class with param {} as {} of type {}".format(param, name, typ))
|
||||
res[name] = param
|
||||
i += 1
|
||||
if len(params) != i:
|
||||
raise TypeError("provided parameters {} mismatch required parameter count {}".format(params, i))
|
||||
return res
|
||||
|
||||
def __subclasscheck__(self, subclass):
|
||||
# check regular class system if we can, solves a lot of the normal cases.
|
||||
if super().__subclasscheck__(subclass):
|
||||
return True
|
||||
# if they are not normal subclasses, they are of the same class.
|
||||
# then they should have the same name
|
||||
if subclass.__name__ != self.__name__:
|
||||
return False
|
||||
# If they do have the same name, they should also have the same params.
|
||||
for name, typ in self.__annotations__.items():
|
||||
if hasattr(self, name) and hasattr(subclass, name) \
|
||||
and getattr(subclass, name) != getattr(self, name):
|
||||
return False
|
||||
return True
|
||||
|
||||
def __instancecheck__(self, obj):
|
||||
return self.__subclasscheck__(obj.__class__)
|
||||
|
||||
|
||||
class ElementsType(ParamsMeta):
|
||||
elem_type: SSZType
|
||||
length: int
|
||||
|
||||
|
||||
class Elements(ParamsBase, metaclass=ElementsType):
|
||||
pass
|
||||
|
||||
|
||||
class BaseList(list, Elements):
|
||||
|
||||
def __init__(self, *args):
|
||||
items = self.extract_args(*args)
|
||||
|
||||
if not self.value_check(items):
|
||||
raise ValueError(f"Bad input for class {self.__class__}: {items}")
|
||||
super().__init__(items)
|
||||
|
||||
@classmethod
|
||||
def value_check(cls, value):
|
||||
return all(isinstance(v, cls.elem_type) for v in value) and len(value) <= cls.length
|
||||
|
||||
@classmethod
|
||||
def extract_args(cls, *args):
|
||||
x = list(args)
|
||||
if len(x) == 1 and isinstance(x[0], (GeneratorType, list, tuple)):
|
||||
x = list(x[0])
|
||||
x = [coerce_type_maybe(v, cls.elem_type) for v in x]
|
||||
return x
|
||||
|
||||
def __str__(self):
|
||||
cls = self.__class__
|
||||
return f"{cls.__name__}[{cls.elem_type.__name__}, {cls.length}]({', '.join(str(v) for v in self)})"
|
||||
|
||||
def __repr__(self):
|
||||
cls = self.__class__
|
||||
return f"{cls.__name__}[{cls.elem_type.__name__}, {cls.length}]({', '.join(str(v) for v in self)})"
|
||||
|
||||
def __getitem__(self, k) -> SSZValue:
|
||||
if isinstance(k, int): # check if we are just doing a lookup, and not slicing
|
||||
if k < 0:
|
||||
raise IndexError(f"cannot get item in type {self.__class__} at negative index {k}")
|
||||
if k > len(self):
|
||||
raise IndexError(f"cannot get item in type {self.__class__}"
|
||||
f" at out of bounds index {k}")
|
||||
return super().__getitem__(k)
|
||||
|
||||
def __setitem__(self, k, v):
|
||||
if type(k) == slice:
|
||||
if (k.start is not None and k.start < 0) or (k.stop is not None and k.stop > len(self)):
|
||||
raise IndexError(f"cannot set item in type {self.__class__}"
|
||||
f" at out of bounds slice {k} (to {v}, bound: {len(self)})")
|
||||
super().__setitem__(k, [coerce_type_maybe(x, self.__class__.elem_type) for x in v])
|
||||
else:
|
||||
if k < 0:
|
||||
raise IndexError(f"cannot set item in type {self.__class__} at negative index {k} (to {v})")
|
||||
if k > len(self):
|
||||
raise IndexError(f"cannot set item in type {self.__class__}"
|
||||
f" at out of bounds index {k} (to {v}, bound: {len(self)})")
|
||||
super().__setitem__(k, coerce_type_maybe(v, self.__class__.elem_type, strict=True))
|
||||
|
||||
def append(self, v):
|
||||
super().append(coerce_type_maybe(v, self.__class__.elem_type, strict=True))
|
||||
|
||||
def __iter__(self) -> Iterator[SSZValue]:
|
||||
return super().__iter__()
|
||||
|
||||
def last(self):
|
||||
# be explicit about getting the last item, for the non-python readers, and negative-index safety
|
||||
return self[len(self) - 1]
|
||||
|
||||
|
||||
class BitElementsType(ElementsType):
|
||||
elem_type: SSZType = boolean
|
||||
length: int
|
||||
|
||||
|
||||
class Bits(BaseList, metaclass=BitElementsType):
|
||||
|
||||
def as_bytes(self):
|
||||
as_bytearray = [0] * ((len(self) + 7) // 8)
|
||||
for i in range(len(self)):
|
||||
as_bytearray[i // 8] |= int(self[i]) << (i % 8)
|
||||
return bytes(as_bytearray)
|
||||
|
||||
|
||||
class Bitlist(Bits):
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls()
|
||||
|
||||
|
||||
class Bitvector(Bits):
|
||||
|
||||
@classmethod
|
||||
def extract_args(cls, *args):
|
||||
if len(args) == 0:
|
||||
return cls.default()
|
||||
else:
|
||||
return super().extract_args(*args)
|
||||
|
||||
@classmethod
|
||||
def value_check(cls, value):
|
||||
# check length limit strictly
|
||||
return len(value) == cls.length and super().value_check(value)
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return True
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls(0 for _ in range(cls.length))
|
||||
|
||||
|
||||
class List(BaseList):
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls()
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return False
|
||||
|
||||
|
||||
class Vector(BaseList):
|
||||
|
||||
@classmethod
|
||||
def value_check(cls, value):
|
||||
# check length limit strictly
|
||||
return len(value) == cls.length and super().value_check(value)
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return cls(cls.elem_type.default() for _ in range(cls.length))
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return cls.elem_type.is_fixed_size()
|
||||
|
||||
def append(self, v):
|
||||
# Deep-copy and other utils like to change the internals during work.
|
||||
# Only complain if we had the right size.
|
||||
if len(self) == self.__class__.length:
|
||||
raise Exception("cannot modify vector length")
|
||||
else:
|
||||
super().append(v)
|
||||
|
||||
def pop(self, *args):
|
||||
raise Exception("cannot modify vector length")
|
||||
|
||||
|
||||
class BytesType(ElementsType):
|
||||
elem_type: SSZType = byte
|
||||
length: int
|
||||
|
||||
|
||||
class BaseBytes(bytes, Elements, metaclass=BytesType):
|
||||
|
||||
def __new__(cls, *args) -> "BaseBytes":
|
||||
extracted_val = cls.extract_args(*args)
|
||||
if not cls.value_check(extracted_val):
|
||||
raise ValueError(f"Bad input for class {cls}: {extracted_val}")
|
||||
return super().__new__(cls, extracted_val)
|
||||
|
||||
@classmethod
|
||||
def extract_args(cls, *args):
|
||||
x = args
|
||||
if len(x) == 1 and isinstance(x[0], (GeneratorType, bytes, str)):
|
||||
x = x[0]
|
||||
if isinstance(x, bytes): # Includes BytesLike
|
||||
return x
|
||||
if isinstance(x, str):
|
||||
if x[:2] == '0x':
|
||||
return bytes.fromhex(x[2:])
|
||||
else:
|
||||
return bytes.fromhex(x)
|
||||
else:
|
||||
return bytes(x) # E.g. GeneratorType put into bytes.
|
||||
|
||||
@classmethod
|
||||
def value_check(cls, value):
|
||||
# check type and virtual length limit
|
||||
return isinstance(value, bytes) and len(value) <= cls.length
|
||||
|
||||
def __str__(self):
|
||||
cls = self.__class__
|
||||
return f"{cls.__name__}[{cls.length}]: {self.hex()}"
|
||||
|
||||
|
||||
class ByteList(BaseBytes):
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return b''
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return False
|
||||
|
||||
|
||||
class ByteVector(BaseBytes):
|
||||
|
||||
@classmethod
|
||||
def extract_args(cls, *args):
|
||||
if len(args) == 0:
|
||||
return cls.default()
|
||||
else:
|
||||
return super().extract_args(*args)
|
||||
|
||||
@classmethod
|
||||
def default(cls):
|
||||
return b'\x00' * cls.length
|
||||
|
||||
@classmethod
|
||||
def value_check(cls, value):
|
||||
# check length limit strictly
|
||||
return len(value) == cls.length and super().value_check(value)
|
||||
|
||||
@classmethod
|
||||
def is_fixed_size(cls):
|
||||
return True
|
||||
|
||||
|
||||
# Helpers for common ByteVector types
|
||||
Bytes1: BytesType = ByteVector[1]
|
||||
Bytes4: BytesType = ByteVector[4]
|
||||
Bytes8: BytesType = ByteVector[8]
|
||||
Bytes32: BytesType = ByteVector[32]
|
||||
Bytes48: BytesType = ByteVector[48]
|
||||
Bytes96: BytesType = ByteVector[96]
|
||||
# flake8: noqa
|
||||
# Ignore linter: This module makes importing SSZ types easy, and hides away the underlying library from the spec.
|
||||
|
||||
from remerkleable.complex import Container, Vector, List
|
||||
from remerkleable.basic import boolean, bit, uint, byte, uint8, uint16, uint32, uint64, uint128, uint256
|
||||
from remerkleable.bitfields import Bitvector, Bitlist
|
||||
from remerkleable.byte_arrays import ByteVector, Bytes1, Bytes4, Bytes8, Bytes32, Bytes48, Bytes96, ByteList
|
||||
from remerkleable.core import BasicView, View, TypeDef
|
||||
|
|
|
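The spec-facing surface stays the same after the swap to remerkleable: the same type names (Container, List, Bitvector, Bytes32, ...) are simply re-exported above. A minimal sketch of how spec code consumes them; this assumes remerkleable's View methods encode_bytes() and hash_tree_root(), which are not shown in this diff, and Checkpoint is a hypothetical stand-in container:

from remerkleable.complex import Container
from remerkleable.basic import uint64
from remerkleable.byte_arrays import ByteVector

Bytes32 = ByteVector[32]

class Checkpoint(Container):
    epoch: uint64
    root: Bytes32

cp = Checkpoint(epoch=uint64(7), root=Bytes32(b'\x11' * 32))
serialized = cp.encode_bytes()  # SSZ serialization
root = cp.hash_tree_root()      # 32-byte merkle root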
@ -1,264 +0,0 @@
from typing import Iterable
from .ssz_impl import serialize, hash_tree_root
from .ssz_typing import (
    bit, boolean, Container, List, Vector, ByteList, ByteVector,
    Bitlist, Bitvector,
    uint8, uint16, uint32, uint64, uint256, byte
)
from ..hash_function import hash as bytes_hash

import pytest


class EmptyTestStruct(Container):
    pass


class SingleFieldTestStruct(Container):
    A: byte


class SmallTestStruct(Container):
    A: uint16
    B: uint16


class FixedTestStruct(Container):
    A: uint8
    B: uint64
    C: uint32


class VarTestStruct(Container):
    A: uint16
    B: List[uint16, 1024]
    C: uint8


class ComplexTestStruct(Container):
    A: uint16
    B: List[uint16, 128]
    C: uint8
    D: ByteList[256]
    E: VarTestStruct
    F: Vector[FixedTestStruct, 4]
    G: Vector[VarTestStruct, 2]


sig_test_data = [0 for i in range(96)]
for k, v in {0: 1, 32: 2, 64: 3, 95: 0xff}.items():
    sig_test_data[k] = v


def chunk(hex: str) -> str:
    return (hex + ("00" * 32))[:64]  # just pad on the right, to 32 bytes (64 hex chars)


def h(a: str, b: str) -> str:
    return bytes_hash(bytes.fromhex(a) + bytes.fromhex(b)).hex()


# zero hashes, as strings, for each layer of the merkle tree
zero_hashes = [chunk("")]
for layer in range(1, 32):
    zero_hashes.append(h(zero_hashes[layer - 1], zero_hashes[layer - 1]))


def merge(a: str, branch: Iterable[str]) -> str:
    """
    Merge leaf a (on the left) with the branch items (on the right); the branch is ordered bottom to top.
    """
    out = a
    for b in branch:
        out = h(out, b)
    return out


test_data = [
    ("bit F", bit(False), "00", chunk("00")),
    ("bit T", bit(True), "01", chunk("01")),
    ("boolean F", boolean(False), "00", chunk("00")),
    ("boolean T", boolean(True), "01", chunk("01")),
    ("bitvector TTFTFTFF", Bitvector[8](1, 1, 0, 1, 0, 1, 0, 0), "2b", chunk("2b")),
    ("bitlist TTFTFTFF", Bitlist[8](1, 1, 0, 1, 0, 1, 0, 0), "2b01", h(chunk("2b"), chunk("08"))),
    ("bitvector FTFT", Bitvector[4](0, 1, 0, 1), "0a", chunk("0a")),
    ("bitlist FTFT", Bitlist[4](0, 1, 0, 1), "1a", h(chunk("0a"), chunk("04"))),
    ("bitvector FTF", Bitvector[3](0, 1, 0), "02", chunk("02")),
    ("bitlist FTF", Bitlist[3](0, 1, 0), "0a", h(chunk("02"), chunk("03"))),
    ("bitvector TFTFFFTTFT", Bitvector[10](1, 0, 1, 0, 0, 0, 1, 1, 0, 1), "c502", chunk("c502")),
    ("bitlist TFTFFFTTFT", Bitlist[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1), "c506", h(chunk("c502"), chunk("0A"))),
    ("bitvector TFTFFFTTFTFFFFTT", Bitvector[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1),
     "c5c2", chunk("c5c2")),
    ("bitlist TFTFFFTTFTFFFFTT", Bitlist[16](1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1),
     "c5c201", h(chunk("c5c2"), chunk("10"))),
    ("long bitvector", Bitvector[512](1 for i in range(512)),
     "ff" * 64, h("ff" * 32, "ff" * 32)),
    ("long bitlist", Bitlist[512](1),
     "03", h(h(chunk("01"), chunk("")), chunk("01"))),
    ("long bitlist", Bitlist[512](1 for i in range(512)),
     "ff" * 64 + "01", h(h("ff" * 32, "ff" * 32), chunk("0002"))),
    ("odd bitvector", Bitvector[513](1 for i in range(513)),
     "ff" * 64 + "01", h(h("ff" * 32, "ff" * 32), h(chunk("01"), chunk("")))),
    ("odd bitlist", Bitlist[513](1 for i in range(513)),
     "ff" * 64 + "03", h(h(h("ff" * 32, "ff" * 32), h(chunk("01"), chunk(""))), chunk("0102"))),
    ("uint8 00", uint8(0x00), "00", chunk("00")),
    ("uint8 01", uint8(0x01), "01", chunk("01")),
    ("uint8 ab", uint8(0xab), "ab", chunk("ab")),
    ("byte 00", byte(0x00), "00", chunk("00")),
    ("byte 01", byte(0x01), "01", chunk("01")),
    ("byte ab", byte(0xab), "ab", chunk("ab")),
    ("uint16 0000", uint16(0x0000), "0000", chunk("0000")),
    ("uint16 abcd", uint16(0xabcd), "cdab", chunk("cdab")),
    ("uint32 00000000", uint32(0x00000000), "00000000", chunk("00000000")),
    ("uint32 01234567", uint32(0x01234567), "67452301", chunk("67452301")),
    ("small (4567, 0123)", SmallTestStruct(A=0x4567, B=0x0123), "67452301", h(chunk("6745"), chunk("2301"))),
    ("small [4567, 0123]::2", Vector[uint16, 2](uint16(0x4567), uint16(0x0123)), "67452301", chunk("67452301")),
    ("uint32 01234567", uint32(0x01234567), "67452301", chunk("67452301")),
    ("uint64 0000000000000000", uint64(0x00000000), "0000000000000000", chunk("0000000000000000")),
    ("uint64 0123456789abcdef", uint64(0x0123456789abcdef), "efcdab8967452301", chunk("efcdab8967452301")),
    ("sig", ByteVector[96](*sig_test_data),
     "0100000000000000000000000000000000000000000000000000000000000000"
     "0200000000000000000000000000000000000000000000000000000000000000"
     "03000000000000000000000000000000000000000000000000000000000000ff",
     h(h(chunk("01"), chunk("02")),
       h("03000000000000000000000000000000000000000000000000000000000000ff", chunk("")))),
    ("emptyTestStruct", EmptyTestStruct(), "", chunk("")),
    ("singleFieldTestStruct", SingleFieldTestStruct(A=0xab), "ab", chunk("ab")),
    ("uint16 list", List[uint16, 32](uint16(0xaabb), uint16(0xc0ad), uint16(0xeeff)), "bbaaadc0ffee",
     h(h(chunk("bbaaadc0ffee"), chunk("")), chunk("03000000"))  # max length: 32 * 2 = 64 bytes = 2 chunks
     ),
    ("uint32 list", List[uint32, 128](uint32(0xaabb), uint32(0xc0ad), uint32(0xeeff)), "bbaa0000adc00000ffee0000",
     # max length: 128 * 4 = 512 bytes = 16 chunks
     h(merge(chunk("bbaa0000adc00000ffee0000"), zero_hashes[0:4]), chunk("03000000"))
     ),
    ("uint256 list", List[uint256, 32](uint256(0xaabb), uint256(0xc0ad), uint256(0xeeff)),
     "bbaa000000000000000000000000000000000000000000000000000000000000"
     "adc0000000000000000000000000000000000000000000000000000000000000"
     "ffee000000000000000000000000000000000000000000000000000000000000",
     h(merge(h(h(chunk("bbaa"), chunk("adc0")), h(chunk("ffee"), chunk(""))), zero_hashes[2:5]), chunk("03000000"))
     ),
    ("uint256 list long", List[uint256, 128](i for i in range(1, 20)),
     "".join([i.to_bytes(length=32, byteorder='little').hex() for i in range(1, 20)]),
     h(merge(
         h(
             h(
                 h(
                     h(h(chunk("01"), chunk("02")), h(chunk("03"), chunk("04"))),
                     h(h(chunk("05"), chunk("06")), h(chunk("07"), chunk("08"))),
                 ),
                 h(
                     h(h(chunk("09"), chunk("0a")), h(chunk("0b"), chunk("0c"))),
                     h(h(chunk("0d"), chunk("0e")), h(chunk("0f"), chunk("10"))),
                 )
             ),
             h(
                 h(
                     h(h(chunk("11"), chunk("12")), h(chunk("13"), chunk(""))),
                     zero_hashes[2]
                 ),
                 zero_hashes[3]
             )
         ),
         zero_hashes[5:7]), chunk("13000000"))  # 128 chunks = 7 deep
     ),
    ("fixedTestStruct", FixedTestStruct(A=0xab, B=0xaabbccdd00112233, C=0x12345678), "ab33221100ddccbbaa78563412",
     h(h(chunk("ab"), chunk("33221100ddccbbaa")), h(chunk("78563412"), chunk("")))),
    ("varTestStruct nil", VarTestStruct(A=0xabcd, C=0xff), "cdab07000000ff",
     h(h(chunk("cdab"), h(zero_hashes[6], chunk("00000000"))), h(chunk("ff"), chunk("")))),
    ("varTestStruct empty", VarTestStruct(A=0xabcd, B=List[uint16, 1024](), C=0xff), "cdab07000000ff",
     h(h(chunk("cdab"), h(zero_hashes[6], chunk("00000000"))), h(chunk("ff"), chunk("")))),  # log2(1024*2/32) = 6 deep
    ("varTestStruct some", VarTestStruct(A=0xabcd, B=List[uint16, 1024](1, 2, 3), C=0xff),
     "cdab07000000ff010002000300",
     h(
         h(
             chunk("cdab"),
             h(
                 merge(
                     chunk("010002000300"),
                     zero_hashes[0:6]
                 ),
                 chunk("03000000")  # length mix in
             )
         ),
         h(chunk("ff"), chunk(""))
     )),
    ("complexTestStruct",
     ComplexTestStruct(
         A=0xaabb,
         B=List[uint16, 128](0x1122, 0x3344),
         C=0xff,
         D=ByteList[256](b"foobar"),
         E=VarTestStruct(A=0xabcd, B=List[uint16, 1024](1, 2, 3), C=0xff),
         F=Vector[FixedTestStruct, 4](
             FixedTestStruct(A=0xcc, B=0x4242424242424242, C=0x13371337),
             FixedTestStruct(A=0xdd, B=0x3333333333333333, C=0xabcdabcd),
             FixedTestStruct(A=0xee, B=0x4444444444444444, C=0x00112233),
             FixedTestStruct(A=0xff, B=0x5555555555555555, C=0x44556677)),
         G=Vector[VarTestStruct, 2](
             VarTestStruct(A=0xdead, B=List[uint16, 1024](1, 2, 3), C=0x11),
             VarTestStruct(A=0xbeef, B=List[uint16, 1024](4, 5, 6), C=0x22)),
     ),
     "bbaa"
     "47000000"  # offset of B, []uint16
     "ff"
     "4b000000"  # offset of foobar
     "51000000"  # offset of E
     "cc424242424242424237133713"
     "dd3333333333333333cdabcdab"
     "ee444444444444444433221100"
     "ff555555555555555577665544"
     "5e000000"  # pointer to G
     "22114433"  # contents of B
     "666f6f626172"  # foobar
     "cdab07000000ff010002000300"  # contents of E
     "08000000" "15000000"  # [start G]: local offsets of [2]varTestStruct
     "adde0700000011010002000300"
     "efbe0700000022040005000600",
     h(
         h(
             h(  # A and B
                 chunk("bbaa"),
                 h(merge(chunk("22114433"), zero_hashes[0:3]), chunk("02000000"))  # 2*128/32 = 8 chunks
             ),
             h(  # C and D
                 chunk("ff"),
                 h(merge(chunk("666f6f626172"), zero_hashes[0:3]), chunk("06000000"))  # 256/32 = 8 chunks
             )
         ),
         h(
             h(  # E and F
                 h(h(chunk("cdab"), h(merge(chunk("010002000300"), zero_hashes[0:6]), chunk("03000000"))),
                   h(chunk("ff"), chunk(""))),
                 h(
                     h(
                         h(h(chunk("cc"), chunk("4242424242424242")), h(chunk("37133713"), chunk(""))),
                         h(h(chunk("dd"), chunk("3333333333333333")), h(chunk("cdabcdab"), chunk(""))),
                     ),
                     h(
                         h(h(chunk("ee"), chunk("4444444444444444")), h(chunk("33221100"), chunk(""))),
                         h(h(chunk("ff"), chunk("5555555555555555")), h(chunk("77665544"), chunk(""))),
                     ),
                 )
             ),
             h(  # G and padding
                 h(
                     h(h(chunk("adde"), h(merge(chunk("010002000300"), zero_hashes[0:6]), chunk("03000000"))),
                       h(chunk("11"), chunk(""))),
                     h(h(chunk("efbe"), h(merge(chunk("040005000600"), zero_hashes[0:6]), chunk("03000000"))),
                       h(chunk("22"), chunk(""))),
                 ),
                 chunk("")
             )
         )
     ))
]


@pytest.mark.parametrize("name, value, serialized, _", test_data)
def test_serialize(name, value, serialized, _):
    assert serialize(value) == bytes.fromhex(serialized)


@pytest.mark.parametrize("name, value, _, root", test_data)
def test_hash_tree_root(name, value, _, root):
    assert hash_tree_root(value) == bytes.fromhex(root)
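To see how the expected roots above are assembled, here is a worked recomputation of the "uint16 list" case with plain hashlib. This is a sketch: it assumes the spec's hash function is SHA-256 and inlines stand-ins for the chunk/h helpers from the deleted test file:

import hashlib

def h_hex(a: str, b: str) -> str:
    # hash two 32-byte chunks given as hex strings
    return hashlib.sha256(bytes.fromhex(a) + bytes.fromhex(b)).hexdigest()

def pad_chunk(hex_str: str) -> str:
    # right-pad to a full 32-byte chunk (64 hex chars)
    return (hex_str + "00" * 32)[:64]

# List[uint16, 32](0xaabb, 0xc0ad, 0xeeff): 6 bytes of data, capacity 64 bytes = 2 chunks
data_root = h_hex(pad_chunk("bbaaadc0ffee"), pad_chunk(""))  # merkleize the two chunks
root = h_hex(data_root, pad_chunk("03000000"))               # mix in length 3 (little-endian)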
@ -1,233 +0,0 @@
from .ssz_typing import (
    SSZValue, SSZType, BasicValue, BasicType, Series, ElementsType,
    Elements, bit, boolean, Container, List, Vector, ByteList, ByteVector,
    byte, uint, uint8, uint16, uint32, uint64, uint128, uint256,
    Bytes32, Bytes48
)


def expect_value_error(fn, msg):
    try:
        fn()
        raise AssertionError(msg)
    except ValueError:
        pass


def test_subclasses():
    for u in [uint, uint8, uint16, uint32, uint64, uint128, uint256]:
        assert issubclass(u, uint)
        assert issubclass(u, int)
        assert issubclass(u, BasicValue)
        assert issubclass(u, SSZValue)
        assert isinstance(u, SSZType)
        assert isinstance(u, BasicType)
    assert issubclass(boolean, BasicValue)
    assert isinstance(boolean, BasicType)

    for c in [Container, List, Vector, ByteList, ByteVector]:
        assert issubclass(c, Series)
        assert issubclass(c, SSZValue)
        assert isinstance(c, SSZType)
        assert not issubclass(c, BasicValue)
        assert not isinstance(c, BasicType)

    for c in [List, Vector, ByteList, ByteVector]:
        assert issubclass(c, Elements)
        assert isinstance(c, ElementsType)


def test_basic_instances():
    for u in [uint, uint8, byte, uint16, uint32, uint64, uint128, uint256]:
        v = u(123)
        assert isinstance(v, uint)
        assert isinstance(v, int)
        assert isinstance(v, BasicValue)
        assert isinstance(v, SSZValue)

    assert isinstance(boolean(True), BasicValue)
    assert isinstance(boolean(False), BasicValue)
    assert isinstance(bit(True), boolean)
    assert isinstance(bit(False), boolean)


def test_basic_value_bounds():
    max = {
        boolean: 2 ** 1,
        bit: 2 ** 1,
        uint8: 2 ** (8 * 1),
        byte: 2 ** (8 * 1),
        uint16: 2 ** (8 * 2),
        uint32: 2 ** (8 * 4),
        uint64: 2 ** (8 * 8),
        uint128: 2 ** (8 * 16),
        uint256: 2 ** (8 * 32),
    }
    for k, v in max.items():
        # this should work
        assert k(v - 1) == v - 1
        # but we do not allow overflows
        expect_value_error(lambda: k(v), "no overflows allowed")

    for k, _ in max.items():
        # this should work
        assert k(0) == 0
        # but we do not allow underflows
        expect_value_error(lambda: k(-1), "no underflows allowed")


def test_container():
    class Foo(Container):
        a: uint8
        b: uint32

    empty = Foo()
    assert empty.a == uint8(0)
    assert empty.b == uint32(0)

    assert issubclass(Foo, Container)
    assert issubclass(Foo, SSZValue)
    assert issubclass(Foo, Series)

    assert Foo.is_fixed_size()
    x = Foo(a=uint8(123), b=uint32(45))
    assert x.a == 123
    assert x.b == 45
    assert isinstance(x.a, uint8)
    assert isinstance(x.b, uint32)
    assert x.type().is_fixed_size()

    class Bar(Container):
        a: uint8
        b: List[uint8, 1024]

    assert not Bar.is_fixed_size()

    y = Bar(a=123, b=List[uint8, 1024](uint8(1), uint8(2)))
    assert y.a == 123
    assert isinstance(y.a, uint8)
    assert len(y.b) == 2
    assert isinstance(y.b, List[uint8, 1024])
    assert not y.type().is_fixed_size()
    assert y.b[0] == 1
    v: List = y.b
    assert v.type().elem_type == uint8
    assert v.type().length == 1024

    y.a = 42
    try:
        y.a = 256  # out of bounds
        assert False
    except ValueError:
        pass

    try:
        y.a = uint16(255)  # within bounds, wrong type
        assert False
    except ValueError:
        pass

    try:
        y.not_here = 5
        assert False
    except AttributeError:
        pass


def test_list():
    typ = List[uint64, 128]
    assert issubclass(typ, List)
    assert issubclass(typ, SSZValue)
    assert issubclass(typ, Series)
    assert issubclass(typ, Elements)
    assert isinstance(typ, ElementsType)

    assert not typ.is_fixed_size()

    assert len(typ()) == 0  # empty
    assert len(typ(uint64(0))) == 1  # single arg
    assert len(typ(uint64(i) for i in range(10))) == 10  # generator
    assert len(typ(uint64(0), uint64(1), uint64(2))) == 3  # args
    assert isinstance(typ(1, 2, 3, 4, 5)[4], uint64)  # coercion
    assert isinstance(typ(i for i in range(10))[9], uint64)  # coercion in generator

    v = typ(uint64(0))
    v[0] = uint64(123)
    assert v[0] == 123
    assert isinstance(v[0], uint64)

    assert isinstance(v, List)
    assert isinstance(v, List[uint64, 128])
    assert isinstance(v, typ)
    assert isinstance(v, SSZValue)
    assert isinstance(v, Series)
    assert issubclass(v.type(), Elements)
    assert isinstance(v.type(), ElementsType)

    assert len(typ([i for i in range(10)])) == 10  # cast py list to SSZ list

    foo = List[uint32, 128](0 for i in range(128))
    foo[0] = 123
    foo[1] = 654
    foo[127] = 222
    assert sum(foo) == 999
    try:
        foo[3] = 2 ** 32  # out of bounds
        assert False
    except ValueError:
        pass

    try:
        foo[3] = uint64(2 ** 32 - 1)  # within bounds, wrong type
        assert False
    except ValueError:
        pass

    try:
        foo[128] = 100
        assert False
    except IndexError:
        pass

    try:
        foo[-1] = 100  # valid in normal python lists
        assert False
    except IndexError:
        pass

    try:
        foo[128] = 100  # out of bounds
        assert False
    except IndexError:
        pass


def test_bytesn_subclass():
    assert isinstance(ByteVector[32](b'\xab' * 32), Bytes32)
    assert not isinstance(ByteVector[32](b'\xab' * 32), Bytes48)
    assert issubclass(ByteVector[32](b'\xab' * 32).type(), Bytes32)
    assert issubclass(ByteVector[32], Bytes32)

    class Root(Bytes32):
        pass

    assert isinstance(Root(b'\xab' * 32), Bytes32)
    assert not isinstance(Root(b'\xab' * 32), Bytes48)
    assert issubclass(Root(b'\xab' * 32).type(), Bytes32)
    assert issubclass(Root, Bytes32)

    assert not issubclass(Bytes48, Bytes32)

    assert len(Bytes32() + Bytes48()) == 80


def test_uint_math():
    assert uint8(0) + uint8(uint32(16)) == uint8(16)  # allow explicit casting to make invalid addition valid

    expect_value_error(lambda: uint8(0) - uint8(1), "no underflows allowed")
    expect_value_error(lambda: uint8(1) + uint8(255), "no overflows allowed")
    expect_value_error(lambda: uint8(0) + 256, "no overflows allowed")
    expect_value_error(lambda: uint8(42) + uint32(123), "no mixed types")
    expect_value_error(lambda: uint32(42) + uint8(123), "no mixed types")

    assert type(uint32(1234) + 56) == uint32
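The bounds tests above all reduce to a single rule: an N-byte uint accepts exactly the half-open range [0, 2**(8*N)). A one-function sketch of that invariant (illustrative only, not part of the diff):

def in_bounds(value: int, byte_len: int) -> bool:
    # uintN accepts 0 .. 2**(8*N) - 1; anything outside raises ValueError above
    return 0 <= value < 2 ** (8 * byte_len)

assert in_bounds(255, 1) and not in_bounds(256, 1)  # matches the uint8 cases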
@ -1,7 +0,0 @@
-r requirements.txt
pytest>=4.4
../config_helpers
flake8==3.7.7
mypy==0.750
pytest-cov
pytest-xdist
@ -1,6 +0,0 @@
eth-utils>=1.3.0,<2
eth-typing>=2.1.0,<3.0.0
pycryptodome==3.9.4
py_ecc==2.0.0
dataclasses==0.6
ssz==0.1.3
@ -1,15 +0,0 @@
from setuptools import setup, find_packages

setup(
    name='pyspec',
    packages=find_packages(),
    tests_require=["pytest"],
    install_requires=[
        "eth-utils>=1.3.0,<2",
        "eth-typing>=2.1.0,<3.0.0",
        "pycryptodome==3.9.4",
        "py_ecc==2.0.0",
        "ssz==0.1.3",
        "dataclasses==0.6",
    ]
)
@ -53,8 +53,8 @@ def case01_sign():
    for privkey in PRIVKEYS:
        for message in MESSAGES:
            sig = bls.G2ProofOfPossession.Sign(privkey, message)
            full_name = f'{int_to_hex(privkey)}_{encode_hex(message)}'
            yield f'sign_case_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
            identifier = f'{int_to_hex(privkey)}_{encode_hex(message)}'
            yield f'sign_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
                'input': {
                    'privkey': int_to_hex(privkey),
                    'message': encode_hex(message),
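Both the old and new naming lines derive an 8-byte suffix from the identifier string; the rename only moves the scenario label ("valid", "wrong_pubkey", ...) out of the hashed string and into the literal case-name prefix. A sketch of the derivation, assuming the generator's `hash` helper is SHA-256 (the identifier value below is a hypothetical example):

import hashlib

def case_suffix(identifier: str) -> str:
    # first 8 bytes of SHA-256(identifier), hex-encoded, as in the yields above
    return hashlib.sha256(identifier.encode("utf-8")).digest()[:8].hex()

case_name = f'sign_case_{case_suffix("0x2a_0xdeadbeef")}'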
@ -69,8 +69,8 @@ def case02_verify():
        # Valid signature
        signature = bls.G2ProofOfPossession.Sign(privkey, message)
        pubkey = bls.G2ProofOfPossession.PrivToPub(privkey)
        full_name = f'{encode_hex(pubkey)}_{encode_hex(message)}_valid'
        yield f'verify_case_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
        identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
        yield f'verify_valid_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkey': encode_hex(pubkey),
                'message': encode_hex(message),
@ -81,8 +81,8 @@ def case02_verify():
        # Invalid signatures -- wrong pubkey
        wrong_pubkey = bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[(i + 1) % len(PRIVKEYS)])
        full_name = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}_wrong_pubkey'
        yield f'verify_case_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
        identifier = f'{encode_hex(wrong_pubkey)}_{encode_hex(message)}'
        yield f'verify_wrong_pubkey_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkey': encode_hex(wrong_pubkey),
                'message': encode_hex(message),
@ -93,8 +93,8 @@ def case02_verify():
        # Invalid signature -- tampered with signature
        tampered_signature = signature[:-4] + b'\xFF\xFF\xFF\xFF'
        full_name = f'{encode_hex(pubkey)}_{encode_hex(message)}_tampered_signature'
        yield f'verify_case_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
        identifier = f'{encode_hex(pubkey)}_{encode_hex(message)}'
        yield f'verify_tampered_signature_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
            'input': {
                'pubkey': encode_hex(pubkey),
                'message': encode_hex(message),
@ -122,8 +122,8 @@ def case04_fast_aggregate_verify():
    pubkeys_serial = [encode_hex(pubkey) for pubkey in pubkeys]

    # Valid signature
    full_name = f'{pubkeys_serial}_{encode_hex(message)}_valid'
    yield f'fast_aggregate_verify_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
    identifier = f'{pubkeys_serial}_{encode_hex(message)}'
    yield f'fast_aggregate_verify_valid_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
        'input': {
            'pubkeys': pubkeys_serial,
            'message': encode_hex(message),
@ -135,8 +135,8 @@ def case04_fast_aggregate_verify():
    # Invalid signature -- extra pubkey
    pubkeys_extra = pubkeys + [bls.G2ProofOfPossession.PrivToPub(PRIVKEYS[-1])]
    pubkeys_extra_serial = [encode_hex(pubkey) for pubkey in pubkeys_extra]
    full_name = f'{pubkeys_extra_serial}_{encode_hex(message)}_extra_pubkey'
    yield f'fast_aggregate_verify_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
    identifier = f'{pubkeys_extra_serial}_{encode_hex(message)}'
    yield f'fast_aggregate_verify_extra_pubkey_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
        'input': {
            'pubkeys': pubkeys_extra_serial,
            'message': encode_hex(message),
@ -147,8 +147,8 @@ def case04_fast_aggregate_verify():
    # Invalid signature -- tampered with signature
    tampered_signature = aggregate_signature[:-4] + b'\xff\xff\xff\xff'
    full_name = f'{pubkeys_serial}_{encode_hex(message)}_tampered_signature'
    yield f'fast_aggregate_verify_{(hash(bytes(full_name, "utf-8"))[:8]).hex()}', {
    identifier = f'{pubkeys_serial}_{encode_hex(message)}'
    yield f'fast_aggregate_verify_tampered_signature_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', {
        'input': {
            'pubkeys': pubkeys_serial,
            'message': encode_hex(message),
@ -171,7 +171,7 @@ def case05_aggregate_verify():
        sigs.append(sig)

    aggregate_signature = bls.G2ProofOfPossession.Aggregate(sigs)
    yield f'fast_aggregate_verify_valid', {
    yield f'aggregate_verify_valid', {
        'input': {
            'pairs': pairs,
            'signature': encode_hex(aggregate_signature),
@ -180,7 +180,7 @@ def case05_aggregate_verify():
    }

    tampered_signature = aggregate_signature[:4] + b'\xff\xff\xff\xff'
    yield f'fast_aggregate_verify_tampered_signature', {
    yield f'aggregate_verify_tampered_signature', {
        'input': {
            'pairs': pairs,
            'signature': encode_hex(tampered_signature),
@ -11,15 +11,16 @@ from eth2spec.test.phase_0.epoch_processing import (
)
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from importlib import reload
from eth2spec.config import config_util


def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec_phase0.apply_constants_preset(presets)
        spec_phase1.apply_constants_preset(presets)
        config_util.prepare_config(configs_path, config_name)
        reload(spec_phase0)
        reload(spec_phase1)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
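The change to prepare_fn is the same across the generators below: instead of loading presets and pushing them into each spec module, the generator now writes the chosen config via config_util.prepare_config and reloads the spec module so its constants are re-evaluated. A condensed sketch of the new flow, using only names that appear in the diff:

from importlib import reload
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1

def prepare_config_and_reload(configs_path: str, config_name: str) -> None:
    config_util.prepare_config(configs_path, config_name)  # select the preset
    reload(spec_phase0)  # re-run module top level so constants pick up the config
    reload(spec_phase1)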
@ -1,3 +1,2 @@
../../core/gen_helpers
../../core/config_helpers
../../core/pyspec
../../../
@ -4,15 +4,16 @@ from eth2spec.test.genesis import test_initialization, test_validity
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from eth2spec.phase0 import spec as spec
from importlib import reload
from eth2spec.config import config_util


def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:

    def prepare_fn(configs_path: str) -> str:
        presets = loader.load_presets(configs_path, config_name)
        spec.apply_constants_preset(presets)
        config_util.prepare_config(configs_path, config_name)
        reload(spec)
        return config_name

    def cases_fn() -> Iterable[gen_typing.TestCase]:
@ -1,3 +1,2 @@
../../core/gen_helpers
../../core/config_helpers
../../core/pyspec
../../../
@ -11,7 +11,8 @@ from eth2spec.test.phase_0.block_processing import (
from gen_base import gen_runner, gen_typing
from gen_from_tests.gen import generate_from_tests
from preset_loader import loader
from importlib import reload
from eth2spec.config import config_util
from eth2spec.phase0 import spec as spec_phase0
from eth2spec.phase1 import spec as spec_phase1
|||
def create_provider(handler_name: str, tests_src, config_name: str) -> gen_typing.TestProvider:
|
||||
|
||||
def prepare_fn(configs_path: str) -> str:
|
||||
presets = loader.load_presets(configs_path, config_name)
|
||||
spec_phase0.apply_constants_preset(presets)
|
||||
spec_phase1.apply_constants_preset(presets)
|
||||
config_util.prepare_config(configs_path, config_name)
|
||||
reload(spec_phase0)
|
||||
reload(spec_phase1)
|
||||
return config_name
|
||||
|
||||
def cases_fn() -> Iterable[gen_typing.TestCase]:
|
||||
|
|
|
@ -1,4 +1,3 @@
eth-utils==1.6.0
../../core/gen_helpers
../../core/config_helpers
../../core/pyspec
../../../